Diffstat (limited to 'drivers/net')

 drivers/net/8139cp.c           |  37
 drivers/net/e1000/e1000.h      |   3
 drivers/net/e1000/e1000_main.c | 119
 drivers/net/sky2.c             |  77
 drivers/net/sky2.h             |   1
 drivers/net/tg3.c              |   9
 drivers/net/via-velocity.c     |   3

 7 files changed, 79 insertions(+), 170 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index f822cd3025ff..dd410496aadb 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1118,13 +1118,18 @@ err_out:
 	return -ENOMEM;
 }
 
+static void cp_init_rings_index (struct cp_private *cp)
+{
+	cp->rx_tail = 0;
+	cp->tx_head = cp->tx_tail = 0;
+}
+
 static int cp_init_rings (struct cp_private *cp)
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
 
-	cp->rx_tail = 0;
-	cp->tx_head = cp->tx_tail = 0;
+	cp_init_rings_index(cp);
 
 	return cp_refill_rx (cp);
 }
@@ -1886,30 +1891,30 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 
 	spin_unlock_irqrestore (&cp->lock, flags);
 
-	if (cp->pdev && cp->wol_enabled) {
-		pci_save_state (cp->pdev);
-		cp_set_d3_state (cp);
-	}
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
 
 static int cp_resume (struct pci_dev *pdev)
 {
-	struct net_device *dev;
-	struct cp_private *cp;
+	struct net_device *dev = pci_get_drvdata (pdev);
+	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
 
-	dev = pci_get_drvdata (pdev);
-	cp = netdev_priv(dev);
+	if (!netif_running(dev))
+		return 0;
 
 	netif_device_attach (dev);
 
-	if (cp->pdev && cp->wol_enabled) {
-		pci_set_power_state (cp->pdev, PCI_D0);
-		pci_restore_state (cp->pdev);
-	}
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D0, 0);
 
+	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
+	cp_init_rings_index (cp);
 	cp_init_hw (cp);
 	netif_start_queue (dev);
 
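[Note: the 8139cp hunks above move suspend/resume onto the generic PCI power-management helpers and re-initialize the ring indexes before cp_init_hw() on resume. Below is a rough sketch of that conventional pattern; the foo_* names and the private struct are placeholders invented for illustration, not 8139cp code — only the PCI/netdev helper calls mirror what the patch adopts.]

#include <linux/pci.h>
#include <linux/netdevice.h>

/* Hypothetical driver-private state; "foo" is a placeholder name. */
struct foo_private {
	int wol_enabled;
};

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct foo_private *priv = netdev_priv(dev);

	if (netif_running(dev))
		netif_device_detach(dev);

	pci_save_state(pdev);
	/* arm wake-on-LAN only if the user asked for it */
	pci_enable_wake(pdev, pci_choose_state(pdev, state), priv->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(dev);
	/* re-program the hardware and restart the queue here */
	return 0;
}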
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 27c77306193b..99baf0e099fc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -225,9 +225,6 @@ struct e1000_rx_ring {
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
 
-	struct sk_buff *rx_skb_top;
-	struct sk_buff *rx_skb_prev;
-
 	/* cpu for rx queue */
 	int cpu;
 
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 31e332935e5a..5b7d0f425af2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -1635,8 +1635,6 @@ setup_rx_desc_die:
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
-	rxdr->rx_skb_top = NULL;
-	rxdr->rx_skb_prev = NULL;
 
 	return 0;
 }
@@ -1713,8 +1711,23 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl |= adapter->rx_buffer_len << 0x11;
 	} else {
 		rctl &= ~E1000_RCTL_SZ_4096;
-		rctl &= ~E1000_RCTL_BSEX;
-		rctl |= E1000_RCTL_SZ_2048;
+		rctl |= E1000_RCTL_BSEX;
+		switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+		}
 	}
 
 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -2107,16 +2120,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 		}
 	}
 
-	/* there also may be some cached data in our adapter */
-	if (rx_ring->rx_skb_top) {
-		dev_kfree_skb(rx_ring->rx_skb_top);
-
-		/* rx_skb_prev will be wiped out by rx_skb_top */
-		rx_ring->rx_skb_top = NULL;
-		rx_ring->rx_skb_prev = NULL;
-	}
-
-
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
 	size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -3106,24 +3109,27 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		break;
 	}
 
-	/* since the driver code now supports splitting a packet across
-	 * multiple descriptors, most of the fifo related limitations on
-	 * jumbo frame traffic have gone away.
-	 * simply use 2k descriptors for everything.
-	 *
-	 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
-	 * means we reserve 2 more, this pushes us to allocate from the next
-	 * larger slab size
-	 * i.e. RXBUFFER_2048 --> size-4096 slab */
-
-	/* recent hardware supports 1KB granularity */
+
 	if (adapter->hw.mac_type > e1000_82547_rev_2) {
-		adapter->rx_buffer_len =
-		    ((max_frame < E1000_RXBUFFER_2048) ?
-		     max_frame : E1000_RXBUFFER_2048);
+		adapter->rx_buffer_len = max_frame;
 		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-	} else
-		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	} else {
+		if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+		   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+				"on 82542\n");
+			return -EINVAL;
+		} else {
+			if(max_frame <= E1000_RXBUFFER_2048)
+				adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+			else if(max_frame <= E1000_RXBUFFER_4096)
+				adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+			else if(max_frame <= E1000_RXBUFFER_8192)
+				adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+			else if(max_frame <= E1000_RXBUFFER_16384)
+				adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+		}
+	}
 
 	netdev->mtu = new_mtu;
 
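[Note: with multi-descriptor receives gone, e1000_change_mtu() above has to pick a single receive buffer large enough for the whole frame, rounding up to the next supported size. A stand-alone sketch of that selection follows, using local stand-ins for the E1000_RXBUFFER_* constants rather than the driver's own definitions.]

/* Local stand-ins for the E1000_RXBUFFER_* sizes used in the hunk above. */
enum { RXBUF_2048 = 2048, RXBUF_4096 = 4096, RXBUF_8192 = 8192, RXBUF_16384 = 16384 };

/* Pick the smallest supported receive buffer that holds a whole frame,
 * since a packet is no longer chained across several buffers. */
static unsigned int pick_rx_buffer_len(unsigned int max_frame)
{
	if (max_frame <= RXBUF_2048)
		return RXBUF_2048;
	if (max_frame <= RXBUF_4096)
		return RXBUF_4096;
	if (max_frame <= RXBUF_8192)
		return RXBUF_8192;
	return RXBUF_16384;
}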
@@ -3620,7 +3626,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	uint8_t last_byte;
 	unsigned int i;
 	int cleaned_count = 0;
-	boolean_t cleaned = FALSE, multi_descriptor = FALSE;
+	boolean_t cleaned = FALSE;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3652,43 +3658,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		length = le16_to_cpu(rx_desc->length);
 
-		skb_put(skb, length);
-
-		if (!(status & E1000_RXD_STAT_EOP)) {
-			if (!rx_ring->rx_skb_top) {
-				rx_ring->rx_skb_top = skb;
-				rx_ring->rx_skb_top->len = length;
-				rx_ring->rx_skb_prev = skb;
-			} else {
-				if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
-					rx_ring->rx_skb_prev->next = skb;
-					skb->prev = rx_ring->rx_skb_prev;
-				} else {
-					skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
-				}
-				rx_ring->rx_skb_prev = skb;
-				rx_ring->rx_skb_top->data_len += length;
-			}
+		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			dev_kfree_skb_irq(skb);
 			goto next_desc;
-		} else {
-			if (rx_ring->rx_skb_top) {
-				if (skb_shinfo(rx_ring->rx_skb_top)
-				    ->frag_list) {
-					rx_ring->rx_skb_prev->next = skb;
-					skb->prev = rx_ring->rx_skb_prev;
-				} else
-					skb_shinfo(rx_ring->rx_skb_top)
-						->frag_list = skb;
-
-				rx_ring->rx_skb_top->data_len += length;
-				rx_ring->rx_skb_top->len +=
-					rx_ring->rx_skb_top->data_len;
-
-				skb = rx_ring->rx_skb_top;
-				multi_descriptor = TRUE;
-				rx_ring->rx_skb_top = NULL;
-				rx_ring->rx_skb_prev = NULL;
-			}
 		}
 
 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
@@ -3712,10 +3687,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
#define E1000_CB_LENGTH 256
-		if ((length < E1000_CB_LENGTH) &&
-		    !rx_ring->rx_skb_top &&
-		    /* or maybe (status & E1000_RXD_STAT_EOP) && */
-		    !multi_descriptor) {
+		if (length < E1000_CB_LENGTH) {
 			struct sk_buff *new_skb =
 				dev_alloc_skb(length + NET_IP_ALIGN);
 			if (new_skb) {
@@ -3729,7 +3701,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				skb = new_skb;
 				skb_put(skb, length);
 			}
-		}
+		} else
+			skb_put(skb, length);
 
 		/* end copybreak code */
 
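[Note: the last two hunks keep e1000's copybreak path: receives shorter than E1000_CB_LENGTH are copied into a small, freshly allocated skb so the large ring buffer can be recycled immediately. Below is a rough stand-alone sketch of the idea; the helper name and threshold are illustrative — e1000 keeps this logic inline in e1000_clean_rx_irq().]

#include <linux/skbuff.h>
#include <linux/string.h>

#define COPYBREAK_LEN 256	/* illustrative threshold, like E1000_CB_LENGTH */

/* If the frame is small, copy it into a fresh, tightly sized skb so the
 * original (large) receive buffer can be handed straight back to the ring.
 * Returns the skb that should be passed up the stack. */
static struct sk_buff *copybreak_rx(struct sk_buff *rx_skb, unsigned int length)
{
	struct sk_buff *new_skb;

	if (length >= COPYBREAK_LEN) {
		skb_put(rx_skb, length);	/* hand the original buffer up */
		return rx_skb;
	}

	new_skb = dev_alloc_skb(length + NET_IP_ALIGN);
	if (!new_skb) {
		skb_put(rx_skb, length);	/* allocation failed: use the big buffer */
		return rx_skb;
	}

	skb_reserve(new_skb, NET_IP_ALIGN);	/* keep the IP header aligned */
	memcpy(skb_put(new_skb, length), rx_skb->data, length);
	/* caller recycles rx_skb back into the receive ring */
	return new_skb;
}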
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ca8160d68229..72c1630977d6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -96,10 +96,6 @@ static int copybreak __read_mostly = 256;
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-static int disable_msi = 0;
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -3126,61 +3122,6 @@ static void __devinit sky2_show_addr(struct net_device *dev)
 	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 }
 
-/* Handle software interrupt used during MSI test */
-static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
-					    struct pt_regs *regs)
-{
-	struct sky2_hw *hw = dev_id;
-	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
-
-	if (status == 0)
-		return IRQ_NONE;
-
-	if (status & Y2_IS_IRQ_SW) {
-		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
-		hw->msi = 1;
-	}
-	sky2_write32(hw, B0_Y2_SP_ICR, 2);
-
-	sky2_read32(hw, B0_IMSK);
-	return IRQ_HANDLED;
-}
-
-/* Test interrupt path by forcing a a software IRQ */
-static int __devinit sky2_test_msi(struct sky2_hw *hw)
-{
-	struct pci_dev *pdev = hw->pdev;
-	int i, err;
-
-	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
-
-	err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
-	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
-		return err;
-	}
-
-	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
-	wmb();
-
-	for (i = 0; i < 10; i++) {
-		barrier();
-		if (hw->msi)
-			goto found;
-		mdelay(1);
-	}
-
-	err = -EOPNOTSUPP;
-	sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
-found:
-	sky2_write32(hw, B0_IMSK, 0);
-
-	free_irq(pdev->irq, hw);
-
-	return err;
-}
-
 static int __devinit sky2_probe(struct pci_dev *pdev,
 				const struct pci_device_id *ent)
 {
@@ -3302,20 +3243,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (!disable_msi && pci_enable_msi(pdev) == 0) {
-		err = sky2_test_msi(hw);
-		if (err == -EOPNOTSUPP) {
-			/* MSI test failed, go back to INTx mode */
-			printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
-			       "switching to INTx mode. Please report this failure to "
-			       "the PCI maintainer and include system chipset information.\n",
-			       pci_name(pdev));
-			pci_disable_msi(pdev);
-		}
-		else if (err)
-			goto err_out_unregister;
-	}
-
 	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
 			  DRV_NAME, hw);
 	if (err) {
@@ -3332,8 +3259,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	return 0;
 
 err_out_unregister:
-	if (hw->msi)
-		pci_disable_msi(pdev);
 	if (dev1) {
 		unregister_netdev(dev1);
 		free_netdev(dev1);
@@ -3376,8 +3301,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
 	sky2_read8(hw, B0_CTST);
 
 	free_irq(pdev->irq, hw);
-	if (hw->msi)
-		pci_disable_msi(pdev);
 	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 3edb98075e0a..dce955c76f3c 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1881,7 +1881,6 @@ struct sky2_hw {
 	u32 intr_mask;
 
 	int pm_cap;
-	int msi;
 	u8 chip_id;
 	u8 chip_rev;
 	u8 copper;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e7dc653d5bd6..e8e92c853e89 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9408,6 +9408,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
 			return 0;
 		if (venid == PCI_VENDOR_ID_SUN)
 			return 1;
+
+		/* TG3 chips onboard the SunBlade-2500 don't have the
+		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
+		 * are distinguishable from non-Sun variants by being
+		 * named "network" by the firmware. Non-Sun cards will
+		 * show up as being named "ethernet".
+		 */
+		if (!strcmp(pcp->prom_name, "network"))
+			return 1;
 	}
 	return 0;
 }
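[Note: the tg3 hunk falls back to the firmware device name to spot Sun on-board parts whose subsystem vendor ID is not PCI_VENDOR_ID_SUN. A tiny hypothetical helper showing just that name check, separated out for clarity — tg3 performs it inline in tg3_is_sun_570X():]

#include <linux/string.h>

/* Hypothetical helper: Sun firmware names built-in NICs "network",
 * while add-in cards show up as "ethernet". */
static int prom_name_says_onboard_sun(const char *prom_name)
{
	return strcmp(prom_name, "network") == 0;
}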
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c2d5907dc8e0..ed1f837c8fda 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1106,6 +1106,9 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 	for (i = 0; i < vptr->options.numrx; i++) {
 		struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
+		struct rx_desc *rd = vptr->rd_ring + i;
+
+		memset(rd, 0, sizeof(*rd));
 
 		if (!rd_info->skb)
 			continue;