about summary refs log tree commit diff stats
path: root/drivers/net/e1000
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r-- drivers/net/e1000/e1000.h | 6
-rw-r--r-- drivers/net/e1000/e1000_ethtool.c | 58
-rw-r--r-- drivers/net/e1000/e1000_main.c | 237
3 files changed, 174 insertions(+), 127 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 42e2b7e21c29..2f29c2131851 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -167,6 +167,7 @@ struct e1000_buffer {
167 unsigned long time_stamp; 167 unsigned long time_stamp;
168 u16 length; 168 u16 length;
169 u16 next_to_watch; 169 u16 next_to_watch;
170 u16 mapped_as_page;
170}; 171};
171 172
172struct e1000_tx_ring { 173struct e1000_tx_ring {
@@ -260,7 +261,6 @@ struct e1000_adapter {
260 /* TX */ 261 /* TX */
261 struct e1000_tx_ring *tx_ring; /* One per active queue */ 262 struct e1000_tx_ring *tx_ring; /* One per active queue */
262 unsigned int restart_queue; 263 unsigned int restart_queue;
263 unsigned long tx_queue_len;
264 u32 txd_cmd; 264 u32 txd_cmd;
265 u32 tx_int_delay; 265 u32 tx_int_delay;
266 u32 tx_abs_int_delay; 266 u32 tx_abs_int_delay;
@@ -302,7 +302,6 @@ struct e1000_adapter {
302 /* OS defined structs */ 302 /* OS defined structs */
303 struct net_device *netdev; 303 struct net_device *netdev;
304 struct pci_dev *pdev; 304 struct pci_dev *pdev;
305 struct net_device_stats net_stats;
306 305
307 /* structs defined in e1000_hw.h */ 306 /* structs defined in e1000_hw.h */
308 struct e1000_hw hw; 307 struct e1000_hw hw;
@@ -326,6 +325,8 @@ struct e1000_adapter {
326 /* for ioport free */ 325 /* for ioport free */
327 int bars; 326 int bars;
328 int need_ioport; 327 int need_ioport;
328
329 bool discarding;
329}; 330};
330 331
331enum e1000_state_t { 332enum e1000_state_t {
@@ -347,6 +348,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
347extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 348extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
348extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 349extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
349extern void e1000_update_stats(struct e1000_adapter *adapter); 350extern void e1000_update_stats(struct e1000_adapter *adapter);
351extern bool e1000_has_link(struct e1000_adapter *adapter);
350extern void e1000_power_up_phy(struct e1000_adapter *); 352extern void e1000_power_up_phy(struct e1000_adapter *);
351extern void e1000_set_ethtool_ops(struct net_device *netdev); 353extern void e1000_set_ethtool_ops(struct net_device *netdev);
352extern void e1000_check_options(struct e1000_adapter *adapter); 354extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 490b2b7cd3ab..c67e93117271 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -31,14 +31,22 @@
31#include "e1000.h" 31#include "e1000.h"
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33 33
34enum {NETDEV_STATS, E1000_STATS};
35
34struct e1000_stats { 36struct e1000_stats {
35 char stat_string[ETH_GSTRING_LEN]; 37 char stat_string[ETH_GSTRING_LEN];
38 int type;
36 int sizeof_stat; 39 int sizeof_stat;
37 int stat_offset; 40 int stat_offset;
38}; 41};
39 42
40#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \ 43#define E1000_STAT(m) E1000_STATS, \
41 offsetof(struct e1000_adapter, m) 44 sizeof(((struct e1000_adapter *)0)->m), \
45 offsetof(struct e1000_adapter, m)
46#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
47 sizeof(((struct net_device *)0)->m), \
48 offsetof(struct net_device, m)
49
42static const struct e1000_stats e1000_gstrings_stats[] = { 50static const struct e1000_stats e1000_gstrings_stats[] = {
43 { "rx_packets", E1000_STAT(stats.gprc) }, 51 { "rx_packets", E1000_STAT(stats.gprc) },
44 { "tx_packets", E1000_STAT(stats.gptc) }, 52 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -50,19 +58,19 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
50 { "tx_multicast", E1000_STAT(stats.mptc) }, 58 { "tx_multicast", E1000_STAT(stats.mptc) },
51 { "rx_errors", E1000_STAT(stats.rxerrc) }, 59 { "rx_errors", E1000_STAT(stats.rxerrc) },
52 { "tx_errors", E1000_STAT(stats.txerrc) }, 60 { "tx_errors", E1000_STAT(stats.txerrc) },
53 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 61 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
54 { "multicast", E1000_STAT(stats.mprc) }, 62 { "multicast", E1000_STAT(stats.mprc) },
55 { "collisions", E1000_STAT(stats.colc) }, 63 { "collisions", E1000_STAT(stats.colc) },
56 { "rx_length_errors", E1000_STAT(stats.rlerrc) }, 64 { "rx_length_errors", E1000_STAT(stats.rlerrc) },
57 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 65 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
58 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 66 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
59 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 67 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
60 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 68 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
61 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 69 { "rx_missed_errors", E1000_STAT(stats.mpc) },
62 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 70 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
63 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 71 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
64 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, 72 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
65 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, 73 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
66 { "tx_window_errors", E1000_STAT(stats.latecol) }, 74 { "tx_window_errors", E1000_STAT(stats.latecol) },
67 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 75 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
68 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 76 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -207,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
207 return 0; 215 return 0;
208} 216}
209 217
218static u32 e1000_get_link(struct net_device *netdev)
219{
220 struct e1000_adapter *adapter = netdev_priv(netdev);
221
222 /*
223 * If the link is not reported up to netdev, interrupts are disabled,
224 * and so the physical link state may have changed since we last
225 * looked. Set get_link_status to make sure that the true link
226 * state is interrogated, rather than pulling a cached and possibly
227 * stale link state from the driver.
228 */
229 if (!netif_carrier_ok(netdev))
230 adapter->hw.get_link_status = 1;
231
232 return e1000_has_link(adapter);
233}
234
210static void e1000_get_pauseparam(struct net_device *netdev, 235static void e1000_get_pauseparam(struct net_device *netdev,
211 struct ethtool_pauseparam *pause) 236 struct ethtool_pauseparam *pause)
212{ 237{
@@ -861,10 +886,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
861 886
862 /* NOTE: we don't test MSI interrupts here, yet */ 887 /* NOTE: we don't test MSI interrupts here, yet */
863 /* Hook up test interrupt handler just for this test */ 888 /* Hook up test interrupt handler just for this test */
864 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 889 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
865 netdev)) 890 netdev))
866 shared_int = false; 891 shared_int = false;
867 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 892 else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
868 netdev->name, netdev)) { 893 netdev->name, netdev)) {
869 *data = 1; 894 *data = 1;
870 return -1; 895 return -1;
@@ -1830,10 +1855,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1830{ 1855{
1831 struct e1000_adapter *adapter = netdev_priv(netdev); 1856 struct e1000_adapter *adapter = netdev_priv(netdev);
1832 int i; 1857 int i;
1858 char *p = NULL;
1833 1859
1834 e1000_update_stats(adapter); 1860 e1000_update_stats(adapter);
1835 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1861 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1836 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1862 switch (e1000_gstrings_stats[i].type) {
1863 case NETDEV_STATS:
1864 p = (char *) netdev +
1865 e1000_gstrings_stats[i].stat_offset;
1866 break;
1867 case E1000_STATS:
1868 p = (char *) adapter +
1869 e1000_gstrings_stats[i].stat_offset;
1870 break;
1871 }
1872
1837 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1873 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1838 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1874 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1839 } 1875 }
@@ -1873,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1873 .get_msglevel = e1000_get_msglevel, 1909 .get_msglevel = e1000_get_msglevel,
1874 .set_msglevel = e1000_set_msglevel, 1910 .set_msglevel = e1000_set_msglevel,
1875 .nway_reset = e1000_nway_reset, 1911 .nway_reset = e1000_nway_reset,
1876 .get_link = ethtool_op_get_link, 1912 .get_link = e1000_get_link,
1877 .get_eeprom_len = e1000_get_eeprom_len, 1913 .get_eeprom_len = e1000_get_eeprom_len,
1878 .get_eeprom = e1000_get_eeprom, 1914 .get_eeprom = e1000_get_eeprom,
1879 .set_eeprom = e1000_set_eeprom, 1915 .set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index bcd192ca47b0..b15ece26ed84 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
42 * Macro expands to... 42 * Macro expands to...
43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
44 */ 44 */
45static struct pci_device_id e1000_pci_tbl[] = { 45static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
46 INTEL_E1000_ETHERNET_DEVICE(0x1000), 46 INTEL_E1000_ETHERNET_DEVICE(0x1000),
47 INTEL_E1000_ETHERNET_DEVICE(0x1001), 47 INTEL_E1000_ETHERNET_DEVICE(0x1001),
48 INTEL_E1000_ETHERNET_DEVICE(0x1004), 48 INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
383 adapter->alloc_rx_buf(adapter, ring, 383 adapter->alloc_rx_buf(adapter, ring,
384 E1000_DESC_UNUSED(ring)); 384 E1000_DESC_UNUSED(ring));
385 } 385 }
386
387 adapter->tx_queue_len = netdev->tx_queue_len;
388} 386}
389 387
390int e1000_up(struct e1000_adapter *adapter) 388int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
503 del_timer_sync(&adapter->watchdog_timer); 501 del_timer_sync(&adapter->watchdog_timer);
504 del_timer_sync(&adapter->phy_info_timer); 502 del_timer_sync(&adapter->phy_info_timer);
505 503
506 netdev->tx_queue_len = adapter->tx_queue_len;
507 adapter->link_speed = 0; 504 adapter->link_speed = 0;
508 adapter->link_duplex = 0; 505 adapter->link_duplex = 0;
509 netif_carrier_off(netdev); 506 netif_carrier_off(netdev);
@@ -847,6 +844,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
847 goto err_pci_reg; 844 goto err_pci_reg;
848 845
849 pci_set_master(pdev); 846 pci_set_master(pdev);
847 err = pci_save_state(pdev);
848 if (err)
849 goto err_alloc_etherdev;
850 850
851 err = -ENOMEM; 851 err = -ENOMEM;
852 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 852 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1698 rctl &= ~E1000_RCTL_SZ_4096; 1698 rctl &= ~E1000_RCTL_SZ_4096;
1699 rctl |= E1000_RCTL_BSEX; 1699 rctl |= E1000_RCTL_BSEX;
1700 switch (adapter->rx_buffer_len) { 1700 switch (adapter->rx_buffer_len) {
1701 case E1000_RXBUFFER_256:
1702 rctl |= E1000_RCTL_SZ_256;
1703 rctl &= ~E1000_RCTL_BSEX;
1704 break;
1705 case E1000_RXBUFFER_512:
1706 rctl |= E1000_RCTL_SZ_512;
1707 rctl &= ~E1000_RCTL_BSEX;
1708 break;
1709 case E1000_RXBUFFER_1024:
1710 rctl |= E1000_RCTL_SZ_1024;
1711 rctl &= ~E1000_RCTL_BSEX;
1712 break;
1713 case E1000_RXBUFFER_2048: 1701 case E1000_RXBUFFER_2048:
1714 default: 1702 default:
1715 rctl |= E1000_RCTL_SZ_2048; 1703 rctl |= E1000_RCTL_SZ_2048;
@@ -1839,10 +1827,17 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1839static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1827static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1840 struct e1000_buffer *buffer_info) 1828 struct e1000_buffer *buffer_info)
1841{ 1829{
1842 buffer_info->dma = 0; 1830 if (buffer_info->dma) {
1831 if (buffer_info->mapped_as_page)
1832 pci_unmap_page(adapter->pdev, buffer_info->dma,
1833 buffer_info->length, PCI_DMA_TODEVICE);
1834 else
1835 pci_unmap_single(adapter->pdev, buffer_info->dma,
1836 buffer_info->length,
1837 PCI_DMA_TODEVICE);
1838 buffer_info->dma = 0;
1839 }
1843 if (buffer_info->skb) { 1840 if (buffer_info->skb) {
1844 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
1845 DMA_TO_DEVICE);
1846 dev_kfree_skb_any(buffer_info->skb); 1841 dev_kfree_skb_any(buffer_info->skb);
1847 buffer_info->skb = NULL; 1842 buffer_info->skb = NULL;
1848 } 1843 }
@@ -2132,7 +2127,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2132 rctl |= E1000_RCTL_VFE; 2127 rctl |= E1000_RCTL_VFE;
2133 } 2128 }
2134 2129
2135 if (netdev->uc.count > rar_entries - 1) { 2130 if (netdev_uc_count(netdev) > rar_entries - 1) {
2136 rctl |= E1000_RCTL_UPE; 2131 rctl |= E1000_RCTL_UPE;
2137 } else if (!(netdev->flags & IFF_PROMISC)) { 2132 } else if (!(netdev->flags & IFF_PROMISC)) {
2138 rctl &= ~E1000_RCTL_UPE; 2133 rctl &= ~E1000_RCTL_UPE;
@@ -2155,7 +2150,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2155 */ 2150 */
2156 i = 1; 2151 i = 1;
2157 if (use_uc) 2152 if (use_uc)
2158 list_for_each_entry(ha, &netdev->uc.list, list) { 2153 netdev_for_each_uc_addr(ha, netdev) {
2159 if (i == rar_entries) 2154 if (i == rar_entries)
2160 break; 2155 break;
2161 e1000_rar_set(hw, ha->addr, i++); 2156 e1000_rar_set(hw, ha->addr, i++);
@@ -2163,29 +2158,25 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2163 2158
2164 WARN_ON(i == rar_entries); 2159 WARN_ON(i == rar_entries);
2165 2160
2166 mc_ptr = netdev->mc_list; 2161 netdev_for_each_mc_addr(mc_ptr, netdev) {
2167 2162 if (i == rar_entries) {
2168 for (; i < rar_entries; i++) { 2163 /* load any remaining addresses into the hash table */
2169 if (mc_ptr) { 2164 u32 hash_reg, hash_bit, mta;
2170 e1000_rar_set(hw, mc_ptr->da_addr, i); 2165 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
2171 mc_ptr = mc_ptr->next; 2166 hash_reg = (hash_value >> 5) & 0x7F;
2167 hash_bit = hash_value & 0x1F;
2168 mta = (1 << hash_bit);
2169 mcarray[hash_reg] |= mta;
2172 } else { 2170 } else {
2173 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2171 e1000_rar_set(hw, mc_ptr->da_addr, i++);
2174 E1000_WRITE_FLUSH();
2175 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2176 E1000_WRITE_FLUSH();
2177 } 2172 }
2178 } 2173 }
2179 2174
2180 /* load any remaining addresses into the hash table */ 2175 for (; i < rar_entries; i++) {
2181 2176 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2182 for (; mc_ptr; mc_ptr = mc_ptr->next) { 2177 E1000_WRITE_FLUSH();
2183 u32 hash_reg, hash_bit, mta; 2178 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2184 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); 2179 E1000_WRITE_FLUSH();
2185 hash_reg = (hash_value >> 5) & 0x7F;
2186 hash_bit = hash_value & 0x1F;
2187 mta = (1 << hash_bit);
2188 mcarray[hash_reg] |= mta;
2189 } 2180 }
2190 2181
2191 /* write the hash table completely, write from bottom to avoid 2182 /* write the hash table completely, write from bottom to avoid
@@ -2251,7 +2242,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
2251 } 2242 }
2252} 2243}
2253 2244
2254static bool e1000_has_link(struct e1000_adapter *adapter) 2245bool e1000_has_link(struct e1000_adapter *adapter)
2255{ 2246{
2256 struct e1000_hw *hw = &adapter->hw; 2247 struct e1000_hw *hw = &adapter->hw;
2257 bool link_active = false; 2248 bool link_active = false;
@@ -2322,19 +2313,15 @@ static void e1000_watchdog(unsigned long data)
2322 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2313 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2323 E1000_CTRL_TFCE) ? "TX" : "None" ))); 2314 E1000_CTRL_TFCE) ? "TX" : "None" )));
2324 2315
2325 /* tweak tx_queue_len according to speed/duplex 2316 /* adjust timeout factor according to speed/duplex */
2326 * and adjust the timeout factor */
2327 netdev->tx_queue_len = adapter->tx_queue_len;
2328 adapter->tx_timeout_factor = 1; 2317 adapter->tx_timeout_factor = 1;
2329 switch (adapter->link_speed) { 2318 switch (adapter->link_speed) {
2330 case SPEED_10: 2319 case SPEED_10:
2331 txb2b = false; 2320 txb2b = false;
2332 netdev->tx_queue_len = 10;
2333 adapter->tx_timeout_factor = 16; 2321 adapter->tx_timeout_factor = 16;
2334 break; 2322 break;
2335 case SPEED_100: 2323 case SPEED_100:
2336 txb2b = false; 2324 txb2b = false;
2337 netdev->tx_queue_len = 100;
2338 /* maybe add some timeout factor ? */ 2325 /* maybe add some timeout factor ? */
2339 break; 2326 break;
2340 } 2327 }
@@ -2683,22 +2670,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2683 unsigned int mss) 2670 unsigned int mss)
2684{ 2671{
2685 struct e1000_hw *hw = &adapter->hw; 2672 struct e1000_hw *hw = &adapter->hw;
2673 struct pci_dev *pdev = adapter->pdev;
2686 struct e1000_buffer *buffer_info; 2674 struct e1000_buffer *buffer_info;
2687 unsigned int len = skb_headlen(skb); 2675 unsigned int len = skb_headlen(skb);
2688 unsigned int offset, size, count = 0, i; 2676 unsigned int offset = 0, size, count = 0, i;
2689 unsigned int f; 2677 unsigned int f;
2690 dma_addr_t *map;
2691 2678
2692 i = tx_ring->next_to_use; 2679 i = tx_ring->next_to_use;
2693 2680
2694 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
2695 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
2696 return 0;
2697 }
2698
2699 map = skb_shinfo(skb)->dma_maps;
2700 offset = 0;
2701
2702 while (len) { 2681 while (len) {
2703 buffer_info = &tx_ring->buffer_info[i]; 2682 buffer_info = &tx_ring->buffer_info[i];
2704 size = min(len, max_per_txd); 2683 size = min(len, max_per_txd);
@@ -2735,7 +2714,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2735 buffer_info->length = size; 2714 buffer_info->length = size;
2736 /* set time_stamp *before* dma to help avoid a possible race */ 2715 /* set time_stamp *before* dma to help avoid a possible race */
2737 buffer_info->time_stamp = jiffies; 2716 buffer_info->time_stamp = jiffies;
2738 buffer_info->dma = skb_shinfo(skb)->dma_head + offset; 2717 buffer_info->mapped_as_page = false;
2718 buffer_info->dma = pci_map_single(pdev, skb->data + offset,
2719 size, PCI_DMA_TODEVICE);
2720 if (pci_dma_mapping_error(pdev, buffer_info->dma))
2721 goto dma_error;
2739 buffer_info->next_to_watch = i; 2722 buffer_info->next_to_watch = i;
2740 2723
2741 len -= size; 2724 len -= size;
@@ -2753,7 +2736,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2753 2736
2754 frag = &skb_shinfo(skb)->frags[f]; 2737 frag = &skb_shinfo(skb)->frags[f];
2755 len = frag->size; 2738 len = frag->size;
2756 offset = 0; 2739 offset = frag->page_offset;
2757 2740
2758 while (len) { 2741 while (len) {
2759 i++; 2742 i++;
@@ -2777,7 +2760,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2777 2760
2778 buffer_info->length = size; 2761 buffer_info->length = size;
2779 buffer_info->time_stamp = jiffies; 2762 buffer_info->time_stamp = jiffies;
2780 buffer_info->dma = map[f] + offset; 2763 buffer_info->mapped_as_page = true;
2764 buffer_info->dma = pci_map_page(pdev, frag->page,
2765 offset, size,
2766 PCI_DMA_TODEVICE);
2767 if (pci_dma_mapping_error(pdev, buffer_info->dma))
2768 goto dma_error;
2781 buffer_info->next_to_watch = i; 2769 buffer_info->next_to_watch = i;
2782 2770
2783 len -= size; 2771 len -= size;
@@ -2790,6 +2778,22 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2790 tx_ring->buffer_info[first].next_to_watch = i; 2778 tx_ring->buffer_info[first].next_to_watch = i;
2791 2779
2792 return count; 2780 return count;
2781
2782dma_error:
2783 dev_err(&pdev->dev, "TX DMA map failed\n");
2784 buffer_info->dma = 0;
2785 if (count)
2786 count--;
2787
2788 while (count--) {
2789 if (i==0)
2790 i += tx_ring->count;
2791 i--;
2792 buffer_info = &tx_ring->buffer_info[i];
2793 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2794 }
2795
2796 return 0;
2793} 2797}
2794 2798
2795static void e1000_tx_queue(struct e1000_adapter *adapter, 2799static void e1000_tx_queue(struct e1000_adapter *adapter,
@@ -3101,10 +3105,8 @@ static void e1000_reset_task(struct work_struct *work)
3101 3105
3102static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3106static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3103{ 3107{
3104 struct e1000_adapter *adapter = netdev_priv(netdev);
3105
3106 /* only return the current stats */ 3108 /* only return the current stats */
3107 return &adapter->net_stats; 3109 return &netdev->stats;
3108} 3110}
3109 3111
3110/** 3112/**
@@ -3154,13 +3156,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3154 * however with the new *_jumbo_rx* routines, jumbo receives will use 3156 * however with the new *_jumbo_rx* routines, jumbo receives will use
3155 * fragmented skbs */ 3157 * fragmented skbs */
3156 3158
3157 if (max_frame <= E1000_RXBUFFER_256) 3159 if (max_frame <= E1000_RXBUFFER_2048)
3158 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3159 else if (max_frame <= E1000_RXBUFFER_512)
3160 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3161 else if (max_frame <= E1000_RXBUFFER_1024)
3162 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3163 else if (max_frame <= E1000_RXBUFFER_2048)
3164 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3160 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3165 else 3161 else
3166#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3162#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3196,6 +3192,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3196 3192
3197void e1000_update_stats(struct e1000_adapter *adapter) 3193void e1000_update_stats(struct e1000_adapter *adapter)
3198{ 3194{
3195 struct net_device *netdev = adapter->netdev;
3199 struct e1000_hw *hw = &adapter->hw; 3196 struct e1000_hw *hw = &adapter->hw;
3200 struct pci_dev *pdev = adapter->pdev; 3197 struct pci_dev *pdev = adapter->pdev;
3201 unsigned long flags; 3198 unsigned long flags;
@@ -3288,32 +3285,32 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3288 } 3285 }
3289 3286
3290 /* Fill out the OS statistics structure */ 3287 /* Fill out the OS statistics structure */
3291 adapter->net_stats.multicast = adapter->stats.mprc; 3288 netdev->stats.multicast = adapter->stats.mprc;
3292 adapter->net_stats.collisions = adapter->stats.colc; 3289 netdev->stats.collisions = adapter->stats.colc;
3293 3290
3294 /* Rx Errors */ 3291 /* Rx Errors */
3295 3292
3296 /* RLEC on some newer hardware can be incorrect so build 3293 /* RLEC on some newer hardware can be incorrect so build
3297 * our own version based on RUC and ROC */ 3294 * our own version based on RUC and ROC */
3298 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3295 netdev->stats.rx_errors = adapter->stats.rxerrc +
3299 adapter->stats.crcerrs + adapter->stats.algnerrc + 3296 adapter->stats.crcerrs + adapter->stats.algnerrc +
3300 adapter->stats.ruc + adapter->stats.roc + 3297 adapter->stats.ruc + adapter->stats.roc +
3301 adapter->stats.cexterr; 3298 adapter->stats.cexterr;
3302 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3299 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3303 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc; 3300 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3304 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3301 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3305 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3302 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3306 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3303 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3307 3304
3308 /* Tx Errors */ 3305 /* Tx Errors */
3309 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3306 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3310 adapter->net_stats.tx_errors = adapter->stats.txerrc; 3307 netdev->stats.tx_errors = adapter->stats.txerrc;
3311 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3308 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3312 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3309 netdev->stats.tx_window_errors = adapter->stats.latecol;
3313 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3310 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3314 if (hw->bad_tx_carr_stats_fd && 3311 if (hw->bad_tx_carr_stats_fd &&
3315 adapter->link_duplex == FULL_DUPLEX) { 3312 adapter->link_duplex == FULL_DUPLEX) {
3316 adapter->net_stats.tx_carrier_errors = 0; 3313 netdev->stats.tx_carrier_errors = 0;
3317 adapter->stats.tncrs = 0; 3314 adapter->stats.tncrs = 0;
3318 } 3315 }
3319 3316
@@ -3484,8 +3481,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3484 adapter->detect_tx_hung = false; 3481 adapter->detect_tx_hung = false;
3485 if (tx_ring->buffer_info[eop].time_stamp && 3482 if (tx_ring->buffer_info[eop].time_stamp &&
3486 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3483 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3487 (adapter->tx_timeout_factor * HZ)) 3484 (adapter->tx_timeout_factor * HZ)) &&
3488 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3485 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3489 3486
3490 /* detected Tx unit hang */ 3487 /* detected Tx unit hang */
3491 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3488 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
@@ -3514,8 +3511,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3514 } 3511 }
3515 adapter->total_tx_bytes += total_tx_bytes; 3512 adapter->total_tx_bytes += total_tx_bytes;
3516 adapter->total_tx_packets += total_tx_packets; 3513 adapter->total_tx_packets += total_tx_packets;
3517 adapter->net_stats.tx_bytes += total_tx_bytes; 3514 netdev->stats.tx_bytes += total_tx_bytes;
3518 adapter->net_stats.tx_packets += total_tx_packets; 3515 netdev->stats.tx_packets += total_tx_packets;
3519 return (count < tx_ring->count); 3516 return (count < tx_ring->count);
3520} 3517}
3521 3518
@@ -3767,8 +3764,8 @@ next_desc:
3767 3764
3768 adapter->total_rx_packets += total_rx_packets; 3765 adapter->total_rx_packets += total_rx_packets;
3769 adapter->total_rx_bytes += total_rx_bytes; 3766 adapter->total_rx_bytes += total_rx_bytes;
3770 adapter->net_stats.rx_bytes += total_rx_bytes; 3767 netdev->stats.rx_bytes += total_rx_bytes;
3771 adapter->net_stats.rx_packets += total_rx_packets; 3768 netdev->stats.rx_packets += total_rx_packets;
3772 return cleaned; 3769 return cleaned;
3773} 3770}
3774 3771
@@ -3827,13 +3824,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3827 3824
3828 length = le16_to_cpu(rx_desc->length); 3825 length = le16_to_cpu(rx_desc->length);
3829 /* !EOP means multiple descriptors were used to store a single 3826 /* !EOP means multiple descriptors were used to store a single
3830 * packet, also make sure the frame isn't just CRC only */ 3827 * packet, if thats the case we need to toss it. In fact, we
3831 if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { 3828 * need to toss every packet with the EOP bit clear and the
3829 * next frame that _does_ have the EOP bit set, as it is by
3830 * definition only a frame fragment
3831 */
3832 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3833 adapter->discarding = true;
3834
3835 if (adapter->discarding) {
3832 /* All receives must fit into a single buffer */ 3836 /* All receives must fit into a single buffer */
3833 E1000_DBG("%s: Receive packet consumed multiple" 3837 E1000_DBG("%s: Receive packet consumed multiple"
3834 " buffers\n", netdev->name); 3838 " buffers\n", netdev->name);
3835 /* recycle */ 3839 /* recycle */
3836 buffer_info->skb = skb; 3840 buffer_info->skb = skb;
3841 if (status & E1000_RXD_STAT_EOP)
3842 adapter->discarding = false;
3837 goto next_desc; 3843 goto next_desc;
3838 } 3844 }
3839 3845
@@ -3867,9 +3873,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3867 * of reassembly being done in the stack */ 3873 * of reassembly being done in the stack */
3868 if (length < copybreak) { 3874 if (length < copybreak) {
3869 struct sk_buff *new_skb = 3875 struct sk_buff *new_skb =
3870 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 3876 netdev_alloc_skb_ip_align(netdev, length);
3871 if (new_skb) { 3877 if (new_skb) {
3872 skb_reserve(new_skb, NET_IP_ALIGN);
3873 skb_copy_to_linear_data_offset(new_skb, 3878 skb_copy_to_linear_data_offset(new_skb,
3874 -NET_IP_ALIGN, 3879 -NET_IP_ALIGN,
3875 (skb->data - 3880 (skb->data -
@@ -3916,8 +3921,8 @@ next_desc:
3916 3921
3917 adapter->total_rx_packets += total_rx_packets; 3922 adapter->total_rx_packets += total_rx_packets;
3918 adapter->total_rx_bytes += total_rx_bytes; 3923 adapter->total_rx_bytes += total_rx_bytes;
3919 adapter->net_stats.rx_bytes += total_rx_bytes; 3924 netdev->stats.rx_bytes += total_rx_bytes;
3920 adapter->net_stats.rx_packets += total_rx_packets; 3925 netdev->stats.rx_packets += total_rx_packets;
3921 return cleaned; 3926 return cleaned;
3922} 3927}
3923 3928
@@ -3938,9 +3943,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3938 struct e1000_buffer *buffer_info; 3943 struct e1000_buffer *buffer_info;
3939 struct sk_buff *skb; 3944 struct sk_buff *skb;
3940 unsigned int i; 3945 unsigned int i;
3941 unsigned int bufsz = 256 - 3946 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
3942 16 /*for skb_reserve */ -
3943 NET_IP_ALIGN;
3944 3947
3945 i = rx_ring->next_to_use; 3948 i = rx_ring->next_to_use;
3946 buffer_info = &rx_ring->buffer_info[i]; 3949 buffer_info = &rx_ring->buffer_info[i];
@@ -3952,7 +3955,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3952 goto check_page; 3955 goto check_page;
3953 } 3956 }
3954 3957
3955 skb = netdev_alloc_skb(netdev, bufsz); 3958 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3956 if (unlikely(!skb)) { 3959 if (unlikely(!skb)) {
3957 /* Better luck next round */ 3960 /* Better luck next round */
3958 adapter->alloc_rx_buff_failed++; 3961 adapter->alloc_rx_buff_failed++;
@@ -3965,7 +3968,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3965 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " 3968 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
3966 "at %p\n", bufsz, skb->data); 3969 "at %p\n", bufsz, skb->data);
3967 /* Try again, without freeing the previous */ 3970 /* Try again, without freeing the previous */
3968 skb = netdev_alloc_skb(netdev, bufsz); 3971 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3969 /* Failed allocation, critical failure */ 3972 /* Failed allocation, critical failure */
3970 if (!skb) { 3973 if (!skb) {
3971 dev_kfree_skb(oldskb); 3974 dev_kfree_skb(oldskb);
@@ -3983,12 +3986,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3983 /* Use new allocation */ 3986 /* Use new allocation */
3984 dev_kfree_skb(oldskb); 3987 dev_kfree_skb(oldskb);
3985 } 3988 }
3986 /* Make buffer alignment 2 beyond a 16 byte boundary
3987 * this will result in a 16 byte aligned IP header after
3988 * the 14 byte MAC header is removed
3989 */
3990 skb_reserve(skb, NET_IP_ALIGN);
3991
3992 buffer_info->skb = skb; 3989 buffer_info->skb = skb;
3993 buffer_info->length = adapter->rx_buffer_len; 3990 buffer_info->length = adapter->rx_buffer_len;
3994check_page: 3991check_page:
@@ -4001,11 +3998,21 @@ check_page:
4001 } 3998 }
4002 } 3999 }
4003 4000
4004 if (!buffer_info->dma) 4001 if (!buffer_info->dma) {
4005 buffer_info->dma = pci_map_page(pdev, 4002 buffer_info->dma = pci_map_page(pdev,
4006 buffer_info->page, 0, 4003 buffer_info->page, 0,
4007 buffer_info->length, 4004 buffer_info->length,
4008 PCI_DMA_FROMDEVICE); 4005 PCI_DMA_FROMDEVICE);
4006 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4007 put_page(buffer_info->page);
4008 dev_kfree_skb(skb);
4009 buffer_info->page = NULL;
4010 buffer_info->skb = NULL;
4011 buffer_info->dma = 0;
4012 adapter->alloc_rx_buff_failed++;
4013 break; /* while !buffer_info->skb */
4014 }
4015 }
4009 4016
4010 rx_desc = E1000_RX_DESC(*rx_ring, i); 4017 rx_desc = E1000_RX_DESC(*rx_ring, i);
4011 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4018 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4045,7 +4052,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4045 struct e1000_buffer *buffer_info; 4052 struct e1000_buffer *buffer_info;
4046 struct sk_buff *skb; 4053 struct sk_buff *skb;
4047 unsigned int i; 4054 unsigned int i;
4048 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 4055 unsigned int bufsz = adapter->rx_buffer_len;
4049 4056
4050 i = rx_ring->next_to_use; 4057 i = rx_ring->next_to_use;
4051 buffer_info = &rx_ring->buffer_info[i]; 4058 buffer_info = &rx_ring->buffer_info[i];
@@ -4057,7 +4064,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4057 goto map_skb; 4064 goto map_skb;
4058 } 4065 }
4059 4066
4060 skb = netdev_alloc_skb(netdev, bufsz); 4067 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4061 if (unlikely(!skb)) { 4068 if (unlikely(!skb)) {
4062 /* Better luck next round */ 4069 /* Better luck next round */
4063 adapter->alloc_rx_buff_failed++; 4070 adapter->alloc_rx_buff_failed++;
@@ -4070,7 +4077,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4070 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4077 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4071 "at %p\n", bufsz, skb->data); 4078 "at %p\n", bufsz, skb->data);
4072 /* Try again, without freeing the previous */ 4079 /* Try again, without freeing the previous */
4073 skb = netdev_alloc_skb(netdev, bufsz); 4080 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4074 /* Failed allocation, critical failure */ 4081 /* Failed allocation, critical failure */
4075 if (!skb) { 4082 if (!skb) {
4076 dev_kfree_skb(oldskb); 4083 dev_kfree_skb(oldskb);
@@ -4089,12 +4096,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4089 /* Use new allocation */ 4096 /* Use new allocation */
4090 dev_kfree_skb(oldskb); 4097 dev_kfree_skb(oldskb);
4091 } 4098 }
4092 /* Make buffer alignment 2 beyond a 16 byte boundary
4093 * this will result in a 16 byte aligned IP header after
4094 * the 14 byte MAC header is removed
4095 */
4096 skb_reserve(skb, NET_IP_ALIGN);
4097
4098 buffer_info->skb = skb; 4099 buffer_info->skb = skb;
4099 buffer_info->length = adapter->rx_buffer_len; 4100 buffer_info->length = adapter->rx_buffer_len;
4100map_skb: 4101map_skb:
@@ -4102,6 +4103,13 @@ map_skb:
4102 skb->data, 4103 skb->data,
4103 buffer_info->length, 4104 buffer_info->length,
4104 PCI_DMA_FROMDEVICE); 4105 PCI_DMA_FROMDEVICE);
4106 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4107 dev_kfree_skb(skb);
4108 buffer_info->skb = NULL;
4109 buffer_info->dma = 0;
4110 adapter->alloc_rx_buff_failed++;
4111 break; /* while !buffer_info->skb */
4112 }
4105 4113
4106 /* 4114 /*
4107 * XXX if it was allocated cleanly it will never map to a 4115 * XXX if it was allocated cleanly it will never map to a
@@ -4597,6 +4605,7 @@ static int e1000_resume(struct pci_dev *pdev)
4597 4605
4598 pci_set_power_state(pdev, PCI_D0); 4606 pci_set_power_state(pdev, PCI_D0);
4599 pci_restore_state(pdev); 4607 pci_restore_state(pdev);
4608 pci_save_state(pdev);
4600 4609
4601 if (adapter->need_ioport) 4610 if (adapter->need_ioport)
4602 err = pci_enable_device(pdev); 4611 err = pci_enable_device(pdev);