Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	429
1 file changed, 225 insertions, 204 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed84..ebdea0891665 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /**
+ * e1000_get_hw_dev - return device
+ * used by hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	return adapter->netdev;
+}
+
+/**
  * e1000_init_module - Driver Registration Routine
  *
  * e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 static int __init e1000_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s - version %s\n",
-	       e1000_driver_string, e1000_driver_version);
+	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 
-	printk(KERN_INFO "%s\n", e1000_copyright);
+	pr_info("%s\n", e1000_copyright);
 
 	ret = pci_register_driver(&e1000_driver);
 	if (copybreak != COPYBREAK_DEFAULT) {
 		if (copybreak == 0)
-			printk(KERN_INFO "e1000: copybreak disabled\n");
+			pr_info("copybreak disabled\n");
 		else
-			printk(KERN_INFO "e1000: copybreak enabled for "
-			       "packets <= %u bytes\n", copybreak);
+			pr_info("copybreak enabled for "
+				"packets <= %u bytes\n", copybreak);
 	}
 	return ret;
 }
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 			  netdev);
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate interrupt Error: %d\n", err);
+		e_err("Unable to allocate interrupt Error: %d\n", err);
 	}
 
 	return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 	ew32(WUC, 0);
 
 	if (e1000_init_hw(hw))
-		DPRINTK(PROBE, ERR, "Hardware Error\n");
+		e_err("Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
 
 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 
 	data = kmalloc(eeprom.len, GFP_KERNEL);
 	if (!data) {
-		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
-		       " data\n");
+		pr_err("Unable to allocate memory to dump EEPROM data\n");
 		return;
 	}
 
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 		csum_new += data[i] + (data[i + 1] << 8);
 	csum_new = EEPROM_SUM - csum_new;
 
-	printk(KERN_ERR "/*********************/\n");
-	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
-	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
+	pr_err("/*********************/\n");
+	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+	pr_err("Calculated              : 0x%04x\n", csum_new);
 
-	printk(KERN_ERR "Offset    Values\n");
-	printk(KERN_ERR "========  ======\n");
+	pr_err("Offset    Values\n");
+	pr_err("========  ======\n");
 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 
-	printk(KERN_ERR "Include this output when contacting your support "
-	       "provider.\n");
-	printk(KERN_ERR "This is not a software error! Something bad "
-	       "happened to your hardware or\n");
-	printk(KERN_ERR "EEPROM image. Ignoring this "
-	       "problem could result in further problems,\n");
-	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
-	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
-	       "which is invalid\n");
-	printk(KERN_ERR "and requires you to set the proper MAC "
-	       "address manually before continuing\n");
-	printk(KERN_ERR "to enable this network device.\n");
-	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
-	       "to your hardware vendor\n");
-	printk(KERN_ERR "or Intel Customer Support.\n");
-	printk(KERN_ERR "/*********************/\n");
+	pr_err("Include this output when contacting your support provider.\n");
+	pr_err("This is not a software error! Something bad happened to\n");
+	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+	pr_err("result in further problems, possibly loss of data,\n");
+	pr_err("corruption or system hangs!\n");
+	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+	pr_err("which is invalid and requires you to set the proper MAC\n");
+	pr_err("address manually before continuing to enable this network\n");
+	pr_err("device. Please inspect the EEPROM dump and report the\n");
+	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+	pr_err("/*********************/\n");
 
 	kfree(data);
 }
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
-				E1000_ERR("No usable DMA configuration, "
-					  "aborting\n");
+				pr_err("No usable DMA config, aborting\n");
 				goto err_dma;
 			}
 		}
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	/* initialize eeprom parameters */
 	if (e1000_init_eeprom_params(hw)) {
-		E1000_ERR("EEPROM initialization failed\n");
+		e_err("EEPROM initialization failed\n");
 		goto err_eeprom;
 	}
 
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	/* make sure the EEPROM is good */
 	if (e1000_validate_eeprom_checksum(hw) < 0) {
-		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		e_err("The EEPROM Checksum Is Not Valid\n");
 		e1000_dump_eeprom(adapter);
 		/*
 		 * set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	} else {
 		/* copy the MAC address out of the EEPROM */
 		if (e1000_read_mac_addr(hw))
-			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+			e_err("EEPROM Read Error\n");
 	}
 	/* don't block initalization here due to bad MAC address */
 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
 
 	if (!is_valid_ether_addr(netdev->perm_addr))
-		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		e_err("Invalid MAC Address\n");
 
 	e1000_get_bus_info(hw);
 
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
 	/* print bus type/speed/width info */
-	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+	e_info("(PCI%s:%s:%s) ",
 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
 	       ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
 	        (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	        (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
 	       ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
 
-	printk("%pM\n", netdev->dev_addr);
-
-	/* reset the hardware with the new settings */
-	e1000_reset(adapter);
-
-	strcpy(netdev->name, "eth%d");
-	err = register_netdev(netdev);
-	if (err)
-		goto err_register;
+	e_info("%pM\n", netdev->dev_addr);
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+	e_info("Intel(R) PRO/1000 Network Connection\n");
 
 	cards_found++;
 	return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	/* identify the MAC */
 
 	if (e1000_set_mac_type(hw)) {
-		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		e_err("Unknown MAC Type\n");
 		return -EIO;
 	}
 
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	adapter->num_rx_queues = 1;
 
 	if (e1000_alloc_queues(adapter)) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		e_err("Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	size = sizeof(struct e1000_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
 	if (!txdr->buffer_info) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the transmit descriptor ring\n");
+		e_err("Unable to allocate memory for the Tx descriptor ring\n");
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
 
-	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 setup_tx_desc_die:
 		vfree(txdr->buffer_info);
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the transmit descriptor ring\n");
+		e_err("Unable to allocate memory for the Tx descriptor ring\n");
 		return -ENOMEM;
 	}
 
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 		void *olddesc = txdr->desc;
 		dma_addr_t olddma = txdr->dma;
-		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
-			"at %p\n", txdr->size, txdr->desc);
+		e_err("txdr align check failed: %u bytes at %p\n",
+		      txdr->size, txdr->desc);
 		/* Try again, without freeing the previous */
-		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+						&txdr->dma, GFP_KERNEL);
 		/* Failed allocation, critical failure */
 		if (!txdr->desc) {
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
 			goto setup_tx_desc_die;
 		}
 
 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, txdr->size, txdr->desc,
+			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
 					    txdr->dma);
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate aligned memory "
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
+			e_err("Unable to allocate aligned memory "
 			      "for the transmit descriptor ring\n");
 			vfree(txdr->buffer_info);
 			return -ENOMEM;
 		} else {
 			/* Free old allocation, new allocation was successful */
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
 		}
 	}
 	memset(txdr->desc, 0, txdr->size);
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
 		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Tx Queue %u failed\n", i);
+			e_err("Allocation for Tx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_tx_resources(adapter,
 							&adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
 	if (!rxdr->buffer_info) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
+		e_err("Unable to allocate memory for the Rx descriptor ring\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	rxdr->size = rxdr->count * desc_len;
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_KERNEL);
 
 	if (!rxdr->desc) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
+		e_err("Unable to allocate memory for the Rx descriptor ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 		void *olddesc = rxdr->desc;
 		dma_addr_t olddma = rxdr->dma;
-		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
-			"at %p\n", rxdr->size, rxdr->desc);
+		e_err("rxdr align check failed: %u bytes at %p\n",
+		      rxdr->size, rxdr->desc);
 		/* Try again, without freeing the previous */
-		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+						&rxdr->dma, GFP_KERNEL);
 		/* Failed allocation, critical failure */
 		if (!rxdr->desc) {
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate memory "
-				"for the receive descriptor ring\n");
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
+			e_err("Unable to allocate memory for the Rx descriptor "
+			      "ring\n");
 			goto setup_rx_desc_die;
 		}
 
 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
 					    rxdr->dma);
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate aligned memory "
-				"for the receive descriptor ring\n");
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
+			e_err("Unable to allocate aligned memory for the Rx "
+			      "descriptor ring\n");
 			goto setup_rx_desc_die;
 		} else {
 			/* Free old allocation, new allocation was successful */
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
 		}
 	}
 	memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
 		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Rx Queue %u failed\n", i);
+			e_err("Allocation for Rx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_rx_resources(adapter,
 							&adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
+			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+				       buffer_info->length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev, buffer_info->dma,
+			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma &&
 		    adapter->clean_rx == e1000_clean_rx_irq) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 		} else if (buffer_info->dma &&
 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
-			pci_unmap_page(pdev, buffer_info->dma,
+			dma_unmap_page(&pdev->dev, buffer_info->dma,
 				       buffer_info->length,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 
 		buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	struct e1000_hw *hw = &adapter->hw;
 	struct netdev_hw_addr *ha;
 	bool use_uc = false;
-	struct dev_addr_list *mc_ptr;
 	u32 rctl;
 	u32 hash_value;
 	int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
 	if (!mcarray) {
-		DPRINTK(PROBE, ERR, "memory allocation failed\n");
+		e_err("memory allocation failed\n");
 		return;
 	}
 
@@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 			e1000_rar_set(hw, ha->addr, i++);
 	}
 
-	WARN_ON(i == rar_entries);
-
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		if (i == rar_entries) {
 			/* load any remaining addresses into the hash table */
 			u32 hash_reg, hash_bit, mta;
-			hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+			hash_value = e1000_hash_mc_addr(hw, ha->addr);
 			hash_reg = (hash_value >> 5) & 0x7F;
 			hash_bit = hash_value & 0x1F;
 			mta = (1 << hash_bit);
 			mcarray[hash_reg] |= mta;
 		} else {
-			e1000_rar_set(hw, mc_ptr->da_addr, i++);
+			e1000_rar_set(hw, ha->addr, i++);
 		}
 	}
 
@@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data)
 						   &adapter->link_duplex);
 
 			ctrl = er32(CTRL);
-			printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
+			pr_info("%s NIC Link is Up %d Mbps %s, "
 				"Flow Control: %s\n",
 				netdev->name,
 				adapter->link_speed,
 				adapter->link_duplex == FULL_DUPLEX ?
 				"Full Duplex" : "Half Duplex",
 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-				E1000_CTRL_TFCE) ? "TX" : "None" )));
+				E1000_CTRL_TFCE) ? "TX" : "None")));
 
 			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
@@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data)
 		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
-			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
+			pr_info("%s NIC Link is Down\n",
 				netdev->name);
 			netif_carrier_off(netdev);
 
 			if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2384,22 @@ link_up:
 		}
 	}
 
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
+		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+		u32 dif = (adapter->gotcl > adapter->gorcl ?
+			   adapter->gotcl - adapter->gorcl :
+			   adapter->gorcl - adapter->gotcl) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	ew32(ICS, E1000_ICS_RXDMT0);
 
@@ -2525,8 +2544,6 @@ set_itr_now:
 		adapter->itr = new_itr;
 		ew32(ITR, 1000000000 / (new_itr * 256));
 	}
-
-	return;
 }
 
 #define E1000_TX_FLAGS_CSUM		0x00000001
@@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
 		break;
 	default:
 		if (unlikely(net_ratelimit()))
-			DPRINTK(DRV, WARNING,
-				"checksum_partial proto=%x!\n", skb->protocol);
+			e_warn("checksum_partial proto=%x!\n", skb->protocol);
 		break;
 	}
 
@@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		/* set time_stamp *before* dma to help avoid a possible race */
 		buffer_info->time_stamp = jiffies;
 		buffer_info->mapped_as_page = false;
-		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 		buffer_info->next_to_watch = i;
 
@@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->mapped_as_page = true;
-			buffer_info->dma = pci_map_page(pdev, frag->page,
+			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
 							offset, size,
-							PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+							DMA_TO_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 			buffer_info->next_to_watch = i;
 
@@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
-	unsigned int len = skb->len - skb->data_len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 			/* fall through */
 			pull_size = min((unsigned int)4, skb->data_len);
 			if (!__pskb_pull_tail(skb, pull_size)) {
-				DPRINTK(DRV, ERR,
-					"__pskb_pull_tail failed.\n");
+				e_err("__pskb_pull_tail failed.\n");
 				dev_kfree_skb_any(skb);
 				return NETDEV_TX_OK;
 			}
-			len = skb->len - skb->data_len;
+			len = skb_headlen(skb);
 			break;
 		default:
 			/* do nothing */
@@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		e_err("Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	switch (hw->mac_type) {
 	case e1000_undefined ... e1000_82542_rev2_1:
 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			e_err("Jumbo Frames not supported.\n");
 			return -EINVAL;
 		}
 		break;
@@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
-	printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
+	pr_info("%s changing MTU from %d to %d\n",
 		netdev->name, netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
 	if (netif_running(netdev))
@@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+			e_err("Detected Tx Unit Hang\n"
 			      "  Tx Queue             <%lu>\n"
 			      "  TDH                  <%x>\n"
 			      "  TDT                  <%x>\n"
 			      "  next_to_use          <%x>\n"
 			      "  next_to_clean        <%x>\n"
 			      "buffer_info[next_to_clean]\n"
 			      "  time_stamp           <%lx>\n"
 			      "  next_to_watch        <%x>\n"
 			      "  jiffies              <%lx>\n"
 			      "  next_to_watch.status <%x>\n",
 				(unsigned long)((tx_ring - adapter->tx_ring) /
 					sizeof(struct e1000_tx_ring)),
 				readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, buffer_info->dma,
+			       buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
 		/* eth type trans needs skb->data to point to something */
 		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+			e_err("pskb_may_pull failed.\n");
 			dev_kfree_skb(skb);
 			goto next_desc;
 		}
@@ -3769,6 +3785,31 @@ next_desc:
 	return cleaned;
 }
 
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+				 struct e1000_buffer *buffer_info,
+				 u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+				       (*skb)->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
@@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, buffer_info->dma,
+				 buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		if (adapter->discarding) {
 			/* All receives must fit into a single buffer */
-			E1000_DBG("%s: Receive packet consumed multiple"
-				  " buffers\n", netdev->name);
+			e_info("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
 			if (status & E1000_RXD_STAT_EOP)
@@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-			    netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
-							       -NET_IP_ALIGN,
-							       (skb->data -
-							        NET_IP_ALIGN),
-							       (length +
-							        NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-			/* else just continue with the old one */
-		}
-		/* end copybreak code */
+		e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
@@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
-				"at %p\n", bufsz, skb->data);
+			e_err("skb align check failed: %u bytes at %p\n",
+			      bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -3999,11 +4021,11 @@ check_page:
 		}
 
 		if (!buffer_info->dma) {
-			buffer_info->dma = pci_map_page(pdev,
+			buffer_info->dma = dma_map_page(&pdev->dev,
 							buffer_info->page, 0,
 							buffer_info->length,
-							PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+							DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 				put_page(buffer_info->page);
 				dev_kfree_skb(skb);
 				buffer_info->page = NULL;
@@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
-				"at %p\n", bufsz, skb->data);
+			e_err("skb align check failed: %u bytes at %p\n",
+			      bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
-		buffer_info->dma = pci_map_single(pdev,
+		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data,
 						  buffer_info->length,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
 			buffer_info->dma = 0;
@@ -4120,16 +4142,15 @@ map_skb:
 		if (!e1000_check_64k_bound(adapter,
 					(void *)(unsigned long)buffer_info->dma,
 					adapter->rx_buffer_len)) {
-			DPRINTK(RX_ERR, ERR,
-				"dma align check failed: %u bytes at %p\n",
-				adapter->rx_buffer_len,
-				(void *)(unsigned long)buffer_info->dma);
+			e_err("dma align check failed: %u bytes at %p\n",
+			      adapter->rx_buffer_len,
+			      (void *)(unsigned long)buffer_info->dma);
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
 
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 
 			adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
 	int ret_val = pci_set_mwi(adapter->pdev);
 
 	if (ret_val)
-		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+		e_err("Error in setting MWI\n");
 }
 
 void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 	/* Fiber NICs only allow 1000 gbps Full duplex */
 	if ((hw->media_type == e1000_media_type_fiber) &&
 	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		e_err("Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 
@@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		e_err("Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev)
 	else
 		err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		pr_err("Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 	else
 		err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		pr_err("Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
@@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
 	if (netif_running(netdev)) {
 		if (e1000_up(adapter)) {
-			printk("e1000: can't bring device back up after reset\n");
+			pr_info("can't bring device back up after reset\n");
 			return;
 		}
 	}