Diffstat (limited to 'drivers/net/ixgb/ixgb_main.c')
 -rw-r--r--  drivers/net/ixgb/ixgb_main.c  304
 1 file changed, 186 insertions(+), 118 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cfd67d812f0d..57006fb8840e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -28,22 +28,6 @@
 
 #include "ixgb.h"
 
-/* Change Log
- * 1.0.96 04/19/05
- *   - Make needlessly global code static -- bunk@stusta.de
- *   - ethtool cleanup -- shemminger@osdl.org
- *   - Support for MODULE_VERSION -- linville@tuxdriver.com
- *   - add skb_header_cloned check to the tso path -- herbert@apana.org.au
- * 1.0.88 01/05/05
- *   - include fix to the condition that determines when to quit NAPI - Robert Olsson
- *   - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
- * 1.0.84 10/26/04
- *   - reset buffer_info->dma in Tx resource cleanup logic
- * 1.0.83 10/12/04
- *   - sparse cleanup - shemminger@osdl.org
- *   - fix tx resource cleanup logic
- */
-
 char ixgb_driver_name[] = "ixgb";
 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 
@@ -52,9 +36,9 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.109-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
-static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
 /* ixgb_pci_tbl - PCI Device ID Table
  *
@@ -67,6 +51,8 @@ static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 static struct pci_device_id ixgb_pci_tbl[] = {
     {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+    {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+     PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
     {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
     {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
@@ -148,6 +134,11 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 /* some defines for controlling descriptor fetches in h/w */
 #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
 #define RXDCTL_PTHRESH_DEFAULT 0  /* chip considers prefech below
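
The new debug parameter feeds netif_msg_init(), which converts a level into a bitmask of message classes that the driver's DPRINTK() calls test against adapter->msg_enable. A standalone sketch of that mapping (semantics mirrored from netif_msg_init() in include/linux/netdevice.h; names here are illustrative, not driver code):

#include <stdio.h>

enum { NETIF_MSG_DRV = 0x0001, NETIF_MSG_PROBE = 0x0002,
       NETIF_MSG_LINK = 0x0004 };

static unsigned int msg_init_sketch(int debug, unsigned int default_bits)
{
    if (debug < 0 || debug >= 32)
        return default_bits;      /* out of range: use default mask */
    if (debug == 0)
        return 0;                 /* silence */
    return (1u << debug) - 1;     /* set the low 'debug' bits */
}

int main(void)
{
    unsigned int msg_enable = msg_init_sketch(3, NETIF_MSG_DRV | NETIF_MSG_PROBE);

    /* a DPRINTK(PROBE, ...) would fire only when this bit is set */
    if (msg_enable & NETIF_MSG_PROBE)
        printf("probe messages on (0x%04x)\n", msg_enable);
    return 0;
}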
@@ -196,7 +187,7 @@ module_exit(ixgb_exit_module);
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 ixgb_irq_disable(struct ixgb_adapter *adapter)
 {
     atomic_inc(&adapter->irq_sem);
@@ -210,7 +201,7 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 ixgb_irq_enable(struct ixgb_adapter *adapter)
 {
     if(atomic_dec_and_test(&adapter->irq_sem)) {
@@ -231,6 +222,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 
     /* hardware has been reset, we need to reload some things */
 
+    ixgb_rar_set(hw, netdev->dev_addr, 0);
     ixgb_set_multi(netdev);
 
     ixgb_restore_vlan(adapter);
@@ -240,6 +232,9 @@ ixgb_up(struct ixgb_adapter *adapter)
     ixgb_configure_rx(adapter);
     ixgb_alloc_rx_buffers(adapter);
 
+    /* disable interrupts and get the hardware into a known state */
+    IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
 #ifdef CONFIG_PCI_MSI
     {
     boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -249,7 +244,7 @@ ixgb_up(struct ixgb_adapter *adapter)
     if (!pcix)
        adapter->have_msi = FALSE;
     else if((err = pci_enable_msi(adapter->pdev))) {
-        printk (KERN_ERR
+        DPRINTK(PROBE, ERR,
          "Unable to allocate MSI interrupt Error: %d\n", err);
         adapter->have_msi = FALSE;
         /* proceed to try to request regular interrupt */
@@ -259,11 +254,11 @@ ixgb_up(struct ixgb_adapter *adapter)
 #endif
     if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
                           SA_SHIRQ | SA_SAMPLE_RANDOM,
-                          netdev->name, netdev)))
+                          netdev->name, netdev))) {
+        DPRINTK(PROBE, ERR,
+         "Unable to allocate interrupt Error: %d\n", err);
         return err;
-
-    /* disable interrupts and get the hardware into a known state */
-    IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+    }
 
     if((hw->max_frame_size != max_frame) ||
         (hw->max_frame_size !=
@@ -285,11 +280,12 @@ ixgb_up(struct ixgb_adapter *adapter)
     }
 
     mod_timer(&adapter->watchdog_timer, jiffies);
-    ixgb_irq_enable(adapter);
 
 #ifdef CONFIG_IXGB_NAPI
     netif_poll_enable(netdev);
 #endif
+    ixgb_irq_enable(adapter);
+
     return 0;
 }
 
@@ -326,7 +322,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
 
     ixgb_adapter_stop(&adapter->hw);
     if(!ixgb_init_hw(&adapter->hw))
-        IXGB_DBG("ixgb_init_hw failed.\n");
+        DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
 }
 
 /**
@@ -363,7 +359,8 @@ ixgb_probe(struct pci_dev *pdev,
     } else {
         if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
            (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
-            IXGB_ERR("No usable DMA configuration, aborting\n");
+            printk(KERN_ERR
+             "ixgb: No usable DMA configuration, aborting\n");
             goto err_dma_mask;
         }
         pci_using_dac = 0;
@@ -388,6 +385,7 @@ ixgb_probe(struct pci_dev *pdev,
     adapter->netdev = netdev;
     adapter->pdev = pdev;
     adapter->hw.back = adapter;
+    adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
 
     mmio_start = pci_resource_start(pdev, BAR_0);
     mmio_len = pci_resource_len(pdev, BAR_0);
@@ -416,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev,
     netdev->change_mtu = &ixgb_change_mtu;
     ixgb_set_ethtool_ops(netdev);
     netdev->tx_timeout = &ixgb_tx_timeout;
-    netdev->watchdog_timeo = HZ;
+    netdev->watchdog_timeo = 5 * HZ;
 #ifdef CONFIG_IXGB_NAPI
     netdev->poll = &ixgb_clean;
     netdev->weight = 64;
@@ -428,6 +426,7 @@ ixgb_probe(struct pci_dev *pdev,
     netdev->poll_controller = ixgb_netpoll;
 #endif
 
+    strcpy(netdev->name, pci_name(pdev));
     netdev->mem_start = mmio_start;
     netdev->mem_end = mmio_start + mmio_len;
     netdev->base_addr = adapter->hw.io_base;
@@ -449,6 +448,9 @@ ixgb_probe(struct pci_dev *pdev,
 #ifdef NETIF_F_TSO
     netdev->features |= NETIF_F_TSO;
 #endif
+#ifdef NETIF_F_LLTX
+    netdev->features |= NETIF_F_LLTX;
+#endif
 
     if(pci_using_dac)
         netdev->features |= NETIF_F_HIGHDMA;
@@ -456,7 +458,7 @@ ixgb_probe(struct pci_dev *pdev,
     /* make sure the EEPROM is good */
 
     if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
-        printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+        DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
         err = -EIO;
         goto err_eeprom;
     }
@@ -465,6 +467,7 @@ ixgb_probe(struct pci_dev *pdev,
     memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
     if(!is_valid_ether_addr(netdev->perm_addr)) {
+        DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
         err = -EIO;
         goto err_eeprom;
     }
@@ -478,6 +481,7 @@ ixgb_probe(struct pci_dev *pdev,
     INIT_WORK(&adapter->tx_timeout_task,
               (void (*)(void *))ixgb_tx_timeout_task, netdev);
 
+    strcpy(netdev->name, "eth%d");
     if((err = register_netdev(netdev)))
         goto err_register;
 
@@ -486,8 +490,7 @@ ixgb_probe(struct pci_dev *pdev,
     netif_carrier_off(netdev);
     netif_stop_queue(netdev);
 
-    printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
-           netdev->name);
+    DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
     ixgb_check_options(adapter);
     /* reset the hardware with the new settings */
 
@@ -557,17 +560,17 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
     hw->subsystem_vendor_id = pdev->subsystem_vendor;
     hw->subsystem_id = pdev->subsystem_device;
 
-    adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
     hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+    adapter->rx_buffer_len = hw->max_frame_size;
 
     if((hw->device_id == IXGB_DEVICE_ID_82597EX)
-       ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
-       ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+       || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
+       || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
+       || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
         hw->mac_type = ixgb_82597;
     else {
         /* should never have loaded on this device */
-        printk(KERN_ERR "ixgb: unsupported device id\n");
+        DPRINTK(PROBE, ERR, "unsupported device id\n");
     }
 
     /* enable flow control to be programmed */
@@ -665,6 +668,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
     size = sizeof(struct ixgb_buffer) * txdr->count;
     txdr->buffer_info = vmalloc(size);
     if(!txdr->buffer_info) {
+        DPRINTK(PROBE, ERR,
+         "Unable to allocate transmit descriptor ring memory\n");
         return -ENOMEM;
     }
     memset(txdr->buffer_info, 0, size);
@@ -677,6 +682,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
     txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
     if(!txdr->desc) {
         vfree(txdr->buffer_info);
+        DPRINTK(PROBE, ERR,
+         "Unable to allocate transmit descriptor memory\n");
         return -ENOMEM;
     }
     memset(txdr->desc, 0, txdr->size);
@@ -750,6 +757,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
     size = sizeof(struct ixgb_buffer) * rxdr->count;
     rxdr->buffer_info = vmalloc(size);
     if(!rxdr->buffer_info) {
+        DPRINTK(PROBE, ERR,
+         "Unable to allocate receive descriptor ring\n");
         return -ENOMEM;
     }
     memset(rxdr->buffer_info, 0, size);
@@ -763,6 +772,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 
     if(!rxdr->desc) {
         vfree(rxdr->buffer_info);
+        DPRINTK(PROBE, ERR,
+         "Unable to allocate receive descriptors\n");
         return -ENOMEM;
     }
     memset(rxdr->desc, 0, rxdr->size);
@@ -794,21 +805,14 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
 
     rctl |= IXGB_RCTL_SECRC;
 
-    switch (adapter->rx_buffer_len) {
-    case IXGB_RXBUFFER_2048:
-    default:
+    if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
         rctl |= IXGB_RCTL_BSIZE_2048;
-        break;
-    case IXGB_RXBUFFER_4096:
+    else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
         rctl |= IXGB_RCTL_BSIZE_4096;
-        break;
-    case IXGB_RXBUFFER_8192:
+    else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
         rctl |= IXGB_RCTL_BSIZE_8192;
-        break;
-    case IXGB_RXBUFFER_16384:
+    else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
         rctl |= IXGB_RCTL_BSIZE_16384;
-        break;
-    }
 
     IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
 }
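
This switch-to-threshold rewrite pairs with ixgb_sw_init() and ixgb_change_mtu() now storing the exact frame size in rx_buffer_len rather than a rounded bucket: the RCTL chain does the rounding up to a hardware buffer class instead. A small userspace demonstration of the same threshold logic (hypothetical helper; sizes mirror the IXGB_RXBUFFER_* constants, not driver code):

#include <stdio.h>

static unsigned int rctl_bsize(unsigned int rx_buffer_len)
{
    if (rx_buffer_len <= 2048)
        return 2048;
    else if (rx_buffer_len <= 4096)
        return 4096;
    else if (rx_buffer_len <= 8192)
        return 8192;
    else
        return 16384;
}

int main(void)
{
    /* e.g. a 9018-byte jumbo frame selects the 16384 class */
    printf("%u\n", rctl_bsize(9018));
    return 0;
}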
@@ -898,22 +902,25 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
     adapter->tx_ring.desc = NULL;
 }
 
-static inline void
+static void
 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                 struct ixgb_buffer *buffer_info)
 {
     struct pci_dev *pdev = adapter->pdev;
-    if(buffer_info->dma) {
-        pci_unmap_page(pdev,
-                       buffer_info->dma,
-                       buffer_info->length,
-                       PCI_DMA_TODEVICE);
-        buffer_info->dma = 0;
-    }
-    if(buffer_info->skb) {
+
+    if (buffer_info->dma)
+        pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+                       PCI_DMA_TODEVICE);
+
+    if (buffer_info->skb)
         dev_kfree_skb_any(buffer_info->skb);
-        buffer_info->skb = NULL;
-    }
+
+    buffer_info->skb = NULL;
+    buffer_info->dma = 0;
+    buffer_info->time_stamp = 0;
+    /* these fields must always be initialized in tx
+     * buffer_info->length = 0;
+     * buffer_info->next_to_watch = 0; */
 }
 
 /**
@@ -1112,8 +1119,8 @@ ixgb_watchdog(unsigned long data)
 
     if(adapter->hw.link_up) {
         if(!netif_carrier_ok(netdev)) {
-            printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
-                   netdev->name, 10000, "Full Duplex");
+            DPRINTK(LINK, INFO,
+                    "NIC Link is Up 10000 Mbps Full Duplex\n");
             adapter->link_speed = 10000;
             adapter->link_duplex = FULL_DUPLEX;
             netif_carrier_on(netdev);
@@ -1123,9 +1130,7 @@ ixgb_watchdog(unsigned long data)
         if(netif_carrier_ok(netdev)) {
             adapter->link_speed = 0;
             adapter->link_duplex = 0;
-            printk(KERN_INFO
-                   "ixgb: %s NIC Link is Down\n",
-                   netdev->name);
+            DPRINTK(LINK, INFO, "NIC Link is Down\n");
             netif_carrier_off(netdev);
             netif_stop_queue(netdev);
 
@@ -1158,7 +1163,7 @@ ixgb_watchdog(unsigned long data)
 #define IXGB_TX_FLAGS_VLAN 0x00000002
 #define IXGB_TX_FLAGS_TSO  0x00000004
 
-static inline int
+static int
 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
 #ifdef NETIF_F_TSO
@@ -1220,7 +1225,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
     return 0;
 }
 
-static inline boolean_t
+static boolean_t
 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
     struct ixgb_context_desc *context_desc;
@@ -1258,7 +1263,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 #define IXGB_MAX_TXD_PWR 14
 #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
 
-static inline int
+static int
 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
             unsigned int first)
 {
@@ -1284,6 +1289,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                 size,
                 PCI_DMA_TODEVICE);
         buffer_info->time_stamp = jiffies;
+        buffer_info->next_to_watch = 0;
 
         len -= size;
         offset += size;
@@ -1309,6 +1315,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                     size,
                     PCI_DMA_TODEVICE);
             buffer_info->time_stamp = jiffies;
+            buffer_info->next_to_watch = 0;
 
             len -= size;
             offset += size;
@@ -1323,7 +1330,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
     return count;
 }
 
-static inline void
+static void
 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 {
     struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1395,13 +1402,26 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         return 0;
     }
 
+#ifdef NETIF_F_LLTX
+    local_irq_save(flags);
+    if (!spin_trylock(&adapter->tx_lock)) {
+        /* Collision - tell upper layer to requeue */
+        local_irq_restore(flags);
+        return NETDEV_TX_LOCKED;
+    }
+#else
     spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+
     if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
         netif_stop_queue(netdev);
         spin_unlock_irqrestore(&adapter->tx_lock, flags);
-        return 1;
+        return NETDEV_TX_BUSY;
     }
+
+#ifndef NETIF_F_LLTX
     spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
 
     if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
         tx_flags |= IXGB_TX_FLAGS_VLAN;
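
The NETIF_F_LLTX path above replaces the core's per-device xmit serialization with the driver's own lock: the hot path uses trylock and returns NETDEV_TX_LOCKED on contention so the stack requeues the packet instead of spinning. A userspace analogy of that trylock pattern (a pthread mutex standing in for the spinlock; names illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

enum tx_result { TX_OK, TX_LOCKED };

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static enum tx_result example_xmit(const char *pkt)
{
    if (pthread_mutex_trylock(&tx_lock) != 0)
        return TX_LOCKED;           /* collision: caller requeues */
    printf("queued %s\n", pkt);     /* ...post descriptors here... */
    pthread_mutex_unlock(&tx_lock);
    return TX_OK;
}

int main(void)
{
    return example_xmit("probe-packet") == TX_OK ? 0 : 1;
}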
@@ -1413,10 +1433,13 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     tso = ixgb_tso(adapter, skb);
     if (tso < 0) {
         dev_kfree_skb_any(skb);
+#ifdef NETIF_F_LLTX
+        spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
         return NETDEV_TX_OK;
     }
 
-    if (tso)
+    if (likely(tso))
         tx_flags |= IXGB_TX_FLAGS_TSO;
     else if(ixgb_tx_csum(adapter, skb))
         tx_flags |= IXGB_TX_FLAGS_CSUM;
@@ -1426,7 +1449,15 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
     netdev->trans_start = jiffies;
 
-    return 0;
+#ifdef NETIF_F_LLTX
+    /* Make sure there is space in the ring for the next send. */
+    if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
+        netif_stop_queue(netdev);
+
+    spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+#endif
+    return NETDEV_TX_OK;
 }
 
 /**
@@ -1448,6 +1479,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
 {
     struct ixgb_adapter *adapter = netdev_priv(netdev);
 
+    adapter->tx_timeout_count++;
     ixgb_down(adapter, TRUE);
     ixgb_up(adapter);
 }
@@ -1486,28 +1518,15 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
 
     if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
        || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
-        IXGB_ERR("Invalid MTU setting\n");
+        DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
         return -EINVAL;
     }
 
-    if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
-       || (max_frame <= IXGB_RXBUFFER_2048)) {
-        adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
-    } else if(max_frame <= IXGB_RXBUFFER_4096) {
-        adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
-
-    } else if(max_frame <= IXGB_RXBUFFER_8192) {
-        adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
-
-    } else {
-        adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
-    }
+    adapter->rx_buffer_len = max_frame;
 
     netdev->mtu = new_mtu;
 
-    if(old_max_frame != max_frame && netif_running(netdev)) {
-
+    if ((old_max_frame != max_frame) && netif_running(netdev)) {
         ixgb_down(adapter, TRUE);
         ixgb_up(adapter);
     }
@@ -1765,23 +1784,43 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 
     tx_ring->next_to_clean = i;
 
-    spin_lock(&adapter->tx_lock);
-    if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
-       (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
-
-        netif_wake_queue(netdev);
+    if (unlikely(netif_queue_stopped(netdev))) {
+        spin_lock(&adapter->tx_lock);
+        if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+            (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+            netif_wake_queue(netdev);
+        spin_unlock(&adapter->tx_lock);
     }
-    spin_unlock(&adapter->tx_lock);
 
     if(adapter->detect_tx_hung) {
         /* detect a transmit hang in hardware, this serializes the
          * check with the clearing of time_stamp and movement of i */
         adapter->detect_tx_hung = FALSE;
-        if(tx_ring->buffer_info[i].dma &&
-           time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+        if (tx_ring->buffer_info[eop].dma &&
+           time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
            && !(IXGB_READ_REG(&adapter->hw, STATUS) &
-                IXGB_STATUS_TXOFF))
+                IXGB_STATUS_TXOFF)) {
+            /* detected Tx unit hang */
+            DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+                    "  TDH                  <%x>\n"
+                    "  TDT                  <%x>\n"
+                    "  next_to_use          <%x>\n"
+                    "  next_to_clean        <%x>\n"
+                    "buffer_info[next_to_clean]\n"
+                    "  time_stamp           <%lx>\n"
+                    "  next_to_watch        <%x>\n"
+                    "  jiffies              <%lx>\n"
+                    "  next_to_watch.status <%x>\n",
+                IXGB_READ_REG(&adapter->hw, TDH),
+                IXGB_READ_REG(&adapter->hw, TDT),
+                tx_ring->next_to_use,
+                tx_ring->next_to_clean,
+                tx_ring->buffer_info[eop].time_stamp,
+                eop,
+                jiffies,
+                eop_desc->status);
             netif_stop_queue(netdev);
+        }
     }
 
     return cleaned;
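
The expanded check above keys the hang detection off the end-of-packet descriptor (eop) rather than the ring index i, and declares a hang only when that descriptor is still DMA-mapped, older than one watchdog tick, and the transmitter is not legitimately paused by flow control (TXOFF). A standalone sketch of the heuristic (illustrative names; jiffies wraparound handling omitted for brevity, which the kernel's time_after() takes care of):

#include <stdio.h>

struct tx_slot { int dma_mapped; unsigned long time_stamp; };

static int tx_hung(const struct tx_slot *eop_slot, unsigned long now,
                   unsigned long hz, int paused_by_flow_control)
{
    return eop_slot->dma_mapped &&
           (now - eop_slot->time_stamp) > hz &&
           !paused_by_flow_control;
}

int main(void)
{
    struct tx_slot eop = { 1, 1000 };   /* mapped, stamped at t=1000 */
    printf("%s\n", tx_hung(&eop, 2500, 1000, 0) ? "hang" : "ok");
    return 0;
}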
@@ -1794,7 +1833,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
  * @sk_buff: socket buffer with received data
  **/
 
-static inline void
+static void
 ixgb_rx_checksum(struct ixgb_adapter *adapter,
                  struct ixgb_rx_desc *rx_desc,
                  struct sk_buff *skb)
@@ -1858,6 +1897,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #endif
         status = rx_desc->status;
         skb = buffer_info->skb;
+        buffer_info->skb = NULL;
 
         prefetch(skb->data);
 
@@ -1902,6 +1942,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
             goto rxdesc_done;
         }
 
+        /* code added for copybreak, this should improve
+         * performance for small packets with large amounts
+         * of reassembly being done in the stack */
+#define IXGB_CB_LENGTH 256
+        if (length < IXGB_CB_LENGTH) {
+            struct sk_buff *new_skb =
+                dev_alloc_skb(length + NET_IP_ALIGN);
+            if (new_skb) {
+                skb_reserve(new_skb, NET_IP_ALIGN);
+                new_skb->dev = netdev;
+                memcpy(new_skb->data - NET_IP_ALIGN,
+                       skb->data - NET_IP_ALIGN,
+                       length + NET_IP_ALIGN);
+                /* save the skb in buffer_info as good */
+                buffer_info->skb = skb;
+                skb = new_skb;
+            }
+        }
+        /* end copybreak code */
+
         /* Good Receive */
         skb_put(skb, length);
 
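
The copybreak block trades one memcpy for keeping the large DMA-mapped receive buffer on the ring: small frames are copied into a right-sized skb and the original skb is parked back in buffer_info, where ixgb_alloc_rx_buffers() recycles it via the map_skb path below. A userspace sketch of the idea (hypothetical structures, not driver code):

#include <stdlib.h>
#include <string.h>

#define CB_LENGTH 256                   /* break point, as above */

struct ring_buf { unsigned char *data; size_t size; };

/* Returns the buffer handed to the stack; *recycled reports whether
 * the large ring buffer stayed behind for reuse. */
static unsigned char *receive(struct ring_buf *rb, size_t length,
                              int *recycled)
{
    if (length < CB_LENGTH) {
        unsigned char *copy = malloc(length);
        if (copy) {
            memcpy(copy, rb->data, length);
            *recycled = 1;              /* big buffer stays on the ring */
            return copy;
        }
    }
    *recycled = 0;                      /* big buffer leaves with the packet */
    return rb->data;
}

int main(void)
{
    unsigned char backing[2048] = "small payload";
    struct ring_buf rb = { backing, sizeof(backing) };
    int recycled;
    unsigned char *pkt = receive(&rb, 14, &recycled);

    if (pkt != backing)
        free(pkt);
    return recycled ? 0 : 1;
}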
@@ -1931,7 +1991,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 rxdesc_done:
         /* clean up descriptor, might be written over by hw */
         rx_desc->status = 0;
-        buffer_info->skb = NULL;
 
         /* use prefetched values */
         rx_desc = next_rxd;
@@ -1971,12 +2030,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 
     /* leave three descriptors unused */
     while(--cleancount > 2) {
-        rx_desc = IXGB_RX_DESC(*rx_ring, i);
-
-        skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+        /* recycle! its good for you */
+        if (!(skb = buffer_info->skb))
+            skb = dev_alloc_skb(adapter->rx_buffer_len
+                                + NET_IP_ALIGN);
+        else {
+            skb_trim(skb, 0);
+            goto map_skb;
+        }
 
-        if(unlikely(!skb)) {
+        if (unlikely(!skb)) {
             /* Better luck next round */
+            adapter->alloc_rx_buff_failed++;
             break;
         }
 
@@ -1990,33 +2055,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 
         buffer_info->skb = skb;
         buffer_info->length = adapter->rx_buffer_len;
-        buffer_info->dma =
-            pci_map_single(pdev,
-                           skb->data,
-                           adapter->rx_buffer_len,
-                           PCI_DMA_FROMDEVICE);
+map_skb:
+        buffer_info->dma = pci_map_single(pdev,
+                                          skb->data,
+                                          adapter->rx_buffer_len,
+                                          PCI_DMA_FROMDEVICE);
 
+        rx_desc = IXGB_RX_DESC(*rx_ring, i);
         rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
         /* guarantee DD bit not set now before h/w gets descriptor
          * this is the rest of the workaround for h/w double
          * writeback. */
         rx_desc->status = 0;
 
-        if((i & ~(num_group_tail_writes- 1)) == i) {
-            /* Force memory writes to complete before letting h/w
-             * know there are new descriptors to fetch.  (Only
-             * applicable for weak-ordered memory model archs,
-             * such as IA-64). */
-            wmb();
-
-            IXGB_WRITE_REG(&adapter->hw, RDT, i);
-        }
 
         if(++i == rx_ring->count) i = 0;
         buffer_info = &rx_ring->buffer_info[i];
     }
 
-    rx_ring->next_to_use = i;
+    if (likely(rx_ring->next_to_use != i)) {
+        rx_ring->next_to_use = i;
+        if (unlikely(i-- == 0))
+            i = (rx_ring->count - 1);
+
+        /* Force memory writes to complete before letting h/w
+         * know there are new descriptors to fetch.  (Only
+         * applicable for weak-ordered memory model archs, such
+         * as IA-64). */
+        wmb();
+        IXGB_WRITE_REG(&adapter->hw, RDT, i);
+    }
 }
 
 /**
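
The refill loop now rings the doorbell once per call instead of every few descriptors: i has already advanced one past the last filled slot, so the index is backed up by one (with wrap) before the RDT write, and wmb() orders the descriptor stores ahead of the MMIO write on weakly ordered architectures. A standalone sketch of that index math (barrier and register write are stubs; real code uses wmb() and IXGB_WRITE_REG, not these helpers):

#include <stdio.h>

#define RING_COUNT 8

static void wmb_stub(void) { __sync_synchronize(); }
static void write_tail(unsigned int tail) { printf("RDT <- %u\n", tail); }

static void refill_done(unsigned int *next_to_use, unsigned int i)
{
    if (*next_to_use != i) {            /* anything actually refilled? */
        *next_to_use = i;
        if (i-- == 0)                   /* tail names the last filled */
            i = RING_COUNT - 1;         /* slot, so back up one (wrap) */
        wmb_stub();                     /* publish descriptors first */
        write_tail(i);
    }
}

int main(void)
{
    unsigned int next_to_use = 5;
    refill_done(&next_to_use, 0);       /* wrapped: prints RDT <- 7 */
    return 0;
}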