author	<jgarzik@pretzel.yyz.us>	2005-05-25 13:57:15 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-25 13:57:15 -0400
commit	09e62666d8a6312426f96bf483cdb003bde556fe (patch)
tree	1ecb7fd093169fb77d7cd5925bc214fd6c6d82e4 /drivers
parent	34812c9e188b47b1d6c9fff8ba530e6f2365ebc4 (diff)
parent	ac79c82e793bc2440c4765e5eb1b834d2c18edf2 (diff)
Automatic merge of /spare/repo/netdev-2.6 branch ixgb
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ixgb/ixgb.h	2
-rw-r--r--	drivers/net/ixgb/ixgb_ee.c	24
-rw-r--r--	drivers/net/ixgb/ixgb_ethtool.c	4
-rw-r--r--	drivers/net/ixgb/ixgb_main.c	153
-rw-r--r--	drivers/net/ixgb/ixgb_osdep.h	3
5 files changed, 70 insertions(+), 116 deletions(-)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 26c4f15f7fc0..f8d3385c7842 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -110,7 +110,7 @@ struct ixgb_adapter;
 #define IXGB_TX_QUEUE_WAKE 16
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IXGB_RX_BUFFER_WRITE 16	/* Must be power of 2 */
+#define IXGB_RX_BUFFER_WRITE 4	/* Must be power of 2 */
 
 /* only works for sizes that are powers of 2 */
 #define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 653e99f919ce..3aae110c5560 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
 	ixgb_cleanup_eeprom(hw);
 
 	/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
-	ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+	ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
 
 	return;
 }
@@ -483,7 +483,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
 		DEBUGOUT("ixgb_ee: Checksum invalid.\n");
 		/* clear the init_ctrl_reg_1 to signify that the cache is
 		 * invalidated */
-		ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+		ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
 		return (FALSE);
 	}
 
@@ -579,7 +579,7 @@ ixgb_get_ee_compatibility(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->compatibility);
+		return (le16_to_cpu(ee_map->compatibility));
 
 	return(0);
 }
@@ -616,7 +616,7 @@ ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->init_ctrl_reg_1);
+		return (le16_to_cpu(ee_map->init_ctrl_reg_1));
 
 	return(0);
 }
@@ -635,7 +635,7 @@ ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->init_ctrl_reg_2);
+		return (le16_to_cpu(ee_map->init_ctrl_reg_2));
 
 	return(0);
 }
@@ -654,7 +654,7 @@ ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->subsystem_id);
+		return (le16_to_cpu(ee_map->subsystem_id));
 
 	return(0);
 }
@@ -673,7 +673,7 @@ ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->subvendor_id);
+		return (le16_to_cpu(ee_map->subvendor_id));
 
 	return(0);
 }
@@ -692,7 +692,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->device_id);
+		return (le16_to_cpu(ee_map->device_id));
 
 	return(0);
 }
@@ -711,7 +711,7 @@ ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->vendor_id);
+		return (le16_to_cpu(ee_map->vendor_id));
 
 	return(0);
 }
@@ -730,7 +730,7 @@ ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->swdpins_reg);
+		return (le16_to_cpu(ee_map->swdpins_reg));
 
 	return(0);
 }
@@ -749,7 +749,7 @@ ixgb_get_ee_d3_power(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->d3_power);
+		return (le16_to_cpu(ee_map->d3_power));
 
 	return(0);
 }
@@ -768,7 +768,7 @@ ixgb_get_ee_d0_power(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return(ee_map->d0_power);
+		return (le16_to_cpu(ee_map->d0_power));
 
 	return(0);
 }
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index aea10e8aaa72..3fa113854eeb 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -252,7 +252,9 @@ ixgb_get_regs(struct net_device *netdev,
 	uint32_t *reg_start = reg;
 	uint8_t i;
 
-	regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
+	/* the 1 (one) below indicates an attempt at versioning, if the
+	 * interface in ethtool or the driver changes, this 1 should be incremented */
+	regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
 
 	/* General Registers */
 	*reg++ = IXGB_READ_REG(hw, CTRL0);	/* 0 */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7d26623d8592..35f6a7c271a2 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -47,7 +47,7 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
+char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI;
 char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
 /* ixgb_pci_tbl - PCI Device ID Table
@@ -103,6 +103,7 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
 static int ixgb_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+
 #ifdef CONFIG_IXGB_NAPI
 static int ixgb_clean(struct net_device *netdev, int *budget);
 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
@@ -120,33 +121,20 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
-static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
-			      void *ptr);
-static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* for netdump / net console */
 static void ixgb_netpoll(struct net_device *dev);
 #endif
 
-struct notifier_block ixgb_notifier_reboot = {
-	.notifier_call = ixgb_notify_reboot,
-	.next = NULL,
-	.priority = 0
-};
-
 /* Exported from other modules */
 
 extern void ixgb_check_options(struct ixgb_adapter *adapter);
 
 static struct pci_driver ixgb_driver = {
 	.name = ixgb_driver_name,
 	.id_table = ixgb_pci_tbl,
 	.probe = ixgb_probe,
 	.remove = __devexit_p(ixgb_remove),
-	/* Power Managment Hooks */
-	.suspend = NULL,
-	.resume = NULL
 };
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,17 +157,12 @@ MODULE_LICENSE("GPL");
 static int __init
 ixgb_init_module(void)
 {
-	int ret;
 	printk(KERN_INFO "%s - version %s\n",
 	       ixgb_driver_string, ixgb_driver_version);
 
 	printk(KERN_INFO "%s\n", ixgb_copyright);
 
-	ret = pci_module_init(&ixgb_driver);
-	if(ret >= 0) {
-		register_reboot_notifier(&ixgb_notifier_reboot);
-	}
-	return ret;
+	return pci_module_init(&ixgb_driver);
 }
 
 module_init(ixgb_init_module);
@@ -194,7 +177,6 @@ module_init(ixgb_init_module);
 static void __exit
 ixgb_exit_module(void)
 {
-	unregister_reboot_notifier(&ixgb_notifier_reboot);
 	pci_unregister_driver(&ixgb_driver);
 }
 
@@ -224,8 +206,8 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
 {
 	if(atomic_dec_and_test(&adapter->irq_sem)) {
 		IXGB_WRITE_REG(&adapter->hw, IMS,
 			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
-			       IXGB_INT_RXO | IXGB_INT_LSC);
+			       IXGB_INT_LSC);
 		IXGB_WRITE_FLUSH(&adapter->hw);
 	}
 }
@@ -1209,10 +1191,10 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 						| IXGB_CONTEXT_DESC_CMD_TSE
 						| IXGB_CONTEXT_DESC_CMD_IP
 						| IXGB_CONTEXT_DESC_CMD_TCP
-						| IXGB_CONTEXT_DESC_CMD_RS
 						| IXGB_CONTEXT_DESC_CMD_IDE
 						| (skb->len - (hdr_len)));
 
+
 	if(++i == adapter->tx_ring.count) i = 0;
 	adapter->tx_ring.next_to_use = i;
 
@@ -1247,8 +1229,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	context_desc->mss = 0;
 	context_desc->cmd_type_len =
 		cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
-			    | IXGB_TX_DESC_CMD_RS
-			    | IXGB_TX_DESC_CMD_IDE);
+			    | IXGB_TX_DESC_CMD_IDE);
 
 	if(++i == adapter->tx_ring.count) i = 0;
 	adapter->tx_ring.next_to_use = i;
@@ -1273,6 +1254,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+
 	len -= skb->data_len;
 
 	i = tx_ring->next_to_use;
@@ -1526,14 +1508,33 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
 void
 ixgb_update_stats(struct ixgb_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+
+	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
+	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
+		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
+		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
+		u64 bcast = ((u64)bcast_h << 32) | bcast_l;
+
+		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
+		/* fix up multicast stats by removing broadcasts */
+		multi -= bcast;
+
+		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
+		adapter->stats.mprch += (multi >> 32);
+		adapter->stats.bprcl += bcast_l;
+		adapter->stats.bprch += bcast_h;
+	} else {
+		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
+		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
+		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
+		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
+	}
 	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
 	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
 	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
 	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
-	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
-	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
-	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
-	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
 	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
 	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
 	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
@@ -1823,7 +1824,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct ixgb_rx_desc *rx_desc, *next_rxd;
 	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
-	struct sk_buff *skb, *next_skb;
 	uint32_t length;
 	unsigned int i, j;
 	boolean_t cleaned = FALSE;
@@ -1833,6 +1833,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 	buffer_info = &rx_ring->buffer_info[i];
 
 	while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+		struct sk_buff *skb, *next_skb;
+		u8 status;
 
 #ifdef CONFIG_IXGB_NAPI
 		if(*work_done >= work_to_do)
@@ -1840,7 +1842,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		(*work_done)++;
 #endif
+		status = rx_desc->status;
 		skb = buffer_info->skb;
+
 		prefetch(skb->data);
 
 		if(++i == rx_ring->count) i = 0;
@@ -1855,7 +1859,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		next_skb = next_buffer->skb;
 		prefetch(next_skb);
 
-
 		cleaned = TRUE;
 
 		pci_unmap_single(pdev,
@@ -1865,7 +1868,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		length = le16_to_cpu(rx_desc->length);
 
-		if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
+		if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
 
 			/* All receives must fit into a single buffer */
 
@@ -1873,12 +1876,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 				  "length<%x>\n", length);
 
 			dev_kfree_skb_irq(skb);
-			rx_desc->status = 0;
-			buffer_info->skb = NULL;
-
-			rx_desc = next_rxd;
-			buffer_info = next_buffer;
-			continue;
+			goto rxdesc_done;
 		}
 
 		if (unlikely(rx_desc->errors
@@ -1887,12 +1885,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 			      IXGB_RX_DESC_ERRORS_RXE))) {
 
 			dev_kfree_skb_irq(skb);
-			rx_desc->status = 0;
-			buffer_info->skb = NULL;
-
-			rx_desc = next_rxd;
-			buffer_info = next_buffer;
-			continue;
+			goto rxdesc_done;
 		}
 
 		/* Good Receive */
@@ -1903,7 +1896,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_IXGB_NAPI
-		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special) &
 				IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1911,7 +1904,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_IXGB_NAPI */
-		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special) &
 				IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1921,9 +1914,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #endif /* CONFIG_IXGB_NAPI */
 		netdev->last_rx = jiffies;
 
+rxdesc_done:
+		/* clean up descriptor, might be written over by hw */
 		rx_desc->status = 0;
 		buffer_info->skb = NULL;
 
+		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
 	}
@@ -1959,8 +1955,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 
 	num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
 
-	/* leave one descriptor unused */
-	while(--cleancount > 0) {
+	/* leave three descriptors unused */
+	while(--cleancount > 2) {
 		rx_desc = IXGB_RX_DESC(*rx_ring, i);
 
 		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
@@ -1987,6 +1983,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 				   PCI_DMA_FROMDEVICE);
 
 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+		/* guarantee DD bit not set now before h/w gets descriptor
+		 * this is the rest of the workaround for h/w double
+		 * writeback. */
+		rx_desc->status = 0;
 
 		if((i & ~(num_group_tail_writes- 1)) == i) {
 			/* Force memory writes to complete before letting h/w
@@ -2099,54 +2099,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 	}
 }
 
-/**
- * ixgb_notify_reboot - handles OS notification of reboot event.
- * @param nb notifier block, unused
- * @param event Event being passed to driver to act upon
- * @param p A pointer to our net device
- **/
-static int
-ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
-{
-	struct pci_dev *pdev = NULL;
-
-	switch(event) {
-	case SYS_DOWN:
-	case SYS_HALT:
-	case SYS_POWER_OFF:
-		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
-			if (pci_dev_driver(pdev) == &ixgb_driver)
-				ixgb_suspend(pdev, 3);
-		}
-	}
-	return NOTIFY_DONE;
-}
-
-/**
- * ixgb_suspend - driver suspend function called from notify.
- * @param pdev pci driver structure used for passing to
- * @param state power state to enter
- **/
-static int
-ixgb_suspend(struct pci_dev *pdev, uint32_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgb_adapter *adapter = netdev->priv;
-
-	netif_device_detach(netdev);
-
-	if(netif_running(netdev))
-		ixgb_down(adapter, TRUE);
-
-	pci_save_state(pdev);
-
-	state = (state > 0) ? 3 : 0;
-	pci_set_power_state(pdev, state);
-	msec_delay(200);
-
-	return 0;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -2157,6 +2109,7 @@ ixgb_suspend(struct pci_dev *pdev, uint32_t state)
 static void ixgb_netpoll(struct net_device *dev)
 {
 	struct ixgb_adapter *adapter = dev->priv;
+
 	disable_irq(adapter->pdev->irq);
 	ixgb_intr(adapter->pdev->irq, dev, NULL);
 	enable_irq(adapter->pdev->irq);
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 9eba92891901..dba20481ee80 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -45,8 +45,7 @@
 		/* Don't mdelay in interrupt context! */ \
 		BUG(); \
 	} else { \
-		set_current_state(TASK_UNINTERRUPTIBLE); \
-		schedule_timeout((x * HZ)/1000 + 2); \
+		msleep(x); \
 	} } while(0)
 #endif
 