 drivers/net/ixgb/ixgb.h         |  2
 drivers/net/ixgb/ixgb_ethtool.c | 36
 drivers/net/ixgb/ixgb_ids.h     |  8
 drivers/net/ixgb/ixgb_main.c    | 78
 drivers/net/ixgb/ixgb_param.c   |  4
 5 files changed, 63 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 3cec7b98d52f..69529ec9dfe1 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -94,7 +94,7 @@ struct ixgb_adapter;
 #define MIN_TXD 64

 /* hardware cannot reliably support more than 512 descriptors owned by
  * hardware descrioptor cache otherwise an unreliable ring under heavy
  * recieve load may result */
 /* #define DEFAULT_RXD 1024 */
 /* #define MAX_RXD 4096 */
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 7c9b35c677f0..5b354e3122c4 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -125,7 +125,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	if (ecmd->autoneg == AUTONEG_ENABLE ||
 	    ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
 		return -EINVAL;

 	if (netif_running(adapter->netdev)) {
 		ixgb_down(adapter, true);
 		ixgb_reset(adapter);
@@ -143,9 +143,9 @@ ixgb_get_pauseparam(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;

 	pause->autoneg = AUTONEG_DISABLE;

 	if (hw->fc.type == ixgb_fc_rx_pause)
 		pause->rx_pause = 1;
 	else if (hw->fc.type == ixgb_fc_tx_pause)
@@ -162,7 +162,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;

 	if (pause->autoneg == AUTONEG_ENABLE)
 		return -EINVAL;

@@ -181,7 +181,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
 		ixgb_set_speed_duplex(netdev);
 	} else
 		ixgb_reset(adapter);

 	return 0;
 }

@@ -208,7 +208,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
 		ixgb_reset(adapter);
 	return 0;
 }

 static u32
 ixgb_get_tx_csum(struct net_device *netdev)
 {
@@ -234,7 +234,7 @@ ixgb_set_tso(struct net_device *netdev, u32 data)
 	else
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
 }

 static u32
 ixgb_get_msglevel(struct net_device *netdev)
@@ -251,7 +251,7 @@ ixgb_set_msglevel(struct net_device *netdev, u32 data)
 }
 #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_

 static int
 ixgb_get_regs_len(struct net_device *netdev)
 {
 #define IXGB_REG_DUMP_LEN 136*sizeof(u32)
@@ -495,7 +495,7 @@ ixgb_set_eeprom(struct net_device *netdev,
 	if ((eeprom->offset + eeprom->len) & 1) {
 		/* need read/modify/write of last changed EEPROM word */
 		/* only the first byte of the word is being modified */
 		eeprom_buff[last_word - first_word]
 			= ixgb_read_eeprom(hw, last_word);
 	}

@@ -534,7 +534,7 @@ ixgb_get_ringparam(struct net_device *netdev,
 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
 	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;

 	ring->rx_max_pending = MAX_RXD;
 	ring->tx_max_pending = MAX_TXD;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
@@ -544,7 +544,7 @@ ixgb_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }

 static int
 ixgb_set_ringparam(struct net_device *netdev,
 		   struct ethtool_ringparam *ring)
 {
@@ -647,7 +647,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
 	return 0;
 }

 static int
 ixgb_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
@@ -658,8 +658,8 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
 	}
 }

 static void
 ixgb_get_ethtool_stats(struct net_device *netdev,
 		       struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -667,13 +667,13 @@ ixgb_get_ethtool_stats(struct net_device *netdev,

 	ixgb_update_stats(adapter);
 	for(i = 0; i < IXGB_STATS_LEN; i++) {
 		char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
 		data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 }

 static void
 ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	int i;
@@ -681,7 +681,7 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 	switch(stringset) {
 	case ETH_SS_STATS:
 		for(i=0; i < IXGB_STATS_LEN; i++) {
 			memcpy(data + i * ETH_GSTRING_LEN,
 			       ixgb_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 		}
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index 180d20e793a5..4ba4d1910eb4 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -38,11 +38,11 @@
 #define SUN_VENDOR_ID 0x108E
 #define SUN_SUBVENDOR_ID 0x108E

 #define IXGB_DEVICE_ID_82597EX 0x1048
 #define IXGB_DEVICE_ID_82597EX_SR 0x1A48
 #define IXGB_DEVICE_ID_82597EX_LR 0x1B48
 #define IXGB_SUBDEVICE_ID_A11F 0xA11F
 #define IXGB_SUBDEVICE_ID_A01F 0xA01F

 #define IXGB_DEVICE_ID_82597EX_CX4 0x109E
 #define IXGB_SUBDEVICE_ID_A00C 0xA00C
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index dffb853111b7..fc211a3f7c8c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -346,8 +346,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
  **/

 static int __devinit
-ixgb_probe(struct pci_dev *pdev,
-	   const struct pci_device_id *ent)
+ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *netdev = NULL;
 	struct ixgb_adapter *adapter;
@@ -562,7 +561,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
 	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
 	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
 	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
 		hw->mac_type = ixgb_82597;
 	else {
 		/* should never have loaded on this device */
 		DPRINTK(PROBE, ERR, "unsupported device id\n");
@@ -702,8 +701,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)
 	u32 tctl;
 	struct ixgb_hw *hw = &adapter->hw;

 	/* Setup the Base and Length of the Tx Descriptor Ring
 	 * tx_ring.dma can be either a 32 or 64 bit value
 	 */

 	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
@@ -729,8 +728,8 @@ ixgb_configure_tx(struct ixgb_adapter *adapter)

 	/* Setup Transmit Descriptor Settings for this adapter */
 	adapter->tx_cmd_type =
-		IXGB_TX_DESC_TYPE
-		| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
+		IXGB_TX_DESC_TYPE |
+		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
 }

 /**
@@ -792,8 +791,8 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
 	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

 	rctl |=
 		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
 		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
 		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

 	rctl |= IXGB_RCTL_SECRC;
@@ -890,7 +889,7 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)

 static void
 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
 				struct ixgb_buffer *buffer_info)
 {
 	struct pci_dev *pdev = adapter->pdev;

@@ -1076,10 +1075,11 @@ ixgb_set_multi(struct net_device *netdev)

 	IXGB_WRITE_REG(hw, RCTL, rctl);

-	for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
-	    i++, mc_ptr = mc_ptr->next)
+	for (i = 0, mc_ptr = netdev->mc_list;
+	     mc_ptr;
+	     i++, mc_ptr = mc_ptr->next)
 		memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
 		       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

 	ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
 }
@@ -1199,7 +1199,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	context_desc->hdr_len = hdr_len;
 	context_desc->status = 0;
 	context_desc->cmd_type_len = cpu_to_le32(
 					  IXGB_CONTEXT_DESC_TYPE
 					| IXGB_CONTEXT_DESC_CMD_TSE
 					| IXGB_CONTEXT_DESC_CMD_IP
 					| IXGB_CONTEXT_DESC_CMD_TCP
@@ -1375,8 +1375,8 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 		if (++i == tx_ring->count) i = 0;
 	}

-	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
-					     | IXGB_TX_DESC_CMD_RS );
+	tx_desc->cmd_type_len |=
+		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
@@ -1455,7 +1455,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}

 	first = adapter->tx_ring.next_to_use;

 	tso = ixgb_tso(adapter, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
@@ -1577,16 +1577,16 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
 		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
 		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
 		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

 		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
 		/* fix up multicast stats by removing broadcasts */
 		if (multi >= bcast)
 			multi -= bcast;

 		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
 		adapter->stats.mprch += (multi >> 32);
 		adapter->stats.bprcl += bcast_l;
 		adapter->stats.bprch += bcast_h;
 	} else {
 		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
@@ -1715,7 +1715,7 @@ ixgb_intr(int irq, void *data)
 #ifdef CONFIG_IXGB_NAPI
 	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {

 		/* Disable interrupts and register for poll. The flush
 		   of the posted write is intentionally left out.
 		*/

@@ -1731,7 +1731,7 @@ ixgb_intr(int irq, void *data)
 		if (!ixgb_clean_rx_irq(adapter) &
 		    !ixgb_clean_tx_irq(adapter))
 			break;
 #endif
 	return IRQ_HANDLED;
 }

@@ -1787,9 +1787,9 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 			tx_desc = IXGB_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];

-			if (tx_desc->popts
-			    & (IXGB_TX_DESC_POPTS_TXSM |
-			       IXGB_TX_DESC_POPTS_IXSM))
+			if (tx_desc->popts &
+			    (IXGB_TX_DESC_POPTS_TXSM |
+			     IXGB_TX_DESC_POPTS_IXSM))
 				adapter->hw_csum_tx_good++;

 			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -1862,8 +1862,8 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)

 static void
 ixgb_rx_checksum(struct ixgb_adapter *adapter,
 		 struct ixgb_rx_desc *rx_desc,
 		 struct sk_buff *skb)
 {
 	/* Ignore Checksum bit is set OR
 	 * TCP Checksum has not been calculated
@@ -1960,11 +1960,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 			goto rxdesc_done;
 		}

-		if (unlikely(rx_desc->errors
-			     & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
-				| IXGB_RX_DESC_ERRORS_P |
-				  IXGB_RX_DESC_ERRORS_RXE))) {
-
+		if (unlikely(rx_desc->errors &
+		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
+		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
 			dev_kfree_skb_irq(skb);
 			goto rxdesc_done;
 		}
@@ -2000,14 +1998,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #ifdef CONFIG_IXGB_NAPI
 		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->special));
 		} else {
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_IXGB_NAPI */
 		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 					le16_to_cpu(rx_desc->special));
 		} else {
 			netif_rx(skb);
 		}
@@ -2086,7 +2084,7 @@ map_skb:
 		rx_desc = IXGB_RX_DESC(*rx_ring, i);
 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
 		/* guarantee DD bit not set now before h/w gets descriptor
 		 * this is the rest of the workaround for h/w double
 		 * writeback. */
 		rx_desc->status = 0;

@@ -2111,7 +2109,7 @@ map_skb:

 /**
  * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
  *
  * @param netdev network interface device structure
  * @param grp indicates to enable or disable tagging/stripping
  **/
@@ -2231,8 +2229,8 @@ static void ixgb_netpoll(struct net_device *dev)
  * This callback is called by the PCI subsystem whenever
  * a PCI bus error is detected.
  */
-static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
-						enum pci_channel_state state)
+static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
+					       enum pci_channel_state state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2255,7 +2253,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
  * This is a shortened version of the device probe/discovery code,
  * it resembles the first-half of the ixgb_probe() routine.
  */
-static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
+static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2299,7 +2297,7 @@ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
  * normal operation. Implementation resembles the second-half
  * of the ixgb_probe() routine.
  */
-static void ixgb_io_resume (struct pci_dev *pdev)
+static void ixgb_io_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index a23d2ffc4b7c..07a6980c7d8f 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -136,7 +136,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
 /* Flow control request timeout (how long to pause the link partner's tx)
  * (PAP 15:0)
  *
  * Valid Range: 1 - 65535
  *
  * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
  */
@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
 	/* high must be greater than low */
 	if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
 		/* set defaults */
 		printk (KERN_INFO
 			"RxFCHighThresh must be >= (RxFCLowThresh + 8), "
 			"Using Defaults\n");
 		adapter->hw.fc.high_water = DEFAULT_FCRTH;