author		Peter P Waskiewicz <peter.p.waskiewicz.jr@intel.com>	2008-09-11 23:04:46 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-09-24 18:55:05 -0400
commit		b46172402f39719e97b921cc3ca85141f3e8b1c2 (patch)
tree		5dc5e2df7b0e08d3f6bc206594b835ae0d337b9d /drivers/net/ixgbe/ixgbe_main.c
parent		51ac6445b108abab5e5ebeb5e68665d4509a6f29 (diff)
ixgbe: Whitespace, copyright update and version number change patch
This patch cleans up a number of whitespace issues in the driver, updates
the copyright information, and bumps the version number.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	360
1 file changed, 178 insertions(+), 182 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b51391180907..2980a3736457 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -46,15 +45,14 @@
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
         "Intel(R) 10 Gigabit PCI Express Network Driver";
 
 #define DRV_VERSION "1.3.30-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
-        "Copyright (c) 1999-2007 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
         [board_82598] = &ixgbe_82598_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -84,7 +82,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
-                        void *p);
+                            void *p);
 static struct notifier_block dca_notifier = {
         .notifier_call = ixgbe_notify_dca,
         .next = NULL,
@@ -106,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
         /* Let firmware take over control of h/w */
         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                    ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -116,7 +114,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
         /* Let firmware know the driver has taken over */
         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                    ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 #ifdef DEBUG
@@ -133,7 +131,7 @@ char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
 #endif
 
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
-                       u8 msix_vector)
+                           u8 msix_vector)
 {
         u32 ivar, index;
 
@@ -146,12 +144,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
 }
 
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                         struct ixgbe_tx_buffer
-                                         *tx_buffer_info)
+                                             struct ixgbe_tx_buffer
+                                             *tx_buffer_info)
 {
         if (tx_buffer_info->dma) {
                 pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-                           tx_buffer_info->length, PCI_DMA_TODEVICE);
+                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                 tx_buffer_info->dma = 0;
         }
         if (tx_buffer_info->skb) {
@@ -162,8 +160,8 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *tx_ring,
-                                   unsigned int eop)
+                                       struct ixgbe_ring *tx_ring,
+                                       unsigned int eop)
 {
         struct ixgbe_hw *hw = &adapter->hw;
         u32 head, tail;
@@ -198,14 +196,14 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
         return false;
 }
 
-#define IXGBE_MAX_TXD_PWR       14
-#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                      (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
 
 #define GET_TX_HEAD_FROM_RING(ring) (\
         *(volatile u32 *) \
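The TXD_USE_COUNT()/DESC_NEEDED macros re-indented in the hunk above encode a ceiling division: one Tx descriptor per 2^14-byte span of a buffer, plus one slot per possible page fragment and one for the context descriptor. A minimal standalone sketch of that arithmetic follows; the PAGE_SIZE and MAX_SKB_FRAGS values here are illustrative stand-ins, not taken from any particular kernel config:

#include <stdio.h>

/* Illustrative stand-ins; real values come from the kernel config. */
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* 16384 bytes */
#define PAGE_SIZE 4096
#define MAX_SKB_FRAGS 18

/* One descriptor per 16 KB span, rounding up (same as TXD_USE_COUNT). */
static unsigned int txd_use_count(unsigned int s)
{
	return (s >> IXGBE_MAX_TXD_PWR) +
	       ((s & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0);
}

int main(void)
{
	/* Worst case: linear data, every fragment, plus one context slot. */
	unsigned int desc_needed = txd_use_count(IXGBE_MAX_DATA_PER_TXD) +
	                           MAX_SKB_FRAGS * txd_use_count(PAGE_SIZE) + 1;

	printf("txd_use_count(60000) = %u\n", txd_use_count(60000)); /* 4 */
	printf("DESC_NEEDED = %u\n", desc_needed);                   /* 20 */
	return 0;
}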
@@ -313,7 +311,7 @@ done_cleaning:
 
 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+                                struct ixgbe_ring *rx_ring)
 {
         u32 rxctrl;
         int cpu = get_cpu();
@@ -332,7 +330,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+                                struct ixgbe_ring *tx_ring)
 {
         u32 txctrl;
         int cpu = get_cpu();
@@ -408,8 +406,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
-                          struct sk_buff *skb, u8 status,
-                          struct ixgbe_ring *ring,
-                          union ixgbe_adv_rx_desc *rx_desc)
+                              struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
+                              union ixgbe_adv_rx_desc *rx_desc)
 {
         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -577,8 +575,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 }
 
 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
-                           struct ixgbe_ring *rx_ring,
-                           int *work_done, int work_to_do)
+                               struct ixgbe_ring *rx_ring,
+                               int *work_done, int work_to_do)
 {
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
@@ -622,8 +620,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 
                 if (len && !skb_shinfo(skb)->nr_frags) {
                         pci_unmap_single(pdev, rx_buffer_info->dma,
-                                     rx_ring->rx_buf_len + NET_IP_ALIGN,
-                                     PCI_DMA_FROMDEVICE);
+                                         rx_ring->rx_buf_len + NET_IP_ALIGN,
+                                         PCI_DMA_FROMDEVICE);
                         skb_put(skb, len);
                 }
 
@@ -741,24 +739,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                 q_vector = &adapter->q_vector[v_idx];
                 /* XXX for_each_bit(...) */
                 r_idx = find_first_bit(q_vector->rxr_idx,
-                                   adapter->num_rx_queues);
+                                       adapter->num_rx_queues);
 
                 for (i = 0; i < q_vector->rxr_count; i++) {
                         j = adapter->rx_ring[r_idx].reg_idx;
                         ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                         r_idx = find_next_bit(q_vector->rxr_idx,
-                                          adapter->num_rx_queues,
-                                          r_idx + 1);
+                                              adapter->num_rx_queues,
+                                              r_idx + 1);
                 }
                 r_idx = find_first_bit(q_vector->txr_idx,
-                                   adapter->num_tx_queues);
+                                       adapter->num_tx_queues);
 
                 for (i = 0; i < q_vector->txr_count; i++) {
                         j = adapter->tx_ring[r_idx].reg_idx;
                         ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                         r_idx = find_next_bit(q_vector->txr_idx,
-                                          adapter->num_tx_queues,
-                                          r_idx + 1);
+                                              adapter->num_tx_queues,
+                                              r_idx + 1);
                 }
 
                 /* if this is a tx only vector halve the interrupt rate */
@@ -769,7 +767,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                         q_vector->eitr = adapter->eitr_param;
 
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-                            EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
         }
 
         ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
@@ -807,8 +805,8 @@ enum latency_range {
  * parameter (see ixgbe_param.c)
  **/
 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-                       u32 eitr, u8 itr_setting,
-                       int packets, int bytes)
+                           u32 eitr, u8 itr_setting,
+                           int packets, int bytes)
 {
         unsigned int retval = itr_setting;
         u32 timepassed_us;
@@ -855,37 +853,37 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
         u32 new_itr;
         u8 current_itr, ret_itr;
         int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-                          sizeof(struct ixgbe_q_vector);
+                              sizeof(struct ixgbe_q_vector);
         struct ixgbe_ring *rx_ring, *tx_ring;
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
                 tx_ring = &(adapter->tx_ring[r_idx]);
                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-                                       q_vector->tx_itr,
-                                       tx_ring->total_packets,
-                                       tx_ring->total_bytes);
+                                           q_vector->tx_itr,
+                                           tx_ring->total_packets,
+                                           tx_ring->total_bytes);
                 /* if the result for this queue would decrease interrupt
                  * rate for this vector then use that result */
                 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-                                q_vector->tx_itr - 1 : ret_itr);
+                                    q_vector->tx_itr - 1 : ret_itr);
                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                  r_idx + 1);
+                                      r_idx + 1);
         }
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         for (i = 0; i < q_vector->rxr_count; i++) {
                 rx_ring = &(adapter->rx_ring[r_idx]);
                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-                                       q_vector->rx_itr,
-                                       rx_ring->total_packets,
-                                       rx_ring->total_bytes);
+                                           q_vector->rx_itr,
+                                           rx_ring->total_packets,
+                                           rx_ring->total_bytes);
                 /* if the result for this queue would decrease interrupt
                  * rate for this vector then use that result */
                 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-                                q_vector->rx_itr - 1 : ret_itr);
+                                    q_vector->rx_itr - 1 : ret_itr);
                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                  r_idx + 1);
+                                      r_idx + 1);
         }
 
         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
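The update repeated in the hunk above, itr = (itr > ret_itr) ? itr - 1 : ret_itr, merges each queue's suggested latency class into the vector-wide one: a suggestion that throttles interrupts harder is adopted at once, while a gentler one only decays the current class by a single step. A minimal userspace sketch of that merge (the enum values mirror the driver's latency_range; everything else is illustrative):

#include <stdio.h>

/* Latency classes as in the driver: larger value => fewer interrupts/sec. */
enum latency_range { lowest_latency = 0, low_latency = 1, bulk_latency = 2 };

/*
 * Merge one queue's suggested class into the vector's current class,
 * mirroring ixgbe_set_itr_msix(): adopt a harder-throttling suggestion
 * immediately, otherwise decay one step toward more interrupts/sec.
 */
static unsigned int merge_itr(unsigned int cur, unsigned int ret)
{
	return (cur > ret) ? cur - 1 : ret;
}

int main(void)
{
	unsigned int itr = low_latency;

	itr = merge_itr(itr, bulk_latency);   /* bulky queue: jump up to 2 */
	itr = merge_itr(itr, lowest_latency); /* quiet queue: decay only to 1 */
	printf("merged class = %u\n", itr);   /* 1 (low_latency) */
	return 0;
}

The asymmetry keeps a vector that serves both busy and idle queues from oscillating: the rate drops immediately when any queue needs it, but recovers gradually.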
@@ -912,7 +910,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
                 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                 /* must write high and low 16 bits to reset counter */
                 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
-                    itr_reg);
+                        itr_reg);
                 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
 }
 
@@ -970,7 +968,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
                 tx_ring->total_packets = 0;
                 ixgbe_clean_tx_irq(adapter, tx_ring);
                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                  r_idx + 1);
+                                      r_idx + 1);
         }
 
         return IRQ_HANDLED;
@@ -1029,7 +1027,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
         struct ixgbe_q_vector *q_vector =
-                          container_of(napi, struct ixgbe_q_vector, napi);
+                               container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
         struct ixgbe_ring *rx_ring = NULL;
         int work_done = 0;
@@ -1106,7 +1104,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
         return work_done;
 }
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
-                                 int r_idx)
+                                     int r_idx)
 {
         a->q_vector[v_idx].adapter = a;
         set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
@@ -1115,7 +1113,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                 int r_idx)
+                                     int r_idx)
 {
         a->q_vector[v_idx].adapter = a;
         set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1135,7 +1133,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * mapping configurations in here.
  **/
 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-                                  int vectors)
+                                      int vectors)
 {
         int v_start = 0;
         int rxr_idx = 0, txr_idx = 0;
@@ -1212,28 +1210,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                 goto out;
 
 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-                     (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-                     &ixgbe_msix_clean_many)
+                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+                         &ixgbe_msix_clean_many)
         for (vector = 0; vector < q_vectors; vector++) {
                 handler = SET_HANDLER(&adapter->q_vector[vector]);
                 sprintf(adapter->name[vector], "%s:v%d-%s",
-                    netdev->name, vector,
-                    (handler == &ixgbe_msix_clean_rx) ? "Rx" :
-                    ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+                        netdev->name, vector,
+                        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
+                        ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
                 err = request_irq(adapter->msix_entries[vector].vector,
-                              handler, 0, adapter->name[vector],
-                              &(adapter->q_vector[vector]));
+                                  handler, 0, adapter->name[vector],
+                                  &(adapter->q_vector[vector]));
                 if (err) {
                         DPRINTK(PROBE, ERR,
-                            "request_irq failed for MSIX interrupt "
-                            "Error: %d\n", err);
+                                "request_irq failed for MSIX interrupt "
+                                "Error: %d\n", err);
                         goto free_queue_irqs;
                 }
         }
 
         sprintf(adapter->name[vector], "%s:lsc", netdev->name);
         err = request_irq(adapter->msix_entries[vector].vector,
-                      &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
         if (err) {
                 DPRINTK(PROBE, ERR,
-                    "request_irq for msix_lsc failed: %d\n", err);
+                        "request_irq for msix_lsc failed: %d\n", err);
@@ -1245,7 +1243,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
         for (i = vector - 1; i >= 0; i--)
                 free_irq(adapter->msix_entries[--vector].vector,
-                     &(adapter->q_vector[i]));
+                         &(adapter->q_vector[i]));
         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
         pci_disable_msix(adapter->pdev);
         kfree(adapter->msix_entries);
@@ -1264,13 +1262,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
         struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
 
         q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
-                                        q_vector->tx_itr,
-                                        tx_ring->total_packets,
-                                        tx_ring->total_bytes);
+                                            q_vector->tx_itr,
+                                            tx_ring->total_packets,
+                                            tx_ring->total_bytes);
         q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
-                                        q_vector->rx_itr,
-                                        rx_ring->total_packets,
-                                        rx_ring->total_bytes);
+                                            q_vector->rx_itr,
+                                            rx_ring->total_packets,
+                                            rx_ring->total_bytes);
 
         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
@@ -1373,10 +1371,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
                 err = ixgbe_request_msix_irqs(adapter);
         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
-                              netdev->name, netdev);
+                                  netdev->name, netdev);
         } else {
                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
-                              netdev->name, netdev);
+                                  netdev->name, netdev);
         }
 
         if (err)
@@ -1400,7 +1398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                 i--;
                 for (; i >= 0; i--) {
                         free_irq(adapter->msix_entries[i].vector,
-                             &(adapter->q_vector[i]));
+                                 &(adapter->q_vector[i]));
                 }
 
                 ixgbe_reset_q_vectors(adapter);
@@ -1533,8 +1531,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
                 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                 srrctl |= ((IXGBE_RX_HDR_SIZE <<
-                        IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                        IXGBE_SRRCTL_BSIZEHDR_MASK);
+                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                            IXGBE_SRRCTL_BSIZEHDR_MASK);
         } else {
                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
@@ -1551,7 +1549,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 /**
  * ixgbe_get_skb_hdr - helper function for LRO header processing
  * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to tcp header structure
+ * @iphdr: pointer to ip header structure
  * @tcph: pointer to tcp header structure
  * @hdr_flags: pointer to header flags
  * @priv: private data
@@ -1576,7 +1574,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
 }
 
 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
-                       (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+                          (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
@@ -1723,7 +1721,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
-                               struct vlan_group *grp)
+                                   struct vlan_group *grp)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         u32 ctrl;
@@ -1909,7 +1907,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
         ixgbe_configure_rx(adapter);
         for (i = 0; i < adapter->num_rx_queues; i++)
                 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-                                   (adapter->rx_ring[i].count - 1));
+                                       (adapter->rx_ring[i].count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -1927,7 +1925,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
             (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                         gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
-                            IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
+                                IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
                 } else {
                         /* MSI only */
                         gpie = 0;
@@ -2037,7 +2035,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
         err = pci_enable_device(pdev);
         if (err) {
                 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
-                       "suspend\n");
+                                "suspend\n");
                 return err;
         }
         pci_set_master(pdev);
@@ -2068,7 +2066,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
  * @rx_ring: ring to free buffers from
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+                                struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         unsigned long size;
@@ -2082,8 +2080,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
                 if (rx_buffer_info->dma) {
                         pci_unmap_single(pdev, rx_buffer_info->dma,
-                                     rx_ring->rx_buf_len,
-                                     PCI_DMA_FROMDEVICE);
+                                         rx_ring->rx_buf_len,
+                                         PCI_DMA_FROMDEVICE);
                         rx_buffer_info->dma = 0;
                 }
                 if (rx_buffer_info->skb) {
@@ -2119,7 +2117,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
  * @tx_ring: ring to be cleaned
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+                                struct ixgbe_ring *tx_ring)
 {
         struct ixgbe_tx_buffer *tx_buffer_info;
         unsigned long size;
@@ -2226,7 +2224,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                 /* always use CB2 mode, difference is masked
                  * in the CB driver */
-                        IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+                IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
                 ixgbe_setup_dca(adapter);
         }
 #endif
@@ -2280,7 +2278,7 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
 static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
         struct ixgbe_q_vector *q_vector = container_of(napi,
-                                              struct ixgbe_q_vector, napi);
+                                                  struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
         int tx_cleaned, work_done = 0;
 
@@ -2371,7 +2369,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
-                                   int vectors)
+                                       int vectors)
 {
         int err, vector_threshold;
 
@@ -2390,7 +2388,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
          */
         while (vectors >= vector_threshold) {
                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                  vectors);
+                                      vectors);
                 if (!err) /* Success in acquiring all requested vectors. */
                         break;
                 else if (err < 0)
@@ -2425,9 +2423,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
  **/
 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
-        /* TODO: Remove all uses of the indices in the cases where multiple
-         * features are OR'd together, if the feature set makes sense.
-         */
         int feature_mask = 0, rss_i;
         int i, txr_idx, rxr_idx;
 
@@ -2468,12 +2463,12 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
         int i;
 
         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                               sizeof(struct ixgbe_ring), GFP_KERNEL);
+                                   sizeof(struct ixgbe_ring), GFP_KERNEL);
         if (!adapter->tx_ring)
                 goto err_tx_ring_allocation;
 
         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                               sizeof(struct ixgbe_ring), GFP_KERNEL);
+                                   sizeof(struct ixgbe_ring), GFP_KERNEL);
         if (!adapter->rx_ring)
                 goto err_rx_ring_allocation;
 
@@ -2505,7 +2500,7 @@ err_tx_ring_allocation:
  * capabilities of the hardware and the kernel.
  **/
 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
-                                                *adapter)
+                                                    *adapter)
 {
         int err = 0;
         int vector, v_budget;
@@ -2517,7 +2512,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
          * (roughly) twice the number of vectors as there are CPU's.
          */
         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-                   (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+                       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
 
         /*
          * At the same time, hardware can only support a maximum of
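The v_budget computation re-indented above caps the queue vectors at twice the number of online CPUs, then adds NON_Q_VECTORS for non-queue interrupts such as link-status change. A small sketch of that sizing rule; the NON_Q_VECTORS value here is an illustrative stand-in, not the driver's definition:

#include <stdio.h>

#define NON_Q_VECTORS 1 /* illustrative: one extra vector, e.g. link status */

/* Same sizing rule as above: at most two queue vectors per online CPU. */
static int msix_budget(int rx_queues, int tx_queues, int online_cpus)
{
	int queues = rx_queues + tx_queues;
	int cap = online_cpus * 2;

	return (queues < cap ? queues : cap) + NON_Q_VECTORS;
}

int main(void)
{
	/* e.g. 16 Rx + 16 Tx queues on a 4-CPU box: capped at 8, plus 1 = 9. */
	printf("v_budget = %d\n", msix_budget(16, 16, 4));
	return 0;
}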
@@ -2531,7 +2526,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
         /* A failure in MSI-X entry allocation isn't fatal, but it does
          * mean we disable MSI-X capabilities of the adapter. */
         adapter->msix_entries = kcalloc(v_budget,
-                                    sizeof(struct msix_entry), GFP_KERNEL);
+                                        sizeof(struct msix_entry), GFP_KERNEL);
         if (!adapter->msix_entries) {
                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                 ixgbe_set_num_queues(adapter);
@@ -2540,7 +2535,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
                 err = ixgbe_alloc_queues(adapter);
                 if (err) {
                         DPRINTK(PROBE, ERR, "Unable to allocate memory "
-                            "for queues\n");
+                                "for queues\n");
                         goto out;
                 }
 
@@ -2561,7 +2556,7 @@ try_msi:
                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
         } else {
                 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
-                    "falling back to legacy. Error: %d\n", err);
+                        "falling back to legacy. Error: %d\n", err);
                 /* reset err */
                 err = 0;
         }
@@ -2617,9 +2612,9 @@ static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
         }
 
         DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
-            "Tx Queue count = %u\n",
-            (adapter->num_rx_queues > 1) ? "Enabled" :
-            "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+                "Tx Queue count = %u\n",
+                (adapter->num_rx_queues > 1) ? "Enabled" :
+                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
 
         set_bit(__IXGBE_DOWN, &adapter->state);
 
@@ -2746,7 +2741,7 @@ err:
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                         struct ixgbe_ring *rx_ring)
+                             struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         int size;
@@ -2761,7 +2756,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
         rx_ring->rx_buffer_info = vmalloc(size);
         if (!rx_ring->rx_buffer_info) {
                 DPRINTK(PROBE, ERR,
-                    "vmalloc allocation failed for the rx desc ring\n");
+                        "vmalloc allocation failed for the rx desc ring\n");
                 goto alloc_failed;
         }
         memset(rx_ring->rx_buffer_info, 0, size);
@@ -2774,7 +2769,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
         if (!rx_ring->desc) {
                 DPRINTK(PROBE, ERR,
-                    "Memory allocation failed for the rx desc ring\n");
+                        "Memory allocation failed for the rx desc ring\n");
                 vfree(rx_ring->rx_buffer_info);
                 goto alloc_failed;
         }
@@ -2827,7 +2822,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_ree_rx_resources - Free Rx Resources
+ * ixgbe_free_rx_resources - Free Rx Resources
  * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
@@ -2881,11 +2876,10 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
                 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-                if (err) {
-                        DPRINTK(PROBE, ERR,
-                                "Allocation for Tx Queue %u failed\n", i);
+                if (!err)
+                        continue;
+                DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
                 break;
-                }
         }
 
         return err;
@@ -2908,11 +2902,10 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-                if (err) {
-                        DPRINTK(PROBE, ERR,
-                                "Allocation for Rx Queue %u failed\n", i);
+                if (!err)
+                        continue;
+                DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
                 break;
-                }
         }
 
         return err;
@@ -2935,7 +2928,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
                 return -EINVAL;
 
         DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
-            netdev->mtu, new_mtu);
+                netdev->mtu, new_mtu);
         /* must set new MTU before calling down or up */
         netdev->mtu = new_mtu;
 
@@ -3102,7 +3095,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
         /* Rx Errors */
         adapter->net_stats.rx_errors = adapter->stats.crcerrs +
-                                   adapter->stats.rlec;
+                                       adapter->stats.rlec;
         adapter->net_stats.rx_dropped = 0;
         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3206,8 +3199,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
-                 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-                 u32 tx_flags, u8 *hdr_len)
+                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+                     u32 tx_flags, u8 *hdr_len)
 {
         struct ixgbe_adv_tx_context_desc *context_desc;
         unsigned int i;
@@ -3230,16 +3223,16 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                         iph->tot_len = 0;
                         iph->check = 0;
                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                             iph->daddr, 0,
-                                                             IPPROTO_TCP,
-                                                             0);
+                                                                 iph->daddr, 0,
+                                                                 IPPROTO_TCP,
+                                                                 0);
                         adapter->hw_tso_ctxt++;
                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
                         ipv6_hdr(skb)->payload_len = 0;
                         tcp_hdr(skb)->check =
-                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                             &ipv6_hdr(skb)->daddr,
-                                             0, IPPROTO_TCP, 0);
+                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                 &ipv6_hdr(skb)->daddr,
+                                                 0, IPPROTO_TCP, 0);
                         adapter->hw_tso6_ctxt++;
                 }
 
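In the TSO path above the driver zeroes the IP total length and seeds tcp->check with only the pseudo-header sum (note the 0 length passed to csum_tcpudp_magic()), so the hardware can finish the per-segment checksums. A userspace approximation of that seed computation; byte-order handling is simplified here and the addresses are illustrative values, so treat this as a sketch rather than the kernel's csum implementation:

#include <stdio.h>
#include <stdint.h>

/*
 * Fold the IPv4 pseudo-header fields (zero length, as in TSO) into a
 * 16-bit one's-complement sum. Mirrors the effect of
 * ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0).
 */
static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint64_t sum = (saddr >> 16) + (saddr & 0xffff) +
	               (daddr >> 16) + (daddr & 0xffff) + proto;

	while (sum >> 16)                  /* fold carries into 16 bits */
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;              /* not inverted: it is a seed */
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, TCP (6); addresses in host byte order. */
	printf("seed = 0x%04x\n", pseudo_hdr_seed(0xc0000201, 0xc0000202, 6));
	return 0;
}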
@@ -3253,7 +3246,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
3253 | vlan_macip_lens |= | 3246 | vlan_macip_lens |= |
3254 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | 3247 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); |
3255 | vlan_macip_lens |= ((skb_network_offset(skb)) << | 3248 | vlan_macip_lens |= ((skb_network_offset(skb)) << |
3256 | IXGBE_ADVTXD_MACLEN_SHIFT); | 3249 | IXGBE_ADVTXD_MACLEN_SHIFT); |
3257 | *hdr_len += skb_network_offset(skb); | 3250 | *hdr_len += skb_network_offset(skb); |
3258 | vlan_macip_lens |= | 3251 | vlan_macip_lens |= |
3259 | (skb_transport_header(skb) - skb_network_header(skb)); | 3252 | (skb_transport_header(skb) - skb_network_header(skb)); |
@@ -3264,7 +3257,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
3264 | 3257 | ||
3265 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | 3258 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
3266 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | | 3259 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | |
3267 | IXGBE_ADVTXD_DTYP_CTXT); | 3260 | IXGBE_ADVTXD_DTYP_CTXT); |
3268 | 3261 | ||
3269 | if (skb->protocol == htons(ETH_P_IP)) | 3262 | if (skb->protocol == htons(ETH_P_IP)) |
3270 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | 3263 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
@@ -3293,8 +3286,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
3293 | } | 3286 | } |
3294 | 3287 | ||
3295 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | 3288 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, |
3296 | struct ixgbe_ring *tx_ring, | 3289 | struct ixgbe_ring *tx_ring, |
3297 | struct sk_buff *skb, u32 tx_flags) | 3290 | struct sk_buff *skb, u32 tx_flags) |
3298 | { | 3291 | { |
3299 | struct ixgbe_adv_tx_context_desc *context_desc; | 3292 | struct ixgbe_adv_tx_context_desc *context_desc; |
3300 | unsigned int i; | 3293 | unsigned int i; |
@@ -3311,16 +3304,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
3311 | vlan_macip_lens |= | 3304 | vlan_macip_lens |= |
3312 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | 3305 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); |
3313 | vlan_macip_lens |= (skb_network_offset(skb) << | 3306 | vlan_macip_lens |= (skb_network_offset(skb) << |
3314 | IXGBE_ADVTXD_MACLEN_SHIFT); | 3307 | IXGBE_ADVTXD_MACLEN_SHIFT); |
3315 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 3308 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
3316 | vlan_macip_lens |= (skb_transport_header(skb) - | 3309 | vlan_macip_lens |= (skb_transport_header(skb) - |
3317 | skb_network_header(skb)); | 3310 | skb_network_header(skb)); |
3318 | 3311 | ||
3319 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | 3312 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); |
3320 | context_desc->seqnum_seed = 0; | 3313 | context_desc->seqnum_seed = 0; |
3321 | 3314 | ||
3322 | type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | | 3315 | type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | |
3323 | IXGBE_ADVTXD_DTYP_CTXT); | 3316 | IXGBE_ADVTXD_DTYP_CTXT); |
3324 | 3317 | ||
3325 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3318 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3326 | switch (skb->protocol) { | 3319 | switch (skb->protocol) { |
@@ -3328,13 +3321,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
3328 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | 3321 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
3329 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 3322 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
3330 | type_tucmd_mlhl |= | 3323 | type_tucmd_mlhl |= |
3331 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | 3324 | IXGBE_ADVTXD_TUCMD_L4T_TCP; |
3332 | break; | 3325 | break; |
3333 | case __constant_htons(ETH_P_IPV6): | 3326 | case __constant_htons(ETH_P_IPV6): |
3334 | /* XXX what about other V6 headers?? */ | 3327 | /* XXX what about other V6 headers?? */ |
3335 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 3328 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
3336 | type_tucmd_mlhl |= | 3329 | type_tucmd_mlhl |= |
3337 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | 3330 | IXGBE_ADVTXD_TUCMD_L4T_TCP; |
3338 | break; | 3331 | break; |
3339 | default: | 3332 | default: |
3340 | if (unlikely(net_ratelimit())) { | 3333 | if (unlikely(net_ratelimit())) { |
@@ -3366,8 +3359,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
3366 | } | 3359 | } |
3367 | 3360 | ||
3368 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | 3361 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, |
3369 | struct ixgbe_ring *tx_ring, | 3362 | struct ixgbe_ring *tx_ring, |
3370 | struct sk_buff *skb, unsigned int first) | 3363 | struct sk_buff *skb, unsigned int first) |
3371 | { | 3364 | { |
3372 | struct ixgbe_tx_buffer *tx_buffer_info; | 3365 | struct ixgbe_tx_buffer *tx_buffer_info; |
3373 | unsigned int len = skb->len; | 3366 | unsigned int len = skb->len; |
@@ -3385,8 +3378,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
3385 | 3378 | ||
3386 | tx_buffer_info->length = size; | 3379 | tx_buffer_info->length = size; |
3387 | tx_buffer_info->dma = pci_map_single(adapter->pdev, | 3380 | tx_buffer_info->dma = pci_map_single(adapter->pdev, |
3388 | skb->data + offset, | 3381 | skb->data + offset, |
3389 | size, PCI_DMA_TODEVICE); | 3382 | size, PCI_DMA_TODEVICE); |
3390 | tx_buffer_info->time_stamp = jiffies; | 3383 | tx_buffer_info->time_stamp = jiffies; |
3391 | tx_buffer_info->next_to_watch = i; | 3384 | tx_buffer_info->next_to_watch = i; |
3392 | 3385 | ||
@@ -3411,9 +3404,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
3411 | 3404 | ||
3412 | tx_buffer_info->length = size; | 3405 | tx_buffer_info->length = size; |
3413 | tx_buffer_info->dma = pci_map_page(adapter->pdev, | 3406 | tx_buffer_info->dma = pci_map_page(adapter->pdev, |
3414 | frag->page, | 3407 | frag->page, |
3415 | offset, | 3408 | offset, |
3416 | size, PCI_DMA_TODEVICE); | 3409 | size, |
3410 | PCI_DMA_TODEVICE); | ||
3417 | tx_buffer_info->time_stamp = jiffies; | 3411 | tx_buffer_info->time_stamp = jiffies; |
3418 | tx_buffer_info->next_to_watch = i; | 3412 | tx_buffer_info->next_to_watch = i; |
3419 | 3413 | ||
@@ -3436,8 +3430,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
3436 | } | 3430 | } |
3437 | 3431 | ||
3438 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | 3432 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, |
3439 | struct ixgbe_ring *tx_ring, | 3433 | struct ixgbe_ring *tx_ring, |
3440 | int tx_flags, int count, u32 paylen, u8 hdr_len) | 3434 | int tx_flags, int count, u32 paylen, u8 hdr_len) |
3441 | { | 3435 | { |
3442 | union ixgbe_adv_tx_desc *tx_desc = NULL; | 3436 | union ixgbe_adv_tx_desc *tx_desc = NULL; |
3443 | struct ixgbe_tx_buffer *tx_buffer_info; | 3437 | struct ixgbe_tx_buffer *tx_buffer_info; |
@@ -3456,17 +3450,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
3456 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; | 3450 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; |
3457 | 3451 | ||
3458 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | 3452 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << |
3459 | IXGBE_ADVTXD_POPTS_SHIFT; | 3453 | IXGBE_ADVTXD_POPTS_SHIFT; |
3460 | 3454 | ||
3461 | /* use index 1 context for tso */ | 3455 | /* use index 1 context for tso */ |
3462 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | 3456 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); |
3463 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) | 3457 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) |
3464 | olinfo_status |= IXGBE_TXD_POPTS_IXSM << | 3458 | olinfo_status |= IXGBE_TXD_POPTS_IXSM << |
3465 | IXGBE_ADVTXD_POPTS_SHIFT; | 3459 | IXGBE_ADVTXD_POPTS_SHIFT; |
3466 | 3460 | ||
3467 | } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) | 3461 | } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) |
3468 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | 3462 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << |
3469 | IXGBE_ADVTXD_POPTS_SHIFT; | 3463 | IXGBE_ADVTXD_POPTS_SHIFT; |
3470 | 3464 | ||
3471 | olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); | 3465 | olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); |
3472 | 3466 | ||
@@ -3476,7 +3470,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
3476 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); | 3470 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); |
3477 | tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); | 3471 | tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); |
3478 | tx_desc->read.cmd_type_len = | 3472 | tx_desc->read.cmd_type_len = |
3479 | cpu_to_le32(cmd_type_len | tx_buffer_info->length); | 3473 | cpu_to_le32(cmd_type_len | tx_buffer_info->length); |
3480 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | 3474 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
3481 | 3475 | ||
3482 | i++; | 3476 | i++; |
@@ -3499,7 +3493,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
3499 | } | 3493 | } |
3500 | 3494 | ||
3501 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | 3495 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, |
3502 | struct ixgbe_ring *tx_ring, int size) | 3496 | struct ixgbe_ring *tx_ring, int size) |
3503 | { | 3497 | { |
3504 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3498 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3505 | 3499 | ||
@@ -3521,7 +3515,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
3521 | } | 3515 | } |
3522 | 3516 | ||
3523 | static int ixgbe_maybe_stop_tx(struct net_device *netdev, | 3517 | static int ixgbe_maybe_stop_tx(struct net_device *netdev, |
3524 | struct ixgbe_ring *tx_ring, int size) | 3518 | struct ixgbe_ring *tx_ring, int size) |
3525 | { | 3519 | { |
3526 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) | 3520 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) |
3527 | return 0; | 3521 | return 0; |
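These two functions are the standard lockless stop/wake pattern for a TX ring: the check in ixgbe_maybe_stop_tx() keeps the hot path cheap, and the out-of-line helper only runs when the ring looks full. A sketch of the pattern, with struct my_ring and DESC_UNUSED() as hypothetical stand-ins for the ixgbe ring bookkeeping:

#include <linux/netdevice.h>

struct my_ring {			/* stand-in for struct ixgbe_ring */
	unsigned int count;		/* descriptors in the ring */
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

/* Free descriptors, accounting for wraparound. */
#define DESC_UNUSED(r) \
	((((r)->next_to_clean > (r)->next_to_use) ? 0 : (r)->count) + \
	 (r)->next_to_clean - (r)->next_to_use - 1)

static int my_maybe_stop_tx_slow(struct net_device *netdev,
                                 struct my_ring *tx_ring, int size)
{
	netif_stop_queue(netdev);
	/* The barrier pairs with one in the TX cleanup path: the
	 * stopped state must be visible before the free count is
	 * re-read, or a racing completion could be missed and the
	 * queue would stall forever. */
	smp_mb();
	if (likely(DESC_UNUSED(tx_ring) < size))
		return -EBUSY;
	/* A completion freed enough space while we were stopping. */
	netif_start_queue(netdev);
	return 0;
}

static int my_maybe_stop_tx(struct net_device *netdev,
                            struct my_ring *tx_ring, int size)
{
	if (likely(DESC_UNUSED(tx_ring) >= size))
		return 0;	/* common case: room available */
	return my_maybe_stop_tx_slow(netdev, tx_ring, size);
}

The slow path is deliberately a separate function so the barrier cost is only paid when the ring is nearly full.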
@@ -3575,12 +3569,12 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3575 | if (tso) | 3569 | if (tso) |
3576 | tx_flags |= IXGBE_TX_FLAGS_TSO; | 3570 | tx_flags |= IXGBE_TX_FLAGS_TSO; |
3577 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | 3571 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && |
3578 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 3572 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
3579 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 3573 | tx_flags |= IXGBE_TX_FLAGS_CSUM; |
3580 | 3574 | ||
3581 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, | 3575 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, |
3582 | ixgbe_tx_map(adapter, tx_ring, skb, first), | 3576 | ixgbe_tx_map(adapter, tx_ring, skb, first), |
3583 | skb->len, hdr_len); | 3577 | skb->len, hdr_len); |
3584 | 3578 | ||
3585 | netdev->trans_start = jiffies; | 3579 | netdev->trans_start = jiffies; |
3586 | 3580 | ||
@@ -3614,15 +3608,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) | |||
3614 | static int ixgbe_set_mac(struct net_device *netdev, void *p) | 3608 | static int ixgbe_set_mac(struct net_device *netdev, void *p) |
3615 | { | 3609 | { |
3616 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3610 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3611 | struct ixgbe_hw *hw = &adapter->hw; | ||
3617 | struct sockaddr *addr = p; | 3612 | struct sockaddr *addr = p; |
3618 | 3613 | ||
3619 | if (!is_valid_ether_addr(addr->sa_data)) | 3614 | if (!is_valid_ether_addr(addr->sa_data)) |
3620 | return -EADDRNOTAVAIL; | 3615 | return -EADDRNOTAVAIL; |
3621 | 3616 | ||
3622 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 3617 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
3623 | memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); | 3618 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
3624 | 3619 | ||
3625 | adapter->hw.mac.ops.set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); | 3620 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
3626 | 3621 | ||
3627 | return 0; | 3622 | return 0; |
3628 | } | 3623 | } |
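Beyond whitespace, this hunk caches &adapter->hw in a local so the set_rar() call fits on one line. The handler shape itself is the usual one for a MAC-address change; a sketch, with my_program_rar() as a hypothetical stand-in for hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical stand-in for the set_rar() plumbing: programs
 * receive-address register 0 with the new unicast address. */
static void my_program_rar(const u8 *mac_addr)
{
	/* ... MMIO writes to RAR[0] would go here ... */
}

static int my_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	/* Reject multicast, broadcast and all-zero addresses. */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	my_program_rar(netdev->dev_addr);

	return 0;
}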
@@ -3682,7 +3677,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | |||
3682 | for (i = 0; i < q_vectors; i++) { | 3677 | for (i = 0; i < q_vectors; i++) { |
3683 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; | 3678 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; |
3684 | netif_napi_add(adapter->netdev, &q_vector->napi, | 3679 | netif_napi_add(adapter->netdev, &q_vector->napi, |
3685 | (*poll), 64); | 3680 | (*poll), 64); |
3686 | } | 3681 | } |
3687 | } | 3682 | } |
3688 | 3683 | ||
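ixgbe_napi_add_all() registers one NAPI context per interrupt vector with the customary poll weight of 64. A sketch under the four-argument netif_napi_add() of this kernel generation (newer kernels dropped the weight parameter); my_poll() is a hypothetical stub:

#include <linux/netdevice.h>

/* Hypothetical poll stub; a real handler cleans up to 'budget'
 * descriptors and signals NAPI completion when the ring goes idle. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	/* ... process RX/TX work, bounded by budget ... */
	return work_done;
}

static void my_napi_add_all(struct net_device *netdev,
                            struct napi_struct *napis, int q_vectors)
{
	int i;

	/* One NAPI context per interrupt vector, poll weight 64. */
	for (i = 0; i < q_vectors; i++)
		netif_napi_add(netdev, &napis[i], my_poll, 64);
}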
@@ -3698,7 +3693,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | |||
3698 | * and a hardware reset occur. | 3693 | * and a hardware reset occur. |
3699 | **/ | 3694 | **/ |
3700 | static int __devinit ixgbe_probe(struct pci_dev *pdev, | 3695 | static int __devinit ixgbe_probe(struct pci_dev *pdev, |
3701 | const struct pci_device_id *ent) | 3696 | const struct pci_device_id *ent) |
3702 | { | 3697 | { |
3703 | struct net_device *netdev; | 3698 | struct net_device *netdev; |
3704 | struct ixgbe_adapter *adapter = NULL; | 3699 | struct ixgbe_adapter *adapter = NULL; |
@@ -3721,8 +3716,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3721 | if (err) { | 3716 | if (err) { |
3722 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | 3717 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
3723 | if (err) { | 3718 | if (err) { |
3724 | dev_err(&pdev->dev, "No usable DMA configuration, " | 3719 | dev_err(&pdev->dev, "No usable DMA " |
3725 | "aborting\n"); | 3720 | "configuration, aborting\n"); |
3726 | goto err_dma; | 3721 | goto err_dma; |
3727 | } | 3722 | } |
3728 | } | 3723 | } |
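The probe path negotiates DMA addressing before anything else: try a 64-bit mask, fall back to 32-bit, abort if neither sticks. A self-contained sketch of the same ladder using this era's pci_set_dma_mask()/pci_set_consistent_dma_mask() calls (a modern driver would use dma_set_mask_and_coherent() instead):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_setup_dma(struct pci_dev *pdev, int *pci_using_dac)
{
	int err;

	/* Prefer 64-bit addressing for both streaming and coherent
	 * mappings. */
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		*pci_using_dac = 1;	/* 64-bit DMA available */
		return 0;
	}

	/* Fall back to 32-bit addressing. */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		return err;
	}
	*pci_using_dac = 0;
	return 0;
}

The cached result typically decides whether the driver can advertise NETIF_F_HIGHDMA later in probe.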
@@ -3820,10 +3815,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3820 | } | 3815 | } |
3821 | 3816 | ||
3822 | netdev->features = NETIF_F_SG | | 3817 | netdev->features = NETIF_F_SG | |
3823 | NETIF_F_IP_CSUM | | 3818 | NETIF_F_IP_CSUM | |
3824 | NETIF_F_HW_VLAN_TX | | 3819 | NETIF_F_HW_VLAN_TX | |
3825 | NETIF_F_HW_VLAN_RX | | 3820 | NETIF_F_HW_VLAN_RX | |
3826 | NETIF_F_HW_VLAN_FILTER; | 3821 | NETIF_F_HW_VLAN_FILTER; |
3827 | 3822 | ||
3828 | netdev->features |= NETIF_F_IPV6_CSUM; | 3823 | netdev->features |= NETIF_F_IPV6_CSUM; |
3829 | netdev->features |= NETIF_F_TSO; | 3824 | netdev->features |= NETIF_F_TSO; |
@@ -3870,28 +3865,28 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3870 | link_speed = link_status & IXGBE_PCI_LINK_SPEED; | 3865 | link_speed = link_status & IXGBE_PCI_LINK_SPEED; |
3871 | link_width = link_status & IXGBE_PCI_LINK_WIDTH; | 3866 | link_width = link_status & IXGBE_PCI_LINK_WIDTH; |
3872 | dev_info(&pdev->dev, "(PCI Express:%s:%s) " | 3867 | dev_info(&pdev->dev, "(PCI Express:%s:%s) " |
3873 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | 3868 | "%02x:%02x:%02x:%02x:%02x:%02x\n", |
3874 | ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : | 3869 | ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : |
3875 | (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : | 3870 | (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : |
3876 | "Unknown"), | 3871 | "Unknown"), |
3877 | ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : | 3872 | ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : |
3878 | (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : | 3873 | (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : |
3879 | (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : | 3874 | (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : |
3880 | (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : | 3875 | (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : |
3881 | "Unknown"), | 3876 | "Unknown"), |
3882 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | 3877 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], |
3883 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | 3878 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); |
3884 | ixgbe_read_pba_num_generic(hw, &part_num); | 3879 | ixgbe_read_pba_num_generic(hw, &part_num); |
3885 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 3880 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
3886 | hw->mac.type, hw->phy.type, | 3881 | hw->mac.type, hw->phy.type, |
3887 | (part_num >> 8), (part_num & 0xff)); | 3882 | (part_num >> 8), (part_num & 0xff)); |
3888 | 3883 | ||
3889 | if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { | 3884 | if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { |
3890 | dev_warn(&pdev->dev, "PCI-Express bandwidth available for " | 3885 | dev_warn(&pdev->dev, "PCI-Express bandwidth available for " |
3891 | "this card is not sufficient for optimal " | 3886 | "this card is not sufficient for optimal " |
3892 | "performance.\n"); | 3887 | "performance.\n"); |
3893 | dev_warn(&pdev->dev, "For optimal performance a x8 " | 3888 | dev_warn(&pdev->dev, "For optimal performance a x8 " |
3894 | "PCI-Express slot is required.\n"); | 3889 | "PCI-Express slot is required.\n"); |
3895 | } | 3890 | } |
3896 | 3891 | ||
3897 | /* reset the hardware with the new settings */ | 3892 | /* reset the hardware with the new settings */ |
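The warning above fires because a 2.5 GT/s lane carries roughly 2 Gb/s of data after 8b/10b encoding, so a x4 Gen1 link tops out near 8 Gb/s and cannot feed a 10GbE MAC at line rate; only x8 leaves headroom. The driver reads its own IXGBE_PCI_LINK_* copy of the link status; a generic sketch using the standard PCIe link-status register instead (field positions per the PCIe spec, not the ixgbe register definitions):

#include <linux/pci.h>

static void my_report_pcie_link(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 lnksta;

	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);

	/* Speed lives in bits 3:0, negotiated width in bits 9:4. */
	dev_info(&pdev->dev, "PCIe link: %s, x%u\n",
	         (lnksta & 0xf) == 2 ? "5.0Gb/s" :
	         (lnksta & 0xf) == 1 ? "2.5Gb/s" : "Unknown",
	         (lnksta >> 4) & 0x3f);

	if (((lnksta >> 4) & 0x3f) < 8)
		dev_warn(&pdev->dev,
		         "a x8 slot is needed for full 10GbE line rate\n");
}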
@@ -3999,7 +3994,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
3999 | * this device has been detected. | 3994 | * this device has been detected. |
4000 | */ | 3995 | */ |
4001 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | 3996 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, |
4002 | pci_channel_state_t state) | 3997 | pci_channel_state_t state) |
4003 | { | 3998 | { |
4004 | struct net_device *netdev = pci_get_drvdata(pdev); | 3999 | struct net_device *netdev = pci_get_drvdata(pdev); |
4005 | struct ixgbe_adapter *adapter = netdev->priv; | 4000 | struct ixgbe_adapter *adapter = netdev->priv; |
@@ -4010,7 +4005,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
4010 | ixgbe_down(adapter); | 4005 | ixgbe_down(adapter); |
4011 | pci_disable_device(pdev); | 4006 | pci_disable_device(pdev); |
4012 | 4007 | ||
4013 | /* Request a slot slot reset. */ | 4008 | /* Request a slot reset. */ |
4014 | return PCI_ERS_RESULT_NEED_RESET; | 4009 | return PCI_ERS_RESULT_NEED_RESET; |
4015 | } | 4010 | } |
4016 | 4011 | ||
@@ -4027,7 +4022,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
4027 | 4022 | ||
4028 | if (pci_enable_device(pdev)) { | 4023 | if (pci_enable_device(pdev)) { |
4029 | DPRINTK(PROBE, ERR, | 4024 | DPRINTK(PROBE, ERR, |
4030 | "Cannot re-enable PCI device after reset.\n"); | 4025 | "Cannot re-enable PCI device after reset.\n"); |
4031 | return PCI_ERS_RESULT_DISCONNECT; | 4026 | return PCI_ERS_RESULT_DISCONNECT; |
4032 | } | 4027 | } |
4033 | pci_set_master(pdev); | 4028 | pci_set_master(pdev); |
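Taken together, the two hunks above follow the standard AER recovery contract: error_detected() quiesces the device and asks for a slot reset, slot_reset() re-enables it and restores bus mastering, and a resume callback (not in these hunks) restarts traffic. A minimal sketch of the wiring, with the bodies reduced to the PCI steps visible above:

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state)
{
	/* ... detach the netdev, bring the adapter down ... */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* request a slot reset */
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	/* ... restore wake state, reset the hardware ... */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* ... bring the interface back up, reattach the netdev ... */
}

static struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};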
@@ -4104,6 +4099,7 @@ static int __init ixgbe_init_module(void) | |||
4104 | ret = pci_register_driver(&ixgbe_driver); | 4099 | ret = pci_register_driver(&ixgbe_driver); |
4105 | return ret; | 4100 | return ret; |
4106 | } | 4101 | } |
4102 | |||
4107 | module_init(ixgbe_init_module); | 4103 | module_init(ixgbe_init_module); |
4108 | 4104 | ||
4109 | /** | 4105 | /** |
@@ -4122,12 +4118,12 @@ static void __exit ixgbe_exit_module(void) | |||
4122 | 4118 | ||
4123 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | 4119 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) |
4124 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, | 4120 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, |
4125 | void *p) | 4121 | void *p) |
4126 | { | 4122 | { |
4127 | int ret_val; | 4123 | int ret_val; |
4128 | 4124 | ||
4129 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, | 4125 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, |
4130 | __ixgbe_notify_dca); | 4126 | __ixgbe_notify_dca); |
4131 | 4127 | ||
4132 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | 4128 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; |
4133 | } | 4129 | } |
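The DCA notifier is a simple fan-out: the bus-level event is relayed to every device bound to the driver, and any per-device failure vetoes it. A sketch, with my_driver and __my_notify() as hypothetical stand-ins for ixgbe_driver and __ixgbe_notify_dca():

#include <linux/dca.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pci.h>

static struct pci_driver my_driver;	/* stand-in for ixgbe_driver */

/* Per-device handler; the event arrives through the void pointer. */
static int __my_notify(struct device *dev, void *data)
{
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* ... enable DCA tagging for this device ... */
		break;
	case DCA_PROVIDER_REMOVE:
		/* ... fall back to non-DCA operation ... */
		break;
	}
	return 0;
}

static int my_notify(struct notifier_block *nb, unsigned long event,
                     void *p)
{
	int ret = driver_for_each_device(&my_driver.driver, NULL, &event,
	                                 __my_notify);

	/* Any non-zero per-device return vetoes the event. */
	return ret ? NOTIFY_BAD : NOTIFY_DONE;
}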