commit e8e9f6966ab1977df05c894a50d7eca7d013c2a2
tree   36abde6bcdb3b2cc33b1d84714883b6d87cc6de4
parent 933d41f1f2b6d5e7bcc0782ad0eeaac983a79592
author    Joe Perches <joe@perches.com>           2010-09-07 17:34:53 -0400
committer David S. Miller <davem@davemloft.net>   2010-09-08 17:46:27 -0400

drivers/net/ixgbe/ixgbe_main.c: Checkpatch cleanups

Whitespace cleanups. Move inline keyword after function type declarations.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe')
 drivers/net/ixgbe/ixgbe_main.c | 467
 1 file changed, 236 insertions(+), 231 deletions(-)
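The two checkpatch.pl complaints this commit addresses are easier to see in miniature than in the 467-line diff. The sketch below is illustrative only; the type and function names (example_ring, example_hang_check) are invented for this note and do not appear in ixgbe:

#include <stdbool.h>

struct example_ring {
	unsigned int count;
};

/* Before: checkpatch warns that 'inline' should sit between the storage
 * class and the return type ("static inline bool", not "static bool
 * inline"), and that wrapped arguments should be aligned under the
 * opening parenthesis.
 */
static bool inline example_hang_check_old(struct example_ring *ring,
	unsigned int eop)
{
	return ring->count == eop;
}

/* After: the same declaration in the style the patch converts to. */
static inline bool example_hang_check(struct example_ring *ring,
				      unsigned int eop)
{
	return ring->count == eop;
}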
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 0027d7561516..7b6dcd8cf1a6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
-	"Intel(R) 10 Gigabit PCI Express Network Driver";
+	"Intel(R) 10 Gigabit PCI Express Network Driver";
 
 #define DRV_VERSION "2.0.84-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
 #ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
-			    void *p);
+			    void *p);
 static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-		 "per physical function");
+MODULE_PARM_DESC(max_vfs,
+		 "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 
	/* take a breather then clean up driver data */
	msleep(100);
-	if (adapter->vfinfo)
-		kfree(adapter->vfinfo);
+
+	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;
 
	adapter->num_vfs = 0;
@@ -523,7 +523,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +533,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 /*
@@ -545,7 +545,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  *
  */
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
-			   u8 queue, u8 msix_vector)
+			   u8 queue, u8 msix_vector)
 {
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
@@ -586,7 +586,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 }
 
 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-					  u64 qmask)
+					  u64 qmask)
 {
	u32 mask;
 
@@ -602,8 +602,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 }
 
 void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-				      struct ixgbe_tx_buffer
-				      *tx_buffer_info)
+				      struct ixgbe_tx_buffer
+				      *tx_buffer_info)
 {
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
@@ -637,7 +637,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
  * Returns : true if in xon state (currently not paused)
  */
 static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-				      struct ixgbe_ring *tx_ring)
+				      struct ixgbe_ring *tx_ring)
 {
	u32 txoff = IXGBE_TFCS_TXOFF;
 
@@ -682,8 +682,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-				       struct ixgbe_ring *tx_ring,
-				       unsigned int eop)
+				       struct ixgbe_ring *tx_ring,
+				       unsigned int eop)
 {
	struct ixgbe_hw *hw = &adapter->hw;
 
@@ -732,7 +732,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
  * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
-			       struct ixgbe_ring *tx_ring)
+			       struct ixgbe_ring *tx_ring)
 {
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
@@ -781,7 +781,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
		}
 
		ixgbe_unmap_and_free_tx_resource(adapter,
-						 tx_buffer_info);
+						 tx_buffer_info);
 
		tx_desc->wb.status = 0;
 
@@ -798,7 +798,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
@@ -832,7 +832,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+				struct ixgbe_ring *rx_ring)
 {
	u32 rxctrl;
	int cpu = get_cpu();
@@ -846,13 +846,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
	rx_ring->cpu = cpu;
 }
@@ -860,7 +860,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+				struct ixgbe_ring *tx_ring)
 {
	u32 txctrl;
	int cpu = get_cpu();
@@ -878,7 +878,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
	}
@@ -946,9 +946,9 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
-			      struct sk_buff *skb, u8 status,
-			      struct ixgbe_ring *ring,
-			      union ixgbe_adv_rx_desc *rx_desc)
+			      struct sk_buff *skb, u8 status,
+			      struct ixgbe_ring *ring,
+			      union ixgbe_adv_rx_desc *rx_desc)
 {
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
@@ -1016,7 +1016,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-					 struct ixgbe_ring *rx_ring, u32 val)
+					 struct ixgbe_ring *rx_ring, u32 val)
 {
	/*
	 * Force memory writes to complete before letting h/w
@@ -1033,8 +1033,8 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  * @adapter: address of board private structure
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-			    struct ixgbe_ring *rx_ring,
-			    int cleaned_count)
+			    struct ixgbe_ring *rx_ring,
+			    int cleaned_count)
 {
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
@@ -1064,8 +1064,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
		}
 
		bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-					    bi->page_offset,
-					    (PAGE_SIZE / 2),
+					    bi->page_offset,
+					    (PAGE_SIZE / 2),
					    DMA_FROM_DEVICE);
	}
 
@@ -1085,7 +1085,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev,
						 bi->skb->data,
-						 rx_ring->rx_buf_len,
+						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
@@ -1127,8 +1127,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 {
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-		IXGBE_RXDADV_RSCCNT_MASK) >>
-		IXGBE_RXDADV_RSCCNT_SHIFT;
+		IXGBE_RXDADV_RSCCNT_MASK) >>
+		IXGBE_RXDADV_RSCCNT_SHIFT;
 }
 
 /**
@@ -1141,7 +1141,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
  * turns it into the frag list owner.
  **/
 static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-							u64 *count)
+							u64 *count)
 {
	unsigned int frag_list_size = 0;
 
@@ -1169,8 +1169,8 @@ struct ixgbe_rsc_cb {
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
-			       struct ixgbe_ring *rx_ring,
-			       int *work_done, int work_to_do)
+			       struct ixgbe_ring *rx_ring,
+			       int *work_done, int work_to_do)
 {
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
@@ -1232,9 +1232,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(&pdev->dev,
-						 rx_buffer_info->dma,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
+						 rx_buffer_info->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
@@ -1245,9 +1245,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-					   rx_buffer_info->page,
-					   rx_buffer_info->page_offset,
-					   upper_len);
+					   rx_buffer_info->page,
+					   rx_buffer_info->page_offset,
+					   upper_len);
 
			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
@@ -1281,18 +1281,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+				skb = ixgbe_transform_rsc_queue(skb,
+								&(rx_ring->rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (IXGBE_RSC_CB(skb)->delay_unmap) {
					dma_unmap_single(&pdev->dev,
							 IXGBE_RSC_CB(skb)->dma,
							 rx_ring->rx_buf_len,
							 DMA_FROM_DEVICE);
					IXGBE_RSC_CB(skb)->dma = 0;
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+					rx_ring->rsc_count +=
+						skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rsc_count++;
				rx_ring->rsc_flush++;
@@ -1404,24 +1406,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
-				       adapter->num_rx_queues);
+				       adapter->num_rx_queues);
 
		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
-					      adapter->num_rx_queues,
-					      r_idx + 1);
+					      adapter->num_rx_queues,
+					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
-				       adapter->num_tx_queues);
+				       adapter->num_tx_queues);
 
		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
-					      adapter->num_tx_queues,
-					      r_idx + 1);
+					      adapter->num_tx_queues,
+					      r_idx + 1);
		}
 
		if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1436,7 +1438,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-			       v_idx);
+			       v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
@@ -1478,8 +1480,8 @@ enum latency_range {
  * parameter (see ixgbe_param.c)
  **/
 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-			   u32 eitr, u8 itr_setting,
-			   int packets, int bytes)
+			   u32 eitr, u8 itr_setting,
+			   int packets, int bytes)
 {
	unsigned int retval = itr_setting;
	u32 timepassed_us;
@@ -1568,30 +1570,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = adapter->tx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->tx_itr,
-					   tx_ring->total_packets,
-					   tx_ring->total_bytes);
+					   q_vector->tx_itr,
+					   tx_ring->total_packets,
+					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-				    q_vector->tx_itr - 1 : ret_itr);
+				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = adapter->rx_ring[r_idx];
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->rx_itr,
-					   rx_ring->total_packets,
-					   rx_ring->total_bytes);
+					   q_vector->rx_itr,
+					   rx_ring->total_packets,
+					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-				    q_vector->rx_itr - 1 : ret_itr);
+				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -1628,8 +1630,8 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 static void ixgbe_check_overtemp_task(struct work_struct *work)
 {
	struct ixgbe_adapter *adapter = container_of(work,
-						     struct ixgbe_adapter,
-						     check_overtemp_task);
+						     struct ixgbe_adapter,
+						     check_overtemp_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;
 
@@ -1747,9 +1749,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
-							adapter->tx_ring[i];
+							adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-						       &tx_ring->reinit_state))
+						       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
@@ -1778,7 +1780,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-					    u64 qmask)
+					    u64 qmask)
 {
	u32 mask;
 
@@ -1810,7 +1812,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	/* EIAM disabled interrupts (on this vector) for us */
@@ -1838,7 +1840,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	if (!q_vector->rxr_count)
@@ -1868,7 +1870,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1877,7 +1879,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	/* EIAM disabled interrupts (on this vector) for us */
@@ -1897,7 +1899,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
@@ -1919,7 +1921,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
		ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
-						((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
	}
 
	return work_done;
@@ -1936,7 +1938,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
@@ -1952,7 +1954,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	/* attempt to distribute budget to each queue fairly, but don't allow
@@ -1968,7 +1970,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+				      r_idx + 1);
	}
 
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1980,7 +1982,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
		ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
-						((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
		return 0;
	}
 
@@ -1998,7 +2000,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 {
	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
@@ -2020,14 +2022,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+			ixgbe_irq_enable_queues(adapter,
+						((u64)1 << q_vector->v_idx));
	}
 
	return work_done;
 }
 
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
-				     int r_idx)
+				     int r_idx)
 {
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 
@@ -2036,7 +2039,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-				     int t_idx)
+				     int t_idx)
 {
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 
@@ -2056,7 +2059,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * mapping configurations in here.
  **/
 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-				      int vectors)
+				      int vectors)
 {
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
@@ -2123,7 +2126,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
-	int ri=0, ti=0;
+	int ri = 0, ti = 0;
 
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -2134,26 +2137,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
		goto out;
 
 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-			 &ixgbe_msix_clean_many)
+			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+			 &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);
 
-		if(handler == &ixgbe_msix_clean_rx) {
+		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
-		}
-		else if(handler == &ixgbe_msix_clean_tx) {
+		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
-		}
-		else
+		} else
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
 
		err = request_irq(adapter->msix_entries[vector].vector,
-				  handler, 0, adapter->name[vector],
-				  adapter->q_vector[vector]);
+				  handler, 0, adapter->name[vector],
+				  adapter->q_vector[vector]);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
@@ -2163,7 +2164,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+			  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
@@ -2174,7 +2175,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
-			 adapter->q_vector[i]);
+			 adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
@@ -2192,13 +2193,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 
	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->tx_itr,
-					    tx_ring->total_packets,
-					    tx_ring->total_bytes);
+					    q_vector->tx_itr,
+					    tx_ring->total_packets,
+					    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->rx_itr,
-					    rx_ring->total_packets,
-					    rx_ring->total_bytes);
+					    q_vector->rx_itr,
+					    rx_ring->total_packets,
+					    rx_ring->total_bytes);
 
	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
@@ -2344,10 +2345,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-				  netdev->name, netdev);
+				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+				  netdev->name, netdev);
	}
 
	if (err)
@@ -2371,7 +2372,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
-				 adapter->q_vector[i]);
+				 adapter->q_vector[i]);
		}
 
		ixgbe_reset_q_vectors(adapter);
@@ -2414,7 +2415,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
 
	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
 
	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2448,7 +2449,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
	IXGBE_WRITE_FLUSH(hw);
 
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
-			(tdba & DMA_BIT_MASK(32)));
+			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
@@ -2559,7 +2560,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
-				   struct ixgbe_ring *rx_ring)
+				   struct ixgbe_ring *rx_ring)
 {
	u32 srrctl;
	int index;
@@ -2601,8 +2602,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 {
	struct ixgbe_hw *hw = &adapter->hw;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
-			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
-			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
+			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 mrqc = 0, reta = 0;
	u32 rxcsum;
	int i, j;
@@ -2813,10 +2814,10 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 
	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-		      IXGBE_PSRTYPE_UDPHDR |
-		      IXGBE_PSRTYPE_IPV4HDR |
-		      IXGBE_PSRTYPE_L2HDR |
-		      IXGBE_PSRTYPE_IPV6HDR;
+		      IXGBE_PSRTYPE_UDPHDR |
+		      IXGBE_PSRTYPE_IPV4HDR |
+		      IXGBE_PSRTYPE_L2HDR |
+		      IXGBE_PSRTYPE_IPV6HDR;
 
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;
@@ -2931,15 +2932,14 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
		rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
 
 #ifdef IXGBE_FCOE
-		if (netdev->features & NETIF_F_FCOE_MTU)
-		{
+		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
-					rx_ring->rx_buf_len =
-						IXGBE_FCOE_JUMBO_FRAME_SIZE;
+					rx_ring->rx_buf_len =
+						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}
 #endif /* IXGBE_FCOE */
@@ -3127,7 +3127,7 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
-				   struct vlan_group *grp)
+				   struct vlan_group *grp)
 {
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -3385,7 +3385,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->atr_sample_rate =
-						       adapter->atr_sample_rate;
+						       adapter->atr_sample_rate;
		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
@@ -3464,7 +3464,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
		goto link_cfg_out;
 
	if (hw->mac.ops.get_link_capabilities)
-		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+							&negotiation);
	if (ret)
		goto link_cfg_out;
 
@@ -3666,7 +3667,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
  * @rx_ring: ring to free buffers from
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+				struct ixgbe_ring *rx_ring)
 {
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
@@ -3683,7 +3684,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
-					 rx_ring->rx_buf_len,
+					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
@@ -3695,7 +3696,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
			if (IXGBE_RSC_CB(this)->delay_unmap) {
				dma_unmap_single(&pdev->dev,
						 IXGBE_RSC_CB(this)->dma,
-						 rx_ring->rx_buf_len,
+						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
				IXGBE_RSC_CB(this)->dma = 0;
				IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3737,7 +3738,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
  * @tx_ring: ring to be cleaned
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+				struct ixgbe_ring *tx_ring)
 {
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
@@ -3849,13 +3850,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
		j = adapter->tx_ring[i]->reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
-				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}
	/* Disable the Tx DMA engine on 82599 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
-				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
-				 ~IXGBE_DMATXCTL_TE));
+				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+				 ~IXGBE_DMATXCTL_TE));
 
	/* power down the optics */
	if (hw->phy.multispeed_fiber)
@@ -3885,7 +3886,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
	struct ixgbe_q_vector *q_vector =
-				container_of(napi, struct ixgbe_q_vector, napi);
+				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_clean_complete, work_done = 0;
 
@@ -3995,7 +3996,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  * Rx load across CPUs using RSS.
  *
  **/
-static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
 {
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4124,7 +4125,7 @@ done:
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
-				       int vectors)
+				       int vectors)
 {
	int err, vector_threshold;
 
@@ -4143,7 +4144,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				      vectors);
+				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
@@ -4170,7 +4171,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = min(vectors,
-				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
+				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
	}
 }
 
@@ -4241,12 +4242,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
			}
			for ( ; i < 5; i++) {
				adapter->tx_ring[i]->reg_idx =
-							((i + 2) << 4);
+							((i + 2) << 4);
				adapter->rx_ring[i]->reg_idx = i << 4;
			}
			for ( ; i < dcb_i; i++) {
				adapter->tx_ring[i]->reg_idx =
-							((i + 8) << 3);
+							((i + 8) << 3);
				adapter->rx_ring[i]->reg_idx = i << 4;
			}
 
@@ -4289,7 +4290,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  * Cache the descriptor ring offsets for Flow Director to the assigned rings.
  *
  **/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
 {
	int i;
	bool ret = false;
@@ -4446,7 +4447,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-				    adapter->node);
+				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
		if (!ring)
@@ -4470,7 +4471,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-				    adapter->node);
+				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
		if (!ring)
@@ -4516,7 +4517,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
	 * (roughly) the same number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-		       (int)num_online_cpus()) + NON_Q_VECTORS;
+		       (int)num_online_cpus()) + NON_Q_VECTORS;
 
	/*
	 * At the same time, hardware can only support a maximum of
@@ -4530,7 +4531,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
-					sizeof(struct msix_entry), GFP_KERNEL);
+					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;
@@ -4592,10 +4593,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 
	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
-					GFP_KERNEL, adapter->node);
+					GFP_KERNEL, adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
-					   GFP_KERNEL);
+					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
@@ -4756,8 +4757,8 @@ static void ixgbe_sfp_timer(unsigned long data)
4756static void ixgbe_sfp_task(struct work_struct *work) 4757static void ixgbe_sfp_task(struct work_struct *work)
4757{ 4758{
4758 struct ixgbe_adapter *adapter = container_of(work, 4759 struct ixgbe_adapter *adapter = container_of(work,
4759 struct ixgbe_adapter, 4760 struct ixgbe_adapter,
4760 sfp_task); 4761 sfp_task);
4761 struct ixgbe_hw *hw = &adapter->hw; 4762 struct ixgbe_hw *hw = &adapter->hw;
4762 4763
4763 if ((hw->phy.type == ixgbe_phy_nl) && 4764 if ((hw->phy.type == ixgbe_phy_nl) &&
@@ -4782,7 +4783,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
4782reschedule: 4783reschedule:
4783 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) 4784 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4784 mod_timer(&adapter->sfp_timer, 4785 mod_timer(&adapter->sfp_timer,
4785 round_jiffies(jiffies + (2 * HZ))); 4786 round_jiffies(jiffies + (2 * HZ)));
4786} 4787}
4787 4788
4788/** 4789/**
@@ -4838,7 +4839,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4838 adapter->atr_sample_rate = 20; 4839 adapter->atr_sample_rate = 20;
4839 } 4840 }
4840 adapter->ring_feature[RING_F_FDIR].indices = 4841 adapter->ring_feature[RING_F_FDIR].indices =
4841 IXGBE_MAX_FDIR_INDICES; 4842 IXGBE_MAX_FDIR_INDICES;
4842 adapter->fdir_pballoc = 0; 4843 adapter->fdir_pballoc = 0;
4843#ifdef IXGBE_FCOE 4844#ifdef IXGBE_FCOE
4844 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 4845 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4869,7 +4870,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4869 adapter->dcb_cfg.round_robin_enable = false; 4870 adapter->dcb_cfg.round_robin_enable = false;
4870 adapter->dcb_set_bitmap = 0x00; 4871 adapter->dcb_set_bitmap = 0x00;
4871 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4872 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4872 adapter->ring_feature[RING_F_DCB].indices); 4873 adapter->ring_feature[RING_F_DCB].indices);
4873 4874
4874#endif 4875#endif
4875 4876
@@ -4924,7 +4925,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4924 * Return 0 on success, negative on failure 4925 * Return 0 on success, negative on failure
4925 **/ 4926 **/
4926int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 4927int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4927 struct ixgbe_ring *tx_ring) 4928 struct ixgbe_ring *tx_ring)
4928{ 4929{
4929 struct pci_dev *pdev = adapter->pdev; 4930 struct pci_dev *pdev = adapter->pdev;
4930 int size; 4931 int size;
@@ -4991,7 +4992,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4991 * Returns 0 on success, negative on failure 4992 * Returns 0 on success, negative on failure
4992 **/ 4993 **/
4993int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 4994int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4994 struct ixgbe_ring *rx_ring) 4995 struct ixgbe_ring *rx_ring)
4995{ 4996{
4996 struct pci_dev *pdev = adapter->pdev; 4997 struct pci_dev *pdev = adapter->pdev;
4997 int size; 4998 int size;
@@ -5064,7 +5065,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5064 * Free all transmit software resources 5065 * Free all transmit software resources
5065 **/ 5066 **/
5066void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5067void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5067 struct ixgbe_ring *tx_ring) 5068 struct ixgbe_ring *tx_ring)
5068{ 5069{
5069 struct pci_dev *pdev = adapter->pdev; 5070 struct pci_dev *pdev = adapter->pdev;
5070 5071
@@ -5102,7 +5103,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5102 * Free all receive software resources 5103 * Free all receive software resources
5103 **/ 5104 **/
5104void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5105void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5105 struct ixgbe_ring *rx_ring) 5106 struct ixgbe_ring *rx_ring)
5106{ 5107{
5107 struct pci_dev *pdev = adapter->pdev; 5108 struct pci_dev *pdev = adapter->pdev;
5108 5109
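The run of two-line prototype hunks here (setup/free, tx/rx) is one rule applied four times: checkpatch wants continuation arguments aligned with the opening parenthesis. The rendered diff collapses whitespace, so schematically (spacing illustrative, per the commit's "whitespace cleanups" description):

	/* before: continuation at an arbitrary indent */
	void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
	        struct ixgbe_ring *rx_ring);

	/* after: the parameter sits under the '(' */
	void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *rx_ring);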
@@ -5406,7 +5407,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5406 u64 rsc_flush = 0; 5407 u64 rsc_flush = 0;
5407 for (i = 0; i < 16; i++) 5408 for (i = 0; i < 16; i++)
5408 adapter->hw_rx_no_dma_resources += 5409 adapter->hw_rx_no_dma_resources +=
5409 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5410 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5410 for (i = 0; i < adapter->num_rx_queues; i++) { 5411 for (i = 0; i < adapter->num_rx_queues; i++) {
5411 rsc_count += adapter->rx_ring[i]->rsc_count; 5412 rsc_count += adapter->rx_ring[i]->rsc_count;
5412 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5413 rsc_flush += adapter->rx_ring[i]->rsc_flush;
@@ -5439,20 +5440,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5439 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5440 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5440 if (hw->mac.type == ixgbe_mac_82599EB) { 5441 if (hw->mac.type == ixgbe_mac_82599EB) {
5441 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5442 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
5442 IXGBE_PXONRXCNT(i)); 5443 IXGBE_PXONRXCNT(i));
5443 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5444 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
5444 IXGBE_PXOFFRXCNT(i)); 5445 IXGBE_PXOFFRXCNT(i));
5445 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5446 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5446 } else { 5447 } else {
5447 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5448 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
5448 IXGBE_PXONRXC(i)); 5449 IXGBE_PXONRXC(i));
5449 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5450 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
5450 IXGBE_PXOFFRXC(i)); 5451 IXGBE_PXOFFRXC(i));
5451 } 5452 }
5452 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 5453 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
5453 IXGBE_PXONTXC(i)); 5454 IXGBE_PXONTXC(i));
5454 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, 5455 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
5455 IXGBE_PXOFFTXC(i)); 5456 IXGBE_PXOFFTXC(i));
5456 } 5457 }
5457 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 5458 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5458 /* work around hardware counting issue */ 5459 /* work around hardware counting issue */
@@ -5462,13 +5463,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5462 if (hw->mac.type == ixgbe_mac_82599EB) { 5463 if (hw->mac.type == ixgbe_mac_82599EB) {
5463 u64 tmp; 5464 u64 tmp;
5464 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5465 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5465 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ 5466 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
5467 /* 4 high bits of GORC */
5466 adapter->stats.gorc += (tmp << 32); 5468 adapter->stats.gorc += (tmp << 32);
5467 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5469 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5468 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ 5470 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
5471 /* 4 high bits of GOTC */
5469 adapter->stats.gotc += (tmp << 32); 5472 adapter->stats.gotc += (tmp << 32);
5470 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5473 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5471 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5474 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5472 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5475 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5473 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 5476 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5474 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5477 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
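Splitting the two long lines in the 82599 block above also makes the counter-widening trick easier to see: GORC/GOTC are 36-bit counters spread over two 32-bit registers, with only the low 4 bits of the high register valid. tmp is declared u64 precisely so the shift happens in 64-bit arithmetic:

	u64 tmp;

	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
	tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;	/* 4 high bits */
	adapter->stats.gorc += (tmp << 32);		/* 64-bit shift */

Had tmp been u32, tmp << 32 would be undefined behavior in C; the GOTC lines below it rely on the same widening.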
@@ -5533,7 +5536,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5533 5536
5534 /* Rx Errors */ 5537 /* Rx Errors */
5535 netdev->stats.rx_errors = adapter->stats.crcerrs + 5538 netdev->stats.rx_errors = adapter->stats.crcerrs +
5536 adapter->stats.rlec; 5539 adapter->stats.rlec;
5537 netdev->stats.rx_dropped = 0; 5540 netdev->stats.rx_dropped = 0;
5538 netdev->stats.rx_length_errors = adapter->stats.rlec; 5541 netdev->stats.rx_length_errors = adapter->stats.rlec;
5539 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 5542 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -5595,8 +5598,8 @@ watchdog_short_circuit:
5595static void ixgbe_multispeed_fiber_task(struct work_struct *work) 5598static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5596{ 5599{
5597 struct ixgbe_adapter *adapter = container_of(work, 5600 struct ixgbe_adapter *adapter = container_of(work,
5598 struct ixgbe_adapter, 5601 struct ixgbe_adapter,
5599 multispeed_fiber_task); 5602 multispeed_fiber_task);
5600 struct ixgbe_hw *hw = &adapter->hw; 5603 struct ixgbe_hw *hw = &adapter->hw;
5601 u32 autoneg; 5604 u32 autoneg;
5602 bool negotiation; 5605 bool negotiation;
@@ -5619,8 +5622,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5619static void ixgbe_sfp_config_module_task(struct work_struct *work) 5622static void ixgbe_sfp_config_module_task(struct work_struct *work)
5620{ 5623{
5621 struct ixgbe_adapter *adapter = container_of(work, 5624 struct ixgbe_adapter *adapter = container_of(work,
5622 struct ixgbe_adapter, 5625 struct ixgbe_adapter,
5623 sfp_config_module_task); 5626 sfp_config_module_task);
5624 struct ixgbe_hw *hw = &adapter->hw; 5627 struct ixgbe_hw *hw = &adapter->hw;
5625 u32 err; 5628 u32 err;
5626 5629
@@ -5653,15 +5656,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5653static void ixgbe_fdir_reinit_task(struct work_struct *work) 5656static void ixgbe_fdir_reinit_task(struct work_struct *work)
5654{ 5657{
5655 struct ixgbe_adapter *adapter = container_of(work, 5658 struct ixgbe_adapter *adapter = container_of(work,
5656 struct ixgbe_adapter, 5659 struct ixgbe_adapter,
5657 fdir_reinit_task); 5660 fdir_reinit_task);
5658 struct ixgbe_hw *hw = &adapter->hw; 5661 struct ixgbe_hw *hw = &adapter->hw;
5659 int i; 5662 int i;
5660 5663
5661 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5664 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5662 for (i = 0; i < adapter->num_tx_queues; i++) 5665 for (i = 0; i < adapter->num_tx_queues; i++)
5663 set_bit(__IXGBE_FDIR_INIT_DONE, 5666 set_bit(__IXGBE_FDIR_INIT_DONE,
5664 &(adapter->tx_ring[i]->reinit_state)); 5667 &(adapter->tx_ring[i]->reinit_state));
5665 } else { 5668 } else {
5666 e_err(probe, "failed to finish FDIR re-initialization, " 5669 e_err(probe, "failed to finish FDIR re-initialization, "
5667 "ignored adding FDIR ATR filters\n"); 5670 "ignored adding FDIR ATR filters\n");
@@ -5679,8 +5682,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock);
5679static void ixgbe_watchdog_task(struct work_struct *work) 5682static void ixgbe_watchdog_task(struct work_struct *work)
5680{ 5683{
5681 struct ixgbe_adapter *adapter = container_of(work, 5684 struct ixgbe_adapter *adapter = container_of(work,
5682 struct ixgbe_adapter, 5685 struct ixgbe_adapter,
5683 watchdog_task); 5686 watchdog_task);
5684 struct net_device *netdev = adapter->netdev; 5687 struct net_device *netdev = adapter->netdev;
5685 struct ixgbe_hw *hw = &adapter->hw; 5688 struct ixgbe_hw *hw = &adapter->hw;
5686 u32 link_speed; 5689 u32 link_speed;
@@ -5711,7 +5714,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5711 5714
5712 if (link_up || 5715 if (link_up ||
5713 time_after(jiffies, (adapter->link_check_timeout + 5716 time_after(jiffies, (adapter->link_check_timeout +
5714 IXGBE_TRY_LINK_TIMEOUT))) { 5717 IXGBE_TRY_LINK_TIMEOUT))) {
5715 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 5718 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5716 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 5719 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5717 } 5720 }
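The watchdog hunk wraps a time_after() comparison; unlike a plain >, time_after() stays correct across jiffies wraparound by evaluating the comparison as a signed difference. A simplified, annotated restatement (the EIMS write in the real branch body is omitted):

	/* true once the link-check grace period has expired, even if
	 * jiffies wrapped in between */
	if (link_up ||
	    time_after(jiffies, adapter->link_check_timeout +
			        IXGBE_TRY_LINK_TIMEOUT))
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;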
@@ -5782,8 +5785,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5782} 5785}
5783 5786
5784static int ixgbe_tso(struct ixgbe_adapter *adapter, 5787static int ixgbe_tso(struct ixgbe_adapter *adapter,
5785 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 5788 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5786 u32 tx_flags, u8 *hdr_len) 5789 u32 tx_flags, u8 *hdr_len)
5787{ 5790{
5788 struct ixgbe_adv_tx_context_desc *context_desc; 5791 struct ixgbe_adv_tx_context_desc *context_desc;
5789 unsigned int i; 5792 unsigned int i;
@@ -5806,15 +5809,15 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5806 iph->tot_len = 0; 5809 iph->tot_len = 0;
5807 iph->check = 0; 5810 iph->check = 0;
5808 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 5811 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5809 iph->daddr, 0, 5812 iph->daddr, 0,
5810 IPPROTO_TCP, 5813 IPPROTO_TCP,
5811 0); 5814 0);
5812 } else if (skb_is_gso_v6(skb)) { 5815 } else if (skb_is_gso_v6(skb)) {
5813 ipv6_hdr(skb)->payload_len = 0; 5816 ipv6_hdr(skb)->payload_len = 0;
5814 tcp_hdr(skb)->check = 5817 tcp_hdr(skb)->check =
5815 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5818 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5816 &ipv6_hdr(skb)->daddr, 5819 &ipv6_hdr(skb)->daddr,
5817 0, IPPROTO_TCP, 0); 5820 0, IPPROTO_TCP, 0);
5818 } 5821 }
5819 5822
5820 i = tx_ring->next_to_use; 5823 i = tx_ring->next_to_use;
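The TSO block re-wrapped above performs the standard pseudo-header seeding for hardware segmentation: the stack cannot know each segment's final length, so it zeroes the length-bearing fields and leaves ~(pseudo-header sum) in the TCP checksum field for the NIC to complete per segment. The IPv4 side, annotated:

	iph->tot_len = 0;		/* hw rewrites this per segment */
	iph->check = 0;			/* hw recomputes this per segment */
	tcp_hdr(skb)->check =
		~csum_tcpudp_magic(iph->saddr, iph->daddr,
				   0, IPPROTO_TCP, 0);	/* length = 0 */

The IPv6 branch is the same idea with csum_ipv6_magic() and payload_len.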
@@ -5827,7 +5830,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5827 vlan_macip_lens |= 5830 vlan_macip_lens |=
5828 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 5831 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5829 vlan_macip_lens |= ((skb_network_offset(skb)) << 5832 vlan_macip_lens |= ((skb_network_offset(skb)) <<
5830 IXGBE_ADVTXD_MACLEN_SHIFT); 5833 IXGBE_ADVTXD_MACLEN_SHIFT);
5831 *hdr_len += skb_network_offset(skb); 5834 *hdr_len += skb_network_offset(skb);
5832 vlan_macip_lens |= 5835 vlan_macip_lens |=
5833 (skb_transport_header(skb) - skb_network_header(skb)); 5836 (skb_transport_header(skb) - skb_network_header(skb));
@@ -5838,7 +5841,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5838 5841
5839 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 5842 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5840 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 5843 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5841 IXGBE_ADVTXD_DTYP_CTXT); 5844 IXGBE_ADVTXD_DTYP_CTXT);
5842 5845
5843 if (skb->protocol == htons(ETH_P_IP)) 5846 if (skb->protocol == htons(ETH_P_IP))
5844 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5847 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -5867,8 +5870,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5867} 5870}
5868 5871
5869static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 5872static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5870 struct ixgbe_ring *tx_ring, 5873 struct ixgbe_ring *tx_ring,
5871 struct sk_buff *skb, u32 tx_flags) 5874 struct sk_buff *skb, u32 tx_flags)
5872{ 5875{
5873 struct ixgbe_adv_tx_context_desc *context_desc; 5876 struct ixgbe_adv_tx_context_desc *context_desc;
5874 unsigned int i; 5877 unsigned int i;
@@ -5885,16 +5888,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5885 vlan_macip_lens |= 5888 vlan_macip_lens |=
5886 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 5889 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5887 vlan_macip_lens |= (skb_network_offset(skb) << 5890 vlan_macip_lens |= (skb_network_offset(skb) <<
5888 IXGBE_ADVTXD_MACLEN_SHIFT); 5891 IXGBE_ADVTXD_MACLEN_SHIFT);
5889 if (skb->ip_summed == CHECKSUM_PARTIAL) 5892 if (skb->ip_summed == CHECKSUM_PARTIAL)
5890 vlan_macip_lens |= (skb_transport_header(skb) - 5893 vlan_macip_lens |= (skb_transport_header(skb) -
5891 skb_network_header(skb)); 5894 skb_network_header(skb));
5892 5895
5893 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 5896 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5894 context_desc->seqnum_seed = 0; 5897 context_desc->seqnum_seed = 0;
5895 5898
5896 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 5899 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
5897 IXGBE_ADVTXD_DTYP_CTXT); 5900 IXGBE_ADVTXD_DTYP_CTXT);
5898 5901
5899 if (skb->ip_summed == CHECKSUM_PARTIAL) { 5902 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5900 __be16 protocol; 5903 __be16 protocol;
@@ -5913,19 +5916,19 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5913 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5916 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5914 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 5917 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5915 type_tucmd_mlhl |= 5918 type_tucmd_mlhl |=
5916 IXGBE_ADVTXD_TUCMD_L4T_TCP; 5919 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5917 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) 5920 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
5918 type_tucmd_mlhl |= 5921 type_tucmd_mlhl |=
5919 IXGBE_ADVTXD_TUCMD_L4T_SCTP; 5922 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5920 break; 5923 break;
5921 case cpu_to_be16(ETH_P_IPV6): 5924 case cpu_to_be16(ETH_P_IPV6):
5922 /* XXX what about other V6 headers?? */ 5925 /* XXX what about other V6 headers?? */
5923 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 5926 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5924 type_tucmd_mlhl |= 5927 type_tucmd_mlhl |=
5925 IXGBE_ADVTXD_TUCMD_L4T_TCP; 5928 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5926 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) 5929 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
5927 type_tucmd_mlhl |= 5930 type_tucmd_mlhl |=
5928 IXGBE_ADVTXD_TUCMD_L4T_SCTP; 5931 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5929 break; 5932 break;
5930 default: 5933 default:
5931 if (unlikely(net_ratelimit())) { 5934 if (unlikely(net_ratelimit())) {
@@ -5956,9 +5959,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5956} 5959}
5957 5960
5958static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 5961static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5959 struct ixgbe_ring *tx_ring, 5962 struct ixgbe_ring *tx_ring,
5960 struct sk_buff *skb, u32 tx_flags, 5963 struct sk_buff *skb, u32 tx_flags,
5961 unsigned int first) 5964 unsigned int first)
5962{ 5965{
5963 struct pci_dev *pdev = adapter->pdev; 5966 struct pci_dev *pdev = adapter->pdev;
5964 struct ixgbe_tx_buffer *tx_buffer_info; 5967 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6053,7 +6056,7 @@ dma_error:
6053 6056
6054 /* clear timestamp and dma mappings for remaining portion of packet */ 6057 /* clear timestamp and dma mappings for remaining portion of packet */
6055 while (count--) { 6058 while (count--) {
6056 if (i==0) 6059 if (i == 0)
6057 i += tx_ring->count; 6060 i += tx_ring->count;
6058 i--; 6061 i--;
6059 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6062 tx_buffer_info = &tx_ring->tx_buffer_info[i];
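Beyond the spacing fix around i == 0, the unwind loop is a tidy example of stepping backwards through a circular ring. The index arithmetic, annotated (the per-slot unmap that follows is elided in the hunk):

	while (count--) {
		if (i == 0)
			i += tx_ring->count;	/* wrap slot 0 -> ring end */
		i--;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		/* unmap/free slot i here (elided in the hunk) */
	}

The wrap must happen before the decrement so index 0 steps to count - 1 rather than underflowing.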
@@ -6064,8 +6067,8 @@ dma_error:
6064} 6067}
6065 6068
6066static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6069static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6067 struct ixgbe_ring *tx_ring, 6070 struct ixgbe_ring *tx_ring,
6068 int tx_flags, int count, u32 paylen, u8 hdr_len) 6071 int tx_flags, int count, u32 paylen, u8 hdr_len)
6069{ 6072{
6070 union ixgbe_adv_tx_desc *tx_desc = NULL; 6073 union ixgbe_adv_tx_desc *tx_desc = NULL;
6071 struct ixgbe_tx_buffer *tx_buffer_info; 6074 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6084,17 +6087,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6084 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 6087 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6085 6088
6086 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6089 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6087 IXGBE_ADVTXD_POPTS_SHIFT; 6090 IXGBE_ADVTXD_POPTS_SHIFT;
6088 6091
6089 /* use index 1 context for tso */ 6092 /* use index 1 context for tso */
6090 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 6093 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6091 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 6094 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6092 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 6095 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
6093 IXGBE_ADVTXD_POPTS_SHIFT; 6096 IXGBE_ADVTXD_POPTS_SHIFT;
6094 6097
6095 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 6098 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6096 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6099 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6097 IXGBE_ADVTXD_POPTS_SHIFT; 6100 IXGBE_ADVTXD_POPTS_SHIFT;
6098 6101
6099 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 6102 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6100 olinfo_status |= IXGBE_ADVTXD_CC; 6103 olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6111,7 +6114,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6111 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); 6114 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
6112 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 6115 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6113 tx_desc->read.cmd_type_len = 6116 tx_desc->read.cmd_type_len =
6114 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 6117 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6115 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 6118 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6116 i++; 6119 i++;
6117 if (i == tx_ring->count) 6120 if (i == tx_ring->count)
@@ -6133,7 +6136,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6133} 6136}
6134 6137
6135static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6138static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6136 int queue, u32 tx_flags) 6139 int queue, u32 tx_flags)
6137{ 6140{
6138 struct ixgbe_atr_input atr_input; 6141 struct ixgbe_atr_input atr_input;
6139 struct tcphdr *th; 6142 struct tcphdr *th;
@@ -6161,7 +6164,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6161 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6164 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6162 6165
6163 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6166 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6164 IXGBE_TX_FLAGS_VLAN_SHIFT; 6167 IXGBE_TX_FLAGS_VLAN_SHIFT;
6165 src_ipv4_addr = iph->saddr; 6168 src_ipv4_addr = iph->saddr;
6166 dst_ipv4_addr = iph->daddr; 6169 dst_ipv4_addr = iph->daddr;
6167 flex_bytes = eth->h_proto; 6170 flex_bytes = eth->h_proto;
@@ -6180,7 +6183,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6180} 6183}
6181 6184
6182static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6185static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6183 struct ixgbe_ring *tx_ring, int size) 6186 struct ixgbe_ring *tx_ring, int size)
6184{ 6187{
6185 netif_stop_subqueue(netdev, tx_ring->queue_index); 6188 netif_stop_subqueue(netdev, tx_ring->queue_index);
6186 /* Herbert's original patch had: 6189 /* Herbert's original patch had:
@@ -6200,7 +6203,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6200} 6203}
6201 6204
6202static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6205static int ixgbe_maybe_stop_tx(struct net_device *netdev,
6203 struct ixgbe_ring *tx_ring, int size) 6206 struct ixgbe_ring *tx_ring, int size)
6204{ 6207{
6205 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6208 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6206 return 0; 6209 return 0;
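ixgbe_maybe_stop_tx() above is the fast-path half of the classic stop/recheck/wake sequence (the comment citing Herbert's original patch marks it). The elided middle of __ixgbe_maybe_stop_tx presumably follows the canonical form; a sketch under that assumption:

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	smp_mb();	/* order the stop against the re-read below */
	/* The cleanup path may have freed descriptors between our
	 * first check and the stop; re-check and undo if so. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;
	netif_start_subqueue(netdev, tx_ring->queue_index);
	return 0;

Without the barrier and the re-check, a completion racing with the stop could leave the queue stopped forever.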
@@ -6343,10 +6346,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6343 if (tx_ring->atr_sample_rate) { 6346 if (tx_ring->atr_sample_rate) {
6344 ++tx_ring->atr_count; 6347 ++tx_ring->atr_count;
6345 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 6348 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6346 test_bit(__IXGBE_FDIR_INIT_DONE, 6349 test_bit(__IXGBE_FDIR_INIT_DONE,
6347 &tx_ring->reinit_state)) { 6350 &tx_ring->reinit_state)) {
6348 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6351 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6349 tx_flags); 6352 tx_flags);
6350 tx_ring->atr_count = 0; 6353 tx_ring->atr_count = 0;
6351 } 6354 }
6352 } 6355 }
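The transmit-path hunk above re-indents the ATR sampling gate. The logic: feed only one packet in every atr_sample_rate to the flow director, and only once the FDIR tables have finished (re)initializing, so sampling never races a table rewrite. Annotated:

	if (tx_ring->atr_sample_rate) {
		++tx_ring->atr_count;
		if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
		    test_bit(__IXGBE_FDIR_INIT_DONE,
			     &tx_ring->reinit_state)) {
			ixgbe_atr(adapter, skb, tx_ring->queue_index,
				  tx_flags);
			tx_ring->atr_count = 0;	/* restart the window */
		}
	}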
@@ -6354,7 +6357,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6354 txq->tx_bytes += skb->len; 6357 txq->tx_bytes += skb->len;
6355 txq->tx_packets++; 6358 txq->tx_packets++;
6356 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 6359 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
6357 hdr_len); 6360 hdr_len);
6358 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 6361 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6359 6362
6360 } else { 6363 } else {
@@ -6506,7 +6509,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
6506#endif 6509#endif
6507 6510
6508static const struct net_device_ops ixgbe_netdev_ops = { 6511static const struct net_device_ops ixgbe_netdev_ops = {
6509 .ndo_open = ixgbe_open, 6512 .ndo_open = ixgbe_open,
6510 .ndo_stop = ixgbe_close, 6513 .ndo_stop = ixgbe_close,
6511 .ndo_start_xmit = ixgbe_xmit_frame, 6514 .ndo_start_xmit = ixgbe_xmit_frame,
6512 .ndo_select_queue = ixgbe_select_queue, 6515 .ndo_select_queue = ixgbe_select_queue,
@@ -6601,7 +6604,7 @@ err_novfs:
6601 * and a hardware reset occur. 6604 * and a hardware reset occur.
6602 **/ 6605 **/
6603static int __devinit ixgbe_probe(struct pci_dev *pdev, 6606static int __devinit ixgbe_probe(struct pci_dev *pdev,
6604 const struct pci_device_id *ent) 6607 const struct pci_device_id *ent)
6605{ 6608{
6606 struct net_device *netdev; 6609 struct net_device *netdev;
6607 struct ixgbe_adapter *adapter = NULL; 6610 struct ixgbe_adapter *adapter = NULL;
@@ -6646,7 +6649,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6646 } 6649 }
6647 6650
6648 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 6651 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6649 IORESOURCE_MEM), ixgbe_driver_name); 6652 IORESOURCE_MEM), ixgbe_driver_name);
6650 if (err) { 6653 if (err) {
6651 dev_err(&pdev->dev, 6654 dev_err(&pdev->dev,
6652 "pci_request_selected_regions failed 0x%x\n", err); 6655 "pci_request_selected_regions failed 0x%x\n", err);
@@ -6686,7 +6689,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6686 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 6689 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6687 6690
6688 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 6691 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6689 pci_resource_len(pdev, 0)); 6692 pci_resource_len(pdev, 0));
6690 if (!hw->hw_addr) { 6693 if (!hw->hw_addr) {
6691 err = -EIO; 6694 err = -EIO;
6692 goto err_ioremap; 6695 goto err_ioremap;
@@ -6740,7 +6743,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6740 6743
6741 /* a new SFP+ module arrival, called from GPI SDP2 context */ 6744 /* a new SFP+ module arrival, called from GPI SDP2 context */
6742 INIT_WORK(&adapter->sfp_config_module_task, 6745 INIT_WORK(&adapter->sfp_config_module_task,
6743 ixgbe_sfp_config_module_task); 6746 ixgbe_sfp_config_module_task);
6744 6747
6745 ii->get_invariants(hw); 6748 ii->get_invariants(hw);
6746 6749
@@ -6792,10 +6795,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6792 ixgbe_probe_vf(adapter, ii); 6795 ixgbe_probe_vf(adapter, ii);
6793 6796
6794 netdev->features = NETIF_F_SG | 6797 netdev->features = NETIF_F_SG |
6795 NETIF_F_IP_CSUM | 6798 NETIF_F_IP_CSUM |
6796 NETIF_F_HW_VLAN_TX | 6799 NETIF_F_HW_VLAN_TX |
6797 NETIF_F_HW_VLAN_RX | 6800 NETIF_F_HW_VLAN_RX |
6798 NETIF_F_HW_VLAN_FILTER; 6801 NETIF_F_HW_VLAN_FILTER;
6799 6802
6800 netdev->features |= NETIF_F_IPV6_CSUM; 6803 netdev->features |= NETIF_F_IPV6_CSUM;
6801 netdev->features |= NETIF_F_TSO; 6804 netdev->features |= NETIF_F_TSO;
@@ -6875,7 +6878,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6875 switch (pdev->device) { 6878 switch (pdev->device) {
6876 case IXGBE_DEV_ID_82599_KX4: 6879 case IXGBE_DEV_ID_82599_KX4:
6877 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 6880 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6878 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 6881 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6879 break; 6882 break;
6880 default: 6883 default:
6881 adapter->wol = 0; 6884 adapter->wol = 0;
@@ -6888,13 +6891,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6888 6891
6889 /* print bus type/speed/width info */ 6892 /* print bus type/speed/width info */
6890 e_dev_info("(PCI Express:%s:%s) %pM\n", 6893 e_dev_info("(PCI Express:%s:%s) %pM\n",
6891 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 6894 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
6892 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 6895 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
6893 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 6896 "Unknown"),
6894 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 6897 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
6895 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 6898 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
6896 "Unknown"), 6899 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6897 netdev->dev_addr); 6900 "Unknown"),
6901 netdev->dev_addr);
6898 ixgbe_read_pba_num_generic(hw, &part_num); 6902 ixgbe_read_pba_num_generic(hw, &part_num);
6899 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 6903 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6900 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 6904 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
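The bus-info hunk is the one place in this stretch where the expression itself changes shape: the nested, over-parenthesized conditionals become a flat ?: chain with a trailing "Unknown" default. An equivalent local-variable form, shown only to make the chain's precedence explicit (not what the patch does):

	const char *speed =
		hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
		hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
		"Unknown";

?: associates right-to-left, so the chain needs no inner parentheses.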
@@ -6941,7 +6945,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6941 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6945 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6942 6946
6943 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 6947 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
6944 INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); 6948 INIT_WORK(&adapter->check_overtemp_task,
6949 ixgbe_check_overtemp_task);
6945#ifdef CONFIG_IXGBE_DCA 6950#ifdef CONFIG_IXGBE_DCA
6946 if (dca_add_requester(&pdev->dev) == 0) { 6951 if (dca_add_requester(&pdev->dev) == 0) {
6947 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 6952 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6977,8 +6982,8 @@ err_eeprom:
6977err_ioremap: 6982err_ioremap:
6978 free_netdev(netdev); 6983 free_netdev(netdev);
6979err_alloc_etherdev: 6984err_alloc_etherdev:
6980 pci_release_selected_regions(pdev, pci_select_bars(pdev, 6985 pci_release_selected_regions(pdev,
6981 IORESOURCE_MEM)); 6986 pci_select_bars(pdev, IORESOURCE_MEM));
6982err_pci_reg: 6987err_pci_reg:
6983err_dma: 6988err_dma:
6984 pci_disable_device(pdev); 6989 pci_disable_device(pdev);
@@ -7045,7 +7050,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7045 7050
7046 iounmap(adapter->hw.hw_addr); 7051 iounmap(adapter->hw.hw_addr);
7047 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7052 pci_release_selected_regions(pdev, pci_select_bars(pdev,
7048 IORESOURCE_MEM)); 7053 IORESOURCE_MEM));
7049 7054
7050 e_dev_info("complete\n"); 7055 e_dev_info("complete\n");
7051 7056
@@ -7065,7 +7070,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7065 * this device has been detected. 7070 * this device has been detected.
7066 */ 7071 */
7067static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7072static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7068 pci_channel_state_t state) 7073 pci_channel_state_t state)
7069{ 7074{
7070 struct net_device *netdev = pci_get_drvdata(pdev); 7075 struct net_device *netdev = pci_get_drvdata(pdev);
7071 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7076 struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7201,12 +7206,12 @@ static void __exit ixgbe_exit_module(void)
7201 7206
7202#ifdef CONFIG_IXGBE_DCA 7207#ifdef CONFIG_IXGBE_DCA
7203static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 7208static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7204 void *p) 7209 void *p)
7205{ 7210{
7206 int ret_val; 7211 int ret_val;
7207 7212
7208 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 7213 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
7209 __ixgbe_notify_dca); 7214 __ixgbe_notify_dca);
7210 7215
7211 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 7216 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7212} 7217}
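The final hunk re-wraps the DCA notifier. driver_for_each_device() walks every device bound to ixgbe_driver and stops at the first callback returning nonzero, which is what makes the ret_val ? NOTIFY_BAD : NOTIFY_DONE mapping work. The callback's shape, sketched (its real body is outside this diff):

	static int __ixgbe_notify_dca(struct device *dev, void *data)
	{
		unsigned long event = *(unsigned long *)data;

		/* per-device DCA_PROVIDER_ADD/REMOVE handling; any
		 * nonzero return aborts the walk -> NOTIFY_BAD */
		return 0;
	}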