Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 676
 1 file changed, 529 insertions(+), 147 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 951b73cf5ca..45e3532b166 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,12 +45,13 @@
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
         "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
          board_82598 },
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
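
The new max_vfs parameter only declares the knob; the probe path (outside this section) is what clamps it and calls into the PCI core. A minimal sketch of such a consumer — the 64-pool limit and helper name are assumptions for illustration, not code from this patch:

#include <linux/pci.h>

#define EXAMPLE_MAX_VF_FUNCTIONS 64     /* assumed: 64 pools, one kept by the PF */

/* Hypothetical probe-time consumer of a max_vfs-style module parameter. */
static void example_enable_sriov(struct pci_dev *pdev, unsigned int requested)
{
        unsigned int nr_vfs = min_t(unsigned int, requested,
                                    EXAMPLE_MAX_VF_FUNCTIONS - 1);
        int err;

        if (!nr_vfs)
                return;

        err = pci_enable_sriov(pdev, nr_vfs);
        if (err)
                dev_err(&pdev->dev, "failed to enable SR-IOV: %d\n", err);
}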
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        u32 gcr;
+        u32 gpie;
+        u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+        /* disable iov and allow time for transactions to clear */
+        pci_disable_sriov(adapter->pdev);
+#endif
+
+        /* turn off device IOV mode */
+        gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+        gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+        /* set default pool back to 0 */
+        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+        /* take a breather then clean up driver data */
+        msleep(100);
+        if (adapter->vfinfo)
+                kfree(adapter->vfinfo);
+        adapter->vfinfo = NULL;
+
+        adapter->num_vfs = 0;
+        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
         u32 ctrl_ext;
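
One nit in the teardown above: kfree(NULL) is defined to be a no-op, so the `if (adapter->vfinfo)` guard is redundant. The tail of the function could equally be written as below (same behavior; an assumed simplification, not what the commit does):

#include <linux/slab.h>

/* Equivalent tail of ixgbe_disable_sriov(): kfree() tolerates NULL. */
static void example_free_vfinfo(struct ixgbe_adapter *adapter)
{
        kfree(adapter->vfinfo);         /* safe even when vfinfo == NULL */
        adapter->vfinfo = NULL;
        adapter->num_vfs = 0;
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}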
@@ -451,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 {
         u32 rxctrl;
         int cpu = get_cpu();
-        int q = rx_ring - adapter->rx_ring;
+        int q = rx_ring->reg_idx;
 
         if (rx_ring->cpu != cpu) {
                 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -479,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 {
         u32 txctrl;
         int cpu = get_cpu();
-        int q = tx_ring - adapter->tx_ring;
+        int q = tx_ring->reg_idx;
         struct ixgbe_hw *hw = &adapter->hw;
 
         if (tx_ring->cpu != cpu) {
@@ -513,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
         IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                adapter->tx_ring[i].cpu = -1;
-                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+                adapter->tx_ring[i]->cpu = -1;
+                ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
         }
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                adapter->rx_ring[i].cpu = -1;
-                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+                adapter->rx_ring[i]->cpu = -1;
+                ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
         }
 }
 
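Most of the mechanical churn in this patch comes from one data-structure change: adapter->tx_ring and adapter->rx_ring turn from embedded arrays of struct ixgbe_ring into arrays of pointers, so every `&adapter->tx_ring[i]` becomes `adapter->tx_ring[i]` and every `.` becomes `->`. In miniature (assumed shapes; such a split is commonly done so each ring can be allocated separately, e.g. per NUMA node — the patch does not state its motivation in this file):

#define EXAMPLE_MAX_QUEUES 64   /* illustrative bound */

struct example_ring { int reg_idx; int cpu; };

/* before: ring structs embedded in the adapter, used as &a->tx_ring[i] */
struct example_adapter_old {
        struct example_ring tx_ring[EXAMPLE_MAX_QUEUES];
};

/* after: the adapter holds pointers to individually allocated rings,
 * used as a->tx_ring[i] */
struct example_adapter_new {
        struct example_ring *tx_ring[EXAMPLE_MAX_QUEUES];
};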
@@ -775,6 +818,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
         return skb;
 }
 
+struct ixgbe_rsc_cb {
+        dma_addr_t dma;
+};
+
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
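
IXGBE_RSC_CB works because every sk_buff carries a 48-byte scratch area, skb->cb, that the current owner of the skb may use freely; overlaying a small struct on it is a standard driver idiom. A minimal sketch of the pattern, including the usual compile-time guard that the overlay still fits (the guard is common practice, not part of this commit):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Private per-skb scratch state, overlaid on the 48-byte skb->cb[]. */
struct example_cb {
        dma_addr_t dma;
};

#define EXAMPLE_CB(skb) ((struct example_cb *)(skb)->cb)

static void example_stash_dma(struct sk_buff *skb, dma_addr_t dma)
{
        /* fail the build if the overlay ever outgrows cb[] */
        BUILD_BUG_ON(sizeof(struct example_cb) >
                     sizeof(((struct sk_buff *)0)->cb));
        EXAMPLE_CB(skb)->dma = dma;
}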
@@ -806,6 +855,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                         break;
                 (*work_done)++;
 
+                rmb(); /* read descriptor and rx_buffer_info after status DD */
                 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                         hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                         len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
@@ -823,9 +873,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 rx_buffer_info->skb = NULL;
 
                 if (rx_buffer_info->dma) {
-                        pci_unmap_single(pdev, rx_buffer_info->dma,
-                                         rx_ring->rx_buf_len,
-                                         PCI_DMA_FROMDEVICE);
+                        if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+                            (!(staterr & IXGBE_RXD_STAT_EOP)) &&
+                            (!(skb->prev)))
+                                /*
+                                 * When HWRSC is enabled, delay unmapping
+                                 * of the first packet. It carries the
+                                 * header information, HW may still
+                                 * access the header after the writeback.
+                                 * Only unmap it when EOP is reached
+                                 */
+                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+                        else
+                                pci_unmap_single(pdev, rx_buffer_info->dma,
+                                                 rx_ring->rx_buf_len,
+                                                 PCI_DMA_FROMDEVICE);
                         rx_buffer_info->dma = 0;
                         skb_put(skb, len);
                 }
@@ -873,6 +935,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 if (skb->prev)
                         skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                        if (IXGBE_RSC_CB(skb)->dma)
+                                pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+                                                 rx_ring->rx_buf_len,
+                                                 PCI_DMA_FROMDEVICE);
                         if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
                                 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
                         else
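
The two hunks above form one mechanism: for a hardware-coalesced (RSC) chain, the DMA mapping of the head buffer is parked in skb->cb at receive time — the hardware may still write the header area — and is only unmapped once the EOP descriptor arrives. Stripped of driver detail, the life cycle looks like this sketch (types and names are illustrative; EXAMPLE_CB is the skb->cb overlay from the previous sketch):

#include <linux/pci.h>
#include <linux/skbuff.h>

struct example_ring {
        struct pci_dev *pdev;
        unsigned int buf_len;
};

/* Illustrative deferred head-buffer unmap across an RSC chain. */
static void example_rx_buffer_done(struct example_ring *ring,
                                   struct sk_buff *skb, dma_addr_t dma,
                                   bool rsc, bool eop)
{
        if (rsc && !eop && !skb->prev)
                EXAMPLE_CB(skb)->dma = dma;     /* head: HW may still read it */
        else
                pci_unmap_single(ring->pdev, dma, ring->buf_len,
                                 PCI_DMA_FROMDEVICE);

        if (rsc && eop && EXAMPLE_CB(skb)->dma) {
                /* chain complete: the head buffer is finally safe to unmap */
                pci_unmap_single(ring->pdev, EXAMPLE_CB(skb)->dma,
                                 ring->buf_len, PCI_DMA_FROMDEVICE);
                EXAMPLE_CB(skb)->dma = 0;
        }
}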
@@ -989,7 +1055,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                        adapter->num_rx_queues);
 
                 for (i = 0; i < q_vector->rxr_count; i++) {
-                        j = adapter->rx_ring[r_idx].reg_idx;
+                        j = adapter->rx_ring[r_idx]->reg_idx;
                         ixgbe_set_ivar(adapter, 0, j, v_idx);
                         r_idx = find_next_bit(q_vector->rxr_idx,
                                               adapter->num_rx_queues,
@@ -999,7 +1065,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                        adapter->num_tx_queues);
 
                 for (i = 0; i < q_vector->txr_count; i++) {
-                        j = adapter->tx_ring[r_idx].reg_idx;
+                        j = adapter->tx_ring[r_idx]->reg_idx;
                         ixgbe_set_ivar(adapter, 1, j, v_idx);
                         r_idx = find_next_bit(q_vector->txr_idx,
                                               adapter->num_tx_queues,
@@ -1025,7 +1091,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
         /* set up to autoclear timer, and the vectors */
         mask = IXGBE_EIMS_ENABLE_MASK;
-        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+        if (adapter->num_vfs)
+                mask &= ~(IXGBE_EIMS_OTHER |
+                          IXGBE_EIMS_MAILBOX |
+                          IXGBE_EIMS_LSC);
+        else
+                mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -1134,7 +1205,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
-                tx_ring = &(adapter->tx_ring[r_idx]);
+                tx_ring = adapter->tx_ring[r_idx];
                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                            q_vector->tx_itr,
                                            tx_ring->total_packets,
@@ -1149,7 +1220,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         for (i = 0; i < q_vector->rxr_count; i++) {
-                rx_ring = &(adapter->rx_ring[r_idx]);
+                rx_ring = adapter->rx_ring[r_idx];
                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                            q_vector->rx_itr,
                                            rx_ring->total_packets,
@@ -1254,6 +1325,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
         if (eicr & IXGBE_EICR_LSC)
                 ixgbe_check_lsc(adapter);
 
+        if (eicr & IXGBE_EICR_MAILBOX)
+                ixgbe_msg_task(adapter);
+
         if (hw->mac.type == ixgbe_mac_82598EB)
                 ixgbe_check_fan_failure(adapter, eicr);
 
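The mailbox bit is the PF side of the new PF/VF message channel: a VF write raises IXGBE_EICR_MAILBOX and the PF services each VF's mailbox in turn. ixgbe_msg_task() itself lives in the new ixgbe_sriov.c, so only an assumed outline of its shape can be given here:

/* Assumed outline of the PF-side mailbox service loop (the real
 * ixgbe_msg_task() is added in ixgbe_sriov.c, not shown in this file). */
static void example_msg_task(struct ixgbe_adapter *adapter)
{
        unsigned int vf;

        for (vf = 0; vf < adapter->num_vfs; vf++) {
                /* the check_for_* mailbox helpers return 0 when work is pending */
                if (!ixgbe_check_for_msg(&adapter->hw, vf))
                        ixgbe_rcv_msg_from_vf(adapter, vf);
                if (!ixgbe_check_for_ack(&adapter->hw, vf))
                        ixgbe_rcv_ack_from_vf(adapter, vf);
        }
}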
@@ -1268,7 +1342,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                         netif_tx_stop_all_queues(netdev);
                         for (i = 0; i < adapter->num_tx_queues; i++) {
                                 struct ixgbe_ring *tx_ring =
-                                                          &adapter->tx_ring[i];
+                                                           adapter->tx_ring[i];
                                 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                                        &tx_ring->reinit_state))
                                         schedule_work(&adapter->fdir_reinit_task);
@@ -1327,7 +1401,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
-                tx_ring = &(adapter->tx_ring[r_idx]);
+                tx_ring = adapter->tx_ring[r_idx];
                 tx_ring->total_bytes = 0;
                 tx_ring->total_packets = 0;
                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1355,7 +1429,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         for (i = 0; i < q_vector->rxr_count; i++) {
-                rx_ring = &(adapter->rx_ring[r_idx]);
+                rx_ring = adapter->rx_ring[r_idx];
                 rx_ring->total_bytes = 0;
                 rx_ring->total_packets = 0;
                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1385,7 +1459,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
-                ring = &(adapter->tx_ring[r_idx]);
+                ring = adapter->tx_ring[r_idx];
                 ring->total_bytes = 0;
                 ring->total_packets = 0;
                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1394,7 +1468,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         for (i = 0; i < q_vector->rxr_count; i++) {
-                ring = &(adapter->rx_ring[r_idx]);
+                ring = adapter->rx_ring[r_idx];
                 ring->total_bytes = 0;
                 ring->total_packets = 0;
                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1425,7 +1499,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
         long r_idx;
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-        rx_ring = &(adapter->rx_ring[r_idx]);
+        rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                 ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1466,7 +1540,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
-                ring = &(adapter->tx_ring[r_idx]);
+                ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                         ixgbe_update_tx_dca(adapter, ring);
@@ -1482,7 +1556,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
         budget = max(budget, 1);
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         for (i = 0; i < q_vector->rxr_count; i++) {
-                ring = &(adapter->rx_ring[r_idx]);
+                ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                         ixgbe_update_rx_dca(adapter, ring);
@@ -1493,7 +1567,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
         }
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-        ring = &(adapter->rx_ring[r_idx]);
+        ring = adapter->rx_ring[r_idx];
         /* If all Rx work done, exit the polling mode */
         if (work_done < budget) {
                 napi_complete(napi);
@@ -1526,7 +1600,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
         long r_idx;
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-        tx_ring = &(adapter->tx_ring[r_idx]);
+        tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                 ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1711,8 +1785,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
         u8 current_itr;
         u32 new_itr = q_vector->eitr;
-        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
-        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+        struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 
         q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                             q_vector->tx_itr,
@@ -1768,6 +1842,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
                 mask |= IXGBE_EIMS_ECC;
                 mask |= IXGBE_EIMS_GPI_SDP1;
                 mask |= IXGBE_EIMS_GPI_SDP2;
+                if (adapter->num_vfs)
+                        mask |= IXGBE_EIMS_MAILBOX;
         }
         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1852,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
         ixgbe_irq_enable_queues(adapter, ~0);
         IXGBE_WRITE_FLUSH(&adapter->hw);
+
+        if (adapter->num_vfs > 32) {
+                u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+        }
 }
 
 /**
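The EITRSEL value is a bitmask trick: with more than 32 VFs, the per-pool interrupt selection spills past one 32-bit register, and (1 << (num_vfs - 32)) - 1 sets one bit per spilled VF. A standalone check of the arithmetic (plain C, illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int num_vfs = 40;                      /* 8 VFs past the first 32 */
        uint32_t eitrsel = (1u << (num_vfs - 32)) - 1;

        /* (1 << 8) - 1 = 0xff: bits 0-7 select VFs 32-39 */
        printf("EITRSEL = 0x%x\n", eitrsel);
        return 0;
}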
@@ -1817,10 +1898,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
         ixgbe_check_fan_failure(adapter, eicr);
 
         if (napi_schedule_prep(&(q_vector->napi))) {
-                adapter->tx_ring[0].total_packets = 0;
-                adapter->tx_ring[0].total_bytes = 0;
-                adapter->rx_ring[0].total_packets = 0;
-                adapter->rx_ring[0].total_bytes = 0;
+                adapter->tx_ring[0]->total_packets = 0;
+                adapter->tx_ring[0]->total_bytes = 0;
+                adapter->rx_ring[0]->total_packets = 0;
+                adapter->rx_ring[0]->total_bytes = 0;
                 /* would disable interrupts here but EIAM disabled it */
                 __napi_schedule(&(q_vector->napi));
         }
@@ -1905,6 +1986,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+                if (adapter->num_vfs > 32)
+                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
         }
         IXGBE_WRITE_FLUSH(&adapter->hw);
         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1950,7 +2033,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
         /* Setup the HW Tx Head and Tail descriptor pointers */
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                struct ixgbe_ring *ring = &adapter->tx_ring[i];
+                struct ixgbe_ring *ring = adapter->tx_ring[i];
                 j = ring->reg_idx;
                 tdba = ring->dma;
                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1960,8 +2043,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
-                adapter->tx_ring[i].head = IXGBE_TDH(j);
-                adapter->tx_ring[i].tail = IXGBE_TDT(j);
+                adapter->tx_ring[i]->head = IXGBE_TDH(j);
+                adapter->tx_ring[i]->tail = IXGBE_TDT(j);
                 /*
                  * Disable Tx Head Writeback RO bit, since this hoses
                  * bookkeeping if things aren't delivered in order.
@@ -1989,18 +2072,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
         if (hw->mac.type == ixgbe_mac_82599EB) {
                 u32 rttdcs;
+                u32 mask;
 
                 /* disable the arbiter while setting MTQC */
                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
-                /* We enable 8 traffic classes, DCB only */
-                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
-                                        IXGBE_MTQC_8TC_8TQ));
-                else
+                /* set transmit pool layout */
+                mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+                switch (adapter->flags & mask) {
+
+                case (IXGBE_FLAG_SRIOV_ENABLED):
+                        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                        (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+                        break;
+
+                case (IXGBE_FLAG_DCB_ENABLED):
+                        /* We enable 8 traffic classes, DCB only */
+                        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                        (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+                        break;
+
+                default:
                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+                        break;
+                }
 
                 /* re-eable the arbiter */
                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
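Switching on `adapter->flags & mask` works because the feature flags are disjoint single bits, so each case label is itself a possible masked value, and a combination such as SR-IOV together with DCB falls through to default. A small standalone illustration of the idiom (flag values made up):

#include <stdio.h>

#define FLAG_SRIOV (1u << 0)
#define FLAG_DCB   (1u << 1)

static const char *tx_pool_layout(unsigned int flags)
{
        switch (flags & (FLAG_SRIOV | FLAG_DCB)) {
        case FLAG_SRIOV:
                return "VT enabled, 64 VF pools";
        case FLAG_DCB:
                return "RT enabled, 8 TC / 8 TQ";
        default:        /* neither set, or both set */
                return "64 queues, 1 pool";
        }
}

int main(void)
{
        printf("%s\n", tx_pool_layout(FLAG_SRIOV));
        return 0;
}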
@@ -2059,12 +2156,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
                                 | IXGBE_FLAG_DCB_ENABLED
 #endif
+                                | IXGBE_FLAG_SRIOV_ENABLED
                                );
 
         switch (mask) {
         case (IXGBE_FLAG_RSS_ENABLED):
                 mrqc = IXGBE_MRQC_RSSEN;
                 break;
+        case (IXGBE_FLAG_SRIOV_ENABLED):
+                mrqc = IXGBE_MRQC_VMDQEN;
+                break;
 #ifdef CONFIG_IXGBE_DCB
         case (IXGBE_FLAG_DCB_ENABLED):
                 mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2090,7 +2191,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
         u32 rscctrl;
         int rx_buf_len;
 
-        rx_ring = &adapter->rx_ring[index];
+        rx_ring = adapter->rx_ring[index];
         j = rx_ring->reg_idx;
         rx_buf_len = rx_ring->rx_buf_len;
         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2145,7 +2246,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
         int rx_buf_len;
 
         /* Decide whether to use packet split mode or not */
-        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+        /* Do not use packet split if we're in SR-IOV Mode */
+        if (!adapter->num_vfs)
+                adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
         /* Set the RX buffer length according to the mode */
         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2260,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                                       IXGBE_PSRTYPE_IPV4HDR |
                                       IXGBE_PSRTYPE_IPV6HDR |
                                       IXGBE_PSRTYPE_L2HDR;
-                        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+                        IXGBE_WRITE_REG(hw,
+                                        IXGBE_PSRTYPE(adapter->num_vfs),
+                                        psrtype);
                 }
         } else {
                 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2184,7 +2289,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 #endif
         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
-        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+        rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
         /* disable receives while setting up the descriptors */
         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2194,7 +2299,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
          * the Base and Length of the Rx Descriptor Ring
          */
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                rx_ring = &adapter->rx_ring[i];
+                rx_ring = adapter->rx_ring[i];
                 rdba = rx_ring->dma;
                 j = rx_ring->reg_idx;
                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2243,6 +2348,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
         }
 
+        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                u32 vt_reg_bits;
+                u32 reg_offset, vf_shift;
+                u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+                vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+                        | IXGBE_VT_CTL_REPLEN;
+                vt_reg_bits |= (adapter->num_vfs <<
+                                IXGBE_VT_CTL_POOL_SHIFT);
+                IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+                IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+                vf_shift = adapter->num_vfs % 32;
+                reg_offset = adapter->num_vfs / 32;
+                IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+                IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+                IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+                IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+                /* Enable only the PF's pool for Tx/Rx */
+                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+                IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+                ixgbe_set_vmolr(hw, adapter->num_vfs);
+        }
+
         /* Program MRQC for the distribution of queues */
         mrqc = ixgbe_setup_mrqc(adapter);
 
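VFRE and VFTE are 64-pool enable maps split across two 32-bit registers, one bit per pool; num_vfs / 32 picks the register and num_vfs % 32 the bit, so the two writes enable exactly the PF's own pool, the first one after the VFs. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int num_vfs = 40;              /* the PF then owns pool 40 */
        unsigned int reg_offset = num_vfs / 32; /* -> VFRE(1) / VFTE(1) */
        unsigned int vf_shift = num_vfs % 32;   /* -> bit 8 */

        printf("VFRE(%u) |= 0x%08x\n", reg_offset, 1u << vf_shift);
        return 0;
}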
@@ -2274,6 +2403,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
         }
         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
+        if (adapter->num_vfs) {
+                u32 reg;
+
+                /* Map PF MAC address in RAR Entry 0 to first pool
+                 * following VFs */
+                hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+                /* Set up VF register offsets for selected VT Mode, i.e.
+                 * 64 VFs for SR-IOV */
+                reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+                reg |= IXGBE_GCR_EXT_SRIOV;
+                IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+        }
+
         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2455,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
+        int pool_ndx = adapter->num_vfs;
 
         /* add VID to filter table */
-        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
+        int pool_ndx = adapter->num_vfs;
 
         if (!test_bit(__IXGBE_DOWN, &adapter->state))
                 ixgbe_irq_disable(adapter);
@@ -2331,7 +2476,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                 ixgbe_irq_enable(adapter);
 
         /* remove VID from filter table */
-        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2361,7 +2506,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
         } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                 for (i = 0; i < adapter->num_rx_queues; i++) {
                         u32 ctrl;
-                        j = adapter->rx_ring[i].reg_idx;
+                        j = adapter->rx_ring[i]->reg_idx;
                         ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                         ctrl |= IXGBE_RXDCTL_VME;
                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2414,7 +2559,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
  * responsible for configuring the hardware for proper unicast, multicast and
  * promiscuous mode.
  **/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,14 +2591,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
         /* reprogram secondary unicast list */
-        hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+        hw->mac.ops.update_uc_addr_list(hw, netdev);
 
         /* reprogram multicast list */
-        addr_count = netdev->mc_count;
+        addr_count = netdev_mc_count(netdev);
         if (addr_count)
                 addr_list = netdev->mc_list->dmi_addr;
         hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                         ixgbe_addr_list_itr);
+        if (adapter->num_vfs)
+                ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2522,7 +2669,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
         ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                j = adapter->tx_ring[i].reg_idx;
+                j = adapter->tx_ring[i]->reg_idx;
                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                 /* PThresh workaround for Tx hang with DFP enabled. */
                 txdctl |= 32;
@@ -2539,7 +2686,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                j = adapter->rx_ring[i].reg_idx;
+                j = adapter->rx_ring[i]->reg_idx;
                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                 vlnctrl |= IXGBE_RXDCTL_VME;
                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2579,7 +2726,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                 for (i = 0; i < adapter->num_tx_queues; i++)
-                        adapter->tx_ring[i].atr_sample_rate =
+                        adapter->tx_ring[i]->atr_sample_rate =
                                                        adapter->atr_sample_rate;
                 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
         } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2589,8 +2736,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
         ixgbe_configure_tx(adapter);
         ixgbe_configure_rx(adapter);
         for (i = 0; i < adapter->num_rx_queues; i++)
-                ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-                                       (adapter->rx_ring[i].count - 1));
+                ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+                                       (adapter->rx_ring[i]->count - 1));
 }
 
 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2673,7 +2820,7 @@ link_cfg_out:
 static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                               int rxr)
 {
-        int j = adapter->rx_ring[rxr].reg_idx;
+        int j = adapter->rx_ring[rxr]->reg_idx;
         int k;
 
         for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2687,8 +2834,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
                         "not set within the polling period\n", rxr);
         }
-        ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-                              (adapter->rx_ring[rxr].count - 1));
+        ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+                              (adapter->rx_ring[rxr]->count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2702,6 +2849,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         u32 txdctl, rxdctl, mhadd;
         u32 dmatxctl;
         u32 gpie;
+        u32 ctrl_ext;
 
         ixgbe_get_hw_control(adapter);
 
@@ -2714,6 +2862,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                 /* MSI only */
                 gpie = 0;
         }
+        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+                gpie |= IXGBE_GPIE_VTMODE_64;
+        }
         /* XXX: to interrupt immediately for EICS writes, enable this */
         /* gpie |= IXGBE_GPIE_EIMEN; */
         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2770,7 +2922,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         }
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                j = adapter->tx_ring[i].reg_idx;
+                j = adapter->tx_ring[i]->reg_idx;
                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
                 txdctl |= (8 << 16);
@@ -2784,14 +2936,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
         }
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                j = adapter->tx_ring[i].reg_idx;
+                j = adapter->tx_ring[i]->reg_idx;
                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                 txdctl |= IXGBE_TXDCTL_ENABLE;
                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+                if (hw->mac.type == ixgbe_mac_82599EB) {
+                        int wait_loop = 10;
+                        /* poll for Tx Enable ready */
+                        do {
+                                msleep(1);
+                                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+                        } while (--wait_loop &&
+                                 !(txdctl & IXGBE_TXDCTL_ENABLE));
+                        if (!wait_loop)
+                                DPRINTK(DRV, ERR, "Could not enable "
+                                        "Tx Queue %d\n", j);
+                }
         }
 
         for (i = 0; i < num_rx_rings; i++) {
-                j = adapter->rx_ring[i].reg_idx;
+                j = adapter->rx_ring[i]->reg_idx;
                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                 /* enable PTHRESH=32 descriptors (half the internal cache)
                  * and HTHRESH=0 descriptors (to minimize latency on fetch),
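
The 82599-only loop added above is a common hardware pattern: write an enable bit, then poll it back with a bounded sleep because the MAC can take a few milliseconds to latch it. Factored out generically, it would look like the sketch below (the driver itself open-codes the loop as shown; this helper is an assumed refactoring):

#include <linux/delay.h>

/* Generic bounded poll for a register bit to latch; true on success. */
static bool example_poll_reg_bit(struct ixgbe_hw *hw, u32 reg, u32 bit,
                                 unsigned int tries)
{
        while (tries--) {
                if (IXGBE_READ_REG(hw, reg) & bit)
                        return true;
                msleep(1);
        }
        return false;
}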
@@ -2865,7 +3029,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
         for (i = 0; i < adapter->num_tx_queues; i++)
                 set_bit(__IXGBE_FDIR_INIT_DONE,
-                        &(adapter->tx_ring[i].reinit_state));
+                        &(adapter->tx_ring[i]->reinit_state));
 
         /* enable transmits */
         netif_tx_start_all_queues(netdev);
@@ -2875,6 +3039,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
         adapter->link_check_timeout = jiffies;
         mod_timer(&adapter->watchdog_timer, jiffies);
+
+        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
         return 0;
 }
 
@@ -2923,7 +3093,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
         }
 
         /* reprogram the RAR[0] in case user changed it. */
-        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+                            IXGBE_RAH_AV);
 }
 
 /**
@@ -2955,6 +3126,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                         rx_buffer_info->skb = NULL;
                         do {
                                 struct sk_buff *this = skb;
+                                if (IXGBE_RSC_CB(this)->dma)
+                                        pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+                                                         rx_ring->rx_buf_len,
+                                                         PCI_DMA_FROMDEVICE);
                                 skb = skb->prev;
                                 dev_kfree_skb(this);
                         } while (skb);
@@ -3029,7 +3204,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
         int i;
 
         for (i = 0; i < adapter->num_rx_queues; i++)
-                ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+                ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3041,7 +3216,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
         int i;
 
         for (i = 0; i < adapter->num_tx_queues; i++)
-                ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+                ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3055,6 +3230,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         /* signal that we are down to the interrupt handler */
         set_bit(__IXGBE_DOWN, &adapter->state);
 
+        /* disable receive for all VFs and wait one second */
+        if (adapter->num_vfs) {
+                for (i = 0 ; i < adapter->num_vfs; i++)
+                        adapter->vfinfo[i].clear_to_send = 0;
+
+                /* ping all the active vfs to let them know we are going down */
+                ixgbe_ping_all_vfs(adapter);
+                /* Disable all VFTE/VFRE TX/RX */
+                ixgbe_disable_tx_rx(adapter);
+        }
+
         /* disable receives */
         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3081,7 +3267,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
         /* disable transmits in the hardware now that interrupts are off */
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                j = adapter->tx_ring[i].reg_idx;
+                j = adapter->tx_ring[i]->reg_idx;
                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3094,6 +3280,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
         netif_carrier_off(netdev);
 
+        /* clear n-tuple filters that are cached */
+        ethtool_ntuple_flush(netdev);
+
         if (!pci_channel_offline(adapter->pdev))
                 ixgbe_reset(adapter);
         ixgbe_clean_all_tx_rings(adapter);
@@ -3121,13 +3310,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 
 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-                ixgbe_update_tx_dca(adapter, adapter->tx_ring);
-                ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+                ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+                ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
         }
 #endif
 
-        tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
-        ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+        tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+        ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
 
         if (!tx_clean_complete)
                 work_done = budget;
@@ -3291,6 +3480,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+        return false;
+}
+
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
@@ -3304,6 +3506,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+        /* Start with base case */
+        adapter->num_rx_queues = 1;
+        adapter->num_tx_queues = 1;
+        adapter->num_rx_pools = adapter->num_rx_queues;
+        adapter->num_rx_queues_per_pool = 1;
+
+        if (ixgbe_set_sriov_queues(adapter))
+                return;
+
 #ifdef IXGBE_FCOE
         if (ixgbe_set_fcoe_queues(adapter))
                 goto done;
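
ixgbe_set_num_queues() now tries each feature in priority order, and the first helper that returns true claims the queue layout; the new base case guarantees sane single-queue defaults when everything declines, and ixgbe_set_sriov_queues() currently always declines. The control flow reduced to a skeleton (the helper names beyond the two visible in this diff are assumed from context):

/* Skeleton of the feature-priority queue selection. */
static void example_set_num_queues(struct ixgbe_adapter *adapter)
{
        /* base case: a single queue pair always works */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;

        /* the first helper that returns true owns the queue layout */
        if (ixgbe_set_sriov_queues(adapter))
                return;
        if (ixgbe_set_fcoe_queues(adapter))
                return;
        if (ixgbe_set_fdir_queues(adapter))
                return;
        if (ixgbe_set_rss_queues(adapter))
                return;
}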
@@ -3393,9 +3604,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | |||
3393 | 3604 | ||
3394 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 3605 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
3395 | for (i = 0; i < adapter->num_rx_queues; i++) | 3606 | for (i = 0; i < adapter->num_rx_queues; i++) |
3396 | adapter->rx_ring[i].reg_idx = i; | 3607 | adapter->rx_ring[i]->reg_idx = i; |
3397 | for (i = 0; i < adapter->num_tx_queues; i++) | 3608 | for (i = 0; i < adapter->num_tx_queues; i++) |
3398 | adapter->tx_ring[i].reg_idx = i; | 3609 | adapter->tx_ring[i]->reg_idx = i; |
3399 | ret = true; | 3610 | ret = true; |
3400 | } else { | 3611 | } else { |
3401 | ret = false; | 3612 | ret = false; |
@@ -3422,8 +3633,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3422 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 3633 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
3423 | /* the number of queues is assumed to be symmetric */ | 3634 | /* the number of queues is assumed to be symmetric */ |
3424 | for (i = 0; i < dcb_i; i++) { | 3635 | for (i = 0; i < dcb_i; i++) { |
3425 | adapter->rx_ring[i].reg_idx = i << 3; | 3636 | adapter->rx_ring[i]->reg_idx = i << 3; |
3426 | adapter->tx_ring[i].reg_idx = i << 2; | 3637 | adapter->tx_ring[i]->reg_idx = i << 2; |
3427 | } | 3638 | } |
3428 | ret = true; | 3639 | ret = true; |
3429 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 3640 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
@@ -3441,18 +3652,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3441 | * Rx TC0-TC7 are offset by 16 queues each | 3652 | * Rx TC0-TC7 are offset by 16 queues each |
3442 | */ | 3653 | */ |
3443 | for (i = 0; i < 3; i++) { | 3654 | for (i = 0; i < 3; i++) { |
3444 | adapter->tx_ring[i].reg_idx = i << 5; | 3655 | adapter->tx_ring[i]->reg_idx = i << 5; |
3445 | adapter->rx_ring[i].reg_idx = i << 4; | 3656 | adapter->rx_ring[i]->reg_idx = i << 4; |
3446 | } | 3657 | } |
3447 | for ( ; i < 5; i++) { | 3658 | for ( ; i < 5; i++) { |
3448 | adapter->tx_ring[i].reg_idx = | 3659 | adapter->tx_ring[i]->reg_idx = |
3449 | ((i + 2) << 4); | 3660 | ((i + 2) << 4); |
3450 | adapter->rx_ring[i].reg_idx = i << 4; | 3661 | adapter->rx_ring[i]->reg_idx = i << 4; |
3451 | } | 3662 | } |
3452 | for ( ; i < dcb_i; i++) { | 3663 | for ( ; i < dcb_i; i++) { |
3453 | adapter->tx_ring[i].reg_idx = | 3664 | adapter->tx_ring[i]->reg_idx = |
3454 | ((i + 8) << 3); | 3665 | ((i + 8) << 3); |
3455 | adapter->rx_ring[i].reg_idx = i << 4; | 3666 | adapter->rx_ring[i]->reg_idx = i << 4; |
3456 | } | 3667 | } |
3457 | 3668 | ||
3458 | ret = true; | 3669 | ret = true; |
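
Expanding the three loops above with dcb_i == 8 yields the full 82599 8-TC register map. A throwaway program to print it, with the shift formulas copied verbatim from the hunk (Tx starts at 0/32/64, 80/96, 104/112/120; Rx at 0..112 in steps of 16):

#include <stdio.h>

int main(void)
{
        int tx[8], rx[8], i;

        for (i = 0; i < 3; i++)
                tx[i] = i << 5;         /* TC0-TC2: 32 Tx queues each */
        for (; i < 5; i++)
                tx[i] = (i + 2) << 4;   /* TC3-TC4: 16 Tx queues each */
        for (; i < 8; i++)
                tx[i] = (i + 8) << 3;   /* TC5-TC7: 8 Tx queues each */
        for (i = 0; i < 8; i++)
                rx[i] = i << 4;         /* every Rx TC is 16 queues wide */

        for (i = 0; i < 8; i++)
                printf("TC%d: tx reg_idx %3d, rx reg_idx %3d\n", i, tx[i], rx[i]);
        return 0;
}
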
@@ -3465,12 +3676,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3465 | * | 3676 | * |
3466 | * Rx TC0-TC3 are offset by 32 queues each | 3677 | * Rx TC0-TC3 are offset by 32 queues each |
3467 | */ | 3678 | */ |
3468 | adapter->tx_ring[0].reg_idx = 0; | 3679 | adapter->tx_ring[0]->reg_idx = 0; |
3469 | adapter->tx_ring[1].reg_idx = 64; | 3680 | adapter->tx_ring[1]->reg_idx = 64; |
3470 | adapter->tx_ring[2].reg_idx = 96; | 3681 | adapter->tx_ring[2]->reg_idx = 96; |
3471 | adapter->tx_ring[3].reg_idx = 112; | 3682 | adapter->tx_ring[3]->reg_idx = 112; |
3472 | for (i = 0 ; i < dcb_i; i++) | 3683 | for (i = 0 ; i < dcb_i; i++) |
3473 | adapter->rx_ring[i].reg_idx = i << 5; | 3684 | adapter->rx_ring[i]->reg_idx = i << 5; |
3474 | 3685 | ||
3475 | ret = true; | 3686 | ret = true; |
3476 | } else { | 3687 | } else { |
@@ -3503,9 +3714,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | |||
3503 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 3714 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
3504 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | 3715 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { |
3505 | for (i = 0; i < adapter->num_rx_queues; i++) | 3716 | for (i = 0; i < adapter->num_rx_queues; i++) |
3506 | adapter->rx_ring[i].reg_idx = i; | 3717 | adapter->rx_ring[i]->reg_idx = i; |
3507 | for (i = 0; i < adapter->num_tx_queues; i++) | 3718 | for (i = 0; i < adapter->num_tx_queues; i++) |
3508 | adapter->tx_ring[i].reg_idx = i; | 3719 | adapter->tx_ring[i]->reg_idx = i; |
3509 | ret = true; | 3720 | ret = true; |
3510 | } | 3721 | } |
3511 | 3722 | ||
@@ -3533,8 +3744,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3533 | 3744 | ||
3534 | ixgbe_cache_ring_dcb(adapter); | 3745 | ixgbe_cache_ring_dcb(adapter); |
3535 | /* find out queues in TC for FCoE */ | 3746 | /* find out queues in TC for FCoE */ |
3536 | fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1; | 3747 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; |
3537 | fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1; | 3748 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; |
3538 | /* | 3749 | /* |
3539 | * In 82599, the number of Tx queues for each traffic | 3750 | * In 82599, the number of Tx queues for each traffic |
3540 | * class for both 8-TC and 4-TC modes is: | 3751 | * class for both 8-TC and 4-TC modes is: |
@@ -3565,8 +3776,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3565 | fcoe_tx_i = f->mask; | 3776 | fcoe_tx_i = f->mask; |
3566 | } | 3777 | } |
3567 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | 3778 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { |
3568 | adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i; | 3779 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; |
3569 | adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i; | 3780 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; |
3570 | } | 3781 | } |
3571 | ret = true; | 3782 | ret = true; |
3572 | } | 3783 | } |
@@ -3575,6 +3786,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3575 | 3786 | ||
3576 | #endif /* IXGBE_FCOE */ | 3787 | #endif /* IXGBE_FCOE */ |
3577 | /** | 3788 | /** |
3789 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | ||
3790 | * @adapter: board private structure to initialize | ||
3791 | * | ||
3792 | * SR-IOV doesn't use any descriptor rings itself, but it changes | ||
3793 | * the default register mapping when no other mapping is in use. | ||
3794 | * | ||
3795 | */ | ||
3796 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
3797 | { | ||
3798 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
3799 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
3800 | if (adapter->num_vfs) | ||
3801 | return true; | ||
3802 | else | ||
3803 | return false; | ||
3804 | } | ||
3805 | |||
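
ixgbe_cache_ring_sriov() encodes the 82599 SR-IOV queue layout: each VF owns a pair of Tx/Rx queues (hence the `* 2`), and the PF's lone default ring is mapped to the first register index past the VF block. A two-line illustration, assuming eight VFs:

/* VF0 owns queue pair 0-1, VF1 owns 2-3, ..., VF7 owns 14-15,
 * so the PF's ring 0 is cached at register index 16 */
unsigned int num_vfs = 8;
unsigned int pf_reg_idx = num_vfs * 2;  /* == 16 */
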
3806 | /** | ||
3578 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | 3807 | * ixgbe_cache_ring_register - Descriptor ring to register mapping |
3579 | * @adapter: board private structure to initialize | 3808 | * @adapter: board private structure to initialize |
3580 | * | 3809 | * |
@@ -3588,8 +3817,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3588 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | 3817 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) |
3589 | { | 3818 | { |
3590 | /* start with default case */ | 3819 | /* start with default case */ |
3591 | adapter->rx_ring[0].reg_idx = 0; | 3820 | adapter->rx_ring[0]->reg_idx = 0; |
3592 | adapter->tx_ring[0].reg_idx = 0; | 3821 | adapter->tx_ring[0]->reg_idx = 0; |
3822 | |||
3823 | if (ixgbe_cache_ring_sriov(adapter)) | ||
3824 | return; | ||
3593 | 3825 | ||
3594 | #ifdef IXGBE_FCOE | 3826 | #ifdef IXGBE_FCOE |
3595 | if (ixgbe_cache_ring_fcoe(adapter)) | 3827 | if (ixgbe_cache_ring_fcoe(adapter)) |
@@ -3619,33 +3851,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
3619 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | 3851 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
3620 | { | 3852 | { |
3621 | int i; | 3853 | int i; |
3622 | 3854 | int orig_node = adapter->node; | |
3623 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | ||
3624 | sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3625 | if (!adapter->tx_ring) | ||
3626 | goto err_tx_ring_allocation; | ||
3627 | |||
3628 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, | ||
3629 | sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3630 | if (!adapter->rx_ring) | ||
3631 | goto err_rx_ring_allocation; | ||
3632 | 3855 | ||
3633 | for (i = 0; i < adapter->num_tx_queues; i++) { | 3856 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3634 | adapter->tx_ring[i].count = adapter->tx_ring_count; | 3857 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
3635 | adapter->tx_ring[i].queue_index = i; | 3858 | if (orig_node == -1) { |
3859 | int cur_node = next_online_node(adapter->node); | ||
3860 | if (cur_node == MAX_NUMNODES) | ||
3861 | cur_node = first_online_node; | ||
3862 | adapter->node = cur_node; | ||
3863 | } | ||
3864 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
3865 | adapter->node); | ||
3866 | if (!ring) | ||
3867 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3868 | if (!ring) | ||
3869 | goto err_tx_ring_allocation; | ||
3870 | ring->count = adapter->tx_ring_count; | ||
3871 | ring->queue_index = i; | ||
3872 | ring->numa_node = adapter->node; | ||
3873 | |||
3874 | adapter->tx_ring[i] = ring; | ||
3636 | } | 3875 | } |
3637 | 3876 | ||
3877 | /* Restore the adapter's original node */ | ||
3878 | adapter->node = orig_node; | ||
3879 | |||
3638 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3880 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3639 | adapter->rx_ring[i].count = adapter->rx_ring_count; | 3881 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
3640 | adapter->rx_ring[i].queue_index = i; | 3882 | if (orig_node == -1) { |
3883 | int cur_node = next_online_node(adapter->node); | ||
3884 | if (cur_node == MAX_NUMNODES) | ||
3885 | cur_node = first_online_node; | ||
3886 | adapter->node = cur_node; | ||
3887 | } | ||
3888 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
3889 | adapter->node); | ||
3890 | if (!ring) | ||
3891 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3892 | if (!ring) | ||
3893 | goto err_rx_ring_allocation; | ||
3894 | ring->count = adapter->rx_ring_count; | ||
3895 | ring->queue_index = i; | ||
3896 | ring->numa_node = adapter->node; | ||
3897 | |||
3898 | adapter->rx_ring[i] = ring; | ||
3641 | } | 3899 | } |
3642 | 3900 | ||
3901 | /* Restore the adapter's original node */ | ||
3902 | adapter->node = orig_node; | ||
3903 | |||
3643 | ixgbe_cache_ring_register(adapter); | 3904 | ixgbe_cache_ring_register(adapter); |
3644 | 3905 | ||
3645 | return 0; | 3906 | return 0; |
3646 | 3907 | ||
3647 | err_rx_ring_allocation: | 3908 | err_rx_ring_allocation: |
3648 | kfree(adapter->tx_ring); | 3909 | for (i = 0; i < adapter->num_tx_queues; i++) |
3910 | kfree(adapter->tx_ring[i]); | ||
3649 | err_tx_ring_allocation: | 3911 | err_tx_ring_allocation: |
3650 | return -ENOMEM; | 3912 | return -ENOMEM; |
3651 | } | 3913 | } |
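
The rewritten ixgbe_alloc_queues() establishes a pattern the rest of the patch reuses for the q_vectors (kzalloc_node) and the tx/rx buffer arrays (vmalloc_node): when no node is pinned (orig_node == -1), walk the online NUMA nodes round-robin, attempt a node-local allocation, and fall back to an unconstrained one rather than fail outright. A condensed kernel-context sketch of one iteration; the helper function itself is invented for illustration:

#include <linux/slab.h>
#include <linux/nodemask.h>

static struct ixgbe_ring *ixgbe_alloc_ring_on_next_node(struct ixgbe_adapter *adapter,
                                                        int orig_node)
{
        struct ixgbe_ring *ring;

        /* orig_node == -1 means "no preferred node": advance to the
         * next online node, wrapping at the end of the node map */
        if (orig_node == -1) {
                int cur_node = next_online_node(adapter->node);

                if (cur_node == MAX_NUMNODES)
                        cur_node = first_online_node;
                adapter->node = cur_node;
        }

        /* prefer memory local to the chosen node... */
        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, adapter->node);
        /* ...but a remote ring still beats a failed allocation */
        if (!ring)
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        return ring;
}
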
@@ -3700,6 +3962,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
3700 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 3962 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
3701 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 3963 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
3702 | adapter->atr_sample_rate = 0; | 3964 | adapter->atr_sample_rate = 0; |
3965 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
3966 | ixgbe_disable_sriov(adapter); | ||
3967 | |||
3703 | ixgbe_set_num_queues(adapter); | 3968 | ixgbe_set_num_queues(adapter); |
3704 | 3969 | ||
3705 | err = pci_enable_msi(adapter->pdev); | 3970 | err = pci_enable_msi(adapter->pdev); |
@@ -3741,7 +4006,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
3741 | } | 4006 | } |
3742 | 4007 | ||
3743 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | 4008 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { |
3744 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); | 4009 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), |
4010 | GFP_KERNEL, adapter->node); | ||
4011 | if (!q_vector) | ||
4012 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), | ||
4013 | GFP_KERNEL); | ||
3745 | if (!q_vector) | 4014 | if (!q_vector) |
3746 | goto err_out; | 4015 | goto err_out; |
3747 | q_vector->adapter = adapter; | 4016 | q_vector->adapter = adapter; |
@@ -3868,10 +4137,16 @@ err_set_interrupt: | |||
3868 | **/ | 4137 | **/ |
3869 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | 4138 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) |
3870 | { | 4139 | { |
3871 | kfree(adapter->tx_ring); | 4140 | int i; |
3872 | kfree(adapter->rx_ring); | 4141 | |
3873 | adapter->tx_ring = NULL; | 4142 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3874 | adapter->rx_ring = NULL; | 4143 | kfree(adapter->tx_ring[i]); |
4144 | adapter->tx_ring[i] = NULL; | ||
4145 | } | ||
4146 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
4147 | kfree(adapter->rx_ring[i]); | ||
4148 | adapter->rx_ring[i] = NULL; | ||
4149 | } | ||
3875 | 4150 | ||
3876 | ixgbe_free_q_vectors(adapter); | 4151 | ixgbe_free_q_vectors(adapter); |
3877 | ixgbe_reset_interrupt_capability(adapter); | 4152 | ixgbe_reset_interrupt_capability(adapter); |
@@ -3942,6 +4217,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3942 | { | 4217 | { |
3943 | struct ixgbe_hw *hw = &adapter->hw; | 4218 | struct ixgbe_hw *hw = &adapter->hw; |
3944 | struct pci_dev *pdev = adapter->pdev; | 4219 | struct pci_dev *pdev = adapter->pdev; |
4220 | struct net_device *dev = adapter->netdev; | ||
3945 | unsigned int rss; | 4221 | unsigned int rss; |
3946 | #ifdef CONFIG_IXGBE_DCB | 4222 | #ifdef CONFIG_IXGBE_DCB |
3947 | int j; | 4223 | int j; |
@@ -3969,10 +4245,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3969 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 4245 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3970 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; | 4246 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
3971 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 4247 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
3972 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 4248 | if (dev->features & NETIF_F_NTUPLE) { |
4249 | /* Flow Director perfect filter enabled */ | ||
4250 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
4251 | adapter->atr_sample_rate = 0; | ||
4252 | spin_lock_init(&adapter->fdir_perfect_lock); | ||
4253 | } else { | ||
4254 | /* Flow Director hash filters enabled */ | ||
4255 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4256 | adapter->atr_sample_rate = 20; | ||
4257 | } | ||
3973 | adapter->ring_feature[RING_F_FDIR].indices = | 4258 | adapter->ring_feature[RING_F_FDIR].indices = |
3974 | IXGBE_MAX_FDIR_INDICES; | 4259 | IXGBE_MAX_FDIR_INDICES; |
3975 | adapter->atr_sample_rate = 20; | ||
3976 | adapter->fdir_pballoc = 0; | 4260 | adapter->fdir_pballoc = 0; |
3977 | #ifdef IXGBE_FCOE | 4261 | #ifdef IXGBE_FCOE |
3978 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; | 4262 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
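
The new branch makes the two Flow Director modes mutually exclusive at init time: with NETIF_F_NTUPLE set, the driver arms explicitly programmed "perfect" (exact-match) filters and zeroes the ATR sample rate so no hash filters are learned; otherwise it keeps the previous behaviour, sampling roughly every 20th transmitted packet to build hash-based flow affinity. A self-contained mirror of that decision (the flag values are stand-ins, not the driver's real bits):

#include <stdio.h>

#define FLAG_FDIR_HASH          (1u << 0)
#define FLAG_FDIR_PERFECT       (1u << 1)

struct fdir_cfg {
        unsigned int flags;
        unsigned int atr_sample_rate;
};

static void fdir_mode(struct fdir_cfg *c, int ntuple)
{
        if (ntuple) {
                /* ethtool ntuple: exact-match filters programmed by
                 * the admin, so ATR sampling is switched off */
                c->flags |= FLAG_FDIR_PERFECT;
                c->atr_sample_rate = 0;
        } else {
                /* default: hash filters learned by ATR from roughly
                 * every 20th transmitted packet */
                c->flags |= FLAG_FDIR_HASH;
                c->atr_sample_rate = 20;
        }
}

int main(void)
{
        struct fdir_cfg c = { 0, 0 };

        fdir_mode(&c, 0);
        printf("flags=%#x sample_rate=%u\n", c.flags, c.atr_sample_rate);
        return 0;
}
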
@@ -4041,6 +4325,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4041 | /* enable rx csum by default */ | 4325 | /* enable rx csum by default */ |
4042 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; | 4326 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; |
4043 | 4327 | ||
4328 | /* get assigned NUMA node */ | ||
4329 | adapter->node = dev_to_node(&pdev->dev); | ||
4330 | |||
4044 | set_bit(__IXGBE_DOWN, &adapter->state); | 4331 | set_bit(__IXGBE_DOWN, &adapter->state); |
4045 | 4332 | ||
4046 | return 0; | 4333 | return 0; |
@@ -4060,7 +4347,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4060 | int size; | 4347 | int size; |
4061 | 4348 | ||
4062 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 4349 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
4063 | tx_ring->tx_buffer_info = vmalloc(size); | 4350 | tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); |
4351 | if (!tx_ring->tx_buffer_info) | ||
4352 | tx_ring->tx_buffer_info = vmalloc(size); | ||
4064 | if (!tx_ring->tx_buffer_info) | 4353 | if (!tx_ring->tx_buffer_info) |
4065 | goto err; | 4354 | goto err; |
4066 | memset(tx_ring->tx_buffer_info, 0, size); | 4355 | memset(tx_ring->tx_buffer_info, 0, size); |
@@ -4102,7 +4391,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4102 | int i, err = 0; | 4391 | int i, err = 0; |
4103 | 4392 | ||
4104 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4393 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4105 | err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); | 4394 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); |
4106 | if (!err) | 4395 | if (!err) |
4107 | continue; | 4396 | continue; |
4108 | DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); | 4397 | DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); |
@@ -4126,7 +4415,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
4126 | int size; | 4415 | int size; |
4127 | 4416 | ||
4128 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 4417 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
4129 | rx_ring->rx_buffer_info = vmalloc(size); | 4418 | rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); |
4419 | if (!rx_ring->rx_buffer_info) | ||
4420 | rx_ring->rx_buffer_info = vmalloc(size); | ||
4130 | if (!rx_ring->rx_buffer_info) { | 4421 | if (!rx_ring->rx_buffer_info) { |
4131 | DPRINTK(PROBE, ERR, | 4422 | DPRINTK(PROBE, ERR, |
4132 | "vmalloc allocation failed for the rx desc ring\n"); | 4423 | "vmalloc allocation failed for the rx desc ring\n"); |
@@ -4172,7 +4463,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4172 | int i, err = 0; | 4463 | int i, err = 0; |
4173 | 4464 | ||
4174 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4465 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4175 | err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); | 4466 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); |
4176 | if (!err) | 4467 | if (!err) |
4177 | continue; | 4468 | continue; |
4178 | DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); | 4469 | DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); |
@@ -4215,8 +4506,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4215 | int i; | 4506 | int i; |
4216 | 4507 | ||
4217 | for (i = 0; i < adapter->num_tx_queues; i++) | 4508 | for (i = 0; i < adapter->num_tx_queues; i++) |
4218 | if (adapter->tx_ring[i].desc) | 4509 | if (adapter->tx_ring[i]->desc) |
4219 | ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); | 4510 | ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); |
4220 | } | 4511 | } |
4221 | 4512 | ||
4222 | /** | 4513 | /** |
@@ -4252,8 +4543,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4252 | int i; | 4543 | int i; |
4253 | 4544 | ||
4254 | for (i = 0; i < adapter->num_rx_queues; i++) | 4545 | for (i = 0; i < adapter->num_rx_queues; i++) |
4255 | if (adapter->rx_ring[i].desc) | 4546 | if (adapter->rx_ring[i]->desc) |
4256 | ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); | 4547 | ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); |
4257 | } | 4548 | } |
4258 | 4549 | ||
4259 | /** | 4550 | /** |
@@ -4530,8 +4821,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
4530 | adapter->hw_rx_no_dma_resources += | 4821 | adapter->hw_rx_no_dma_resources += |
4531 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 4822 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
4532 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4823 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4533 | rsc_count += adapter->rx_ring[i].rsc_count; | 4824 | rsc_count += adapter->rx_ring[i]->rsc_count; |
4534 | rsc_flush += adapter->rx_ring[i].rsc_flush; | 4825 | rsc_flush += adapter->rx_ring[i]->rsc_flush; |
4535 | } | 4826 | } |
4536 | adapter->rsc_total_count = rsc_count; | 4827 | adapter->rsc_total_count = rsc_count; |
4537 | adapter->rsc_total_flush = rsc_flush; | 4828 | adapter->rsc_total_flush = rsc_flush; |
@@ -4539,11 +4830,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
4539 | 4830 | ||
4540 | /* gather some stats to the adapter struct that are per queue */ | 4831 | /* gather some stats to the adapter struct that are per queue */ |
4541 | for (i = 0; i < adapter->num_tx_queues; i++) | 4832 | for (i = 0; i < adapter->num_tx_queues; i++) |
4542 | restart_queue += adapter->tx_ring[i].restart_queue; | 4833 | restart_queue += adapter->tx_ring[i]->restart_queue; |
4543 | adapter->restart_queue = restart_queue; | 4834 | adapter->restart_queue = restart_queue; |
4544 | 4835 | ||
4545 | for (i = 0; i < adapter->num_rx_queues; i++) | 4836 | for (i = 0; i < adapter->num_rx_queues; i++) |
4546 | non_eop_descs += adapter->rx_ring[i].non_eop_descs; | 4837 | non_eop_descs += adapter->rx_ring[i]->non_eop_descs; |
4547 | adapter->non_eop_descs = non_eop_descs; | 4838 | adapter->non_eop_descs = non_eop_descs; |
4548 | 4839 | ||
4549 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | 4840 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
@@ -4782,7 +5073,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
4782 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | 5073 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { |
4783 | for (i = 0; i < adapter->num_tx_queues; i++) | 5074 | for (i = 0; i < adapter->num_tx_queues; i++) |
4784 | set_bit(__IXGBE_FDIR_INIT_DONE, | 5075 | set_bit(__IXGBE_FDIR_INIT_DONE, |
4785 | &(adapter->tx_ring[i].reinit_state)); | 5076 | &(adapter->tx_ring[i]->reinit_state)); |
4786 | } else { | 5077 | } else { |
4787 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " | 5078 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " |
4788 | "ignored adding FDIR ATR filters\n"); | 5079 | "ignored adding FDIR ATR filters\n"); |
@@ -4791,6 +5082,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
4791 | netif_tx_start_all_queues(adapter->netdev); | 5082 | netif_tx_start_all_queues(adapter->netdev); |
4792 | } | 5083 | } |
4793 | 5084 | ||
5085 | static DEFINE_MUTEX(ixgbe_watchdog_lock); | ||
5086 | |||
4794 | /** | 5087 | /** |
4795 | * ixgbe_watchdog_task - worker thread to bring link up | 5088 | * ixgbe_watchdog_task - worker thread to bring link up |
4796 | * @work: pointer to work_struct containing our data | 5089 | * @work: pointer to work_struct containing our data |
@@ -4802,13 +5095,16 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4802 | watchdog_task); | 5095 | watchdog_task); |
4803 | struct net_device *netdev = adapter->netdev; | 5096 | struct net_device *netdev = adapter->netdev; |
4804 | struct ixgbe_hw *hw = &adapter->hw; | 5097 | struct ixgbe_hw *hw = &adapter->hw; |
4805 | u32 link_speed = adapter->link_speed; | 5098 | u32 link_speed; |
4806 | bool link_up = adapter->link_up; | 5099 | bool link_up; |
4807 | int i; | 5100 | int i; |
4808 | struct ixgbe_ring *tx_ring; | 5101 | struct ixgbe_ring *tx_ring; |
4809 | int some_tx_pending = 0; | 5102 | int some_tx_pending = 0; |
4810 | 5103 | ||
4811 | adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; | 5104 | mutex_lock(&ixgbe_watchdog_lock); |
5105 | |||
5106 | link_up = adapter->link_up; | ||
5107 | link_speed = adapter->link_speed; | ||
4812 | 5108 | ||
4813 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | 5109 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { |
4814 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | 5110 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
@@ -4879,7 +5175,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4879 | 5175 | ||
4880 | if (!netif_carrier_ok(netdev)) { | 5176 | if (!netif_carrier_ok(netdev)) { |
4881 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5177 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4882 | tx_ring = &adapter->tx_ring[i]; | 5178 | tx_ring = adapter->tx_ring[i]; |
4883 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { | 5179 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
4884 | some_tx_pending = 1; | 5180 | some_tx_pending = 1; |
4885 | break; | 5181 | break; |
@@ -4897,7 +5193,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4897 | } | 5193 | } |
4898 | 5194 | ||
4899 | ixgbe_update_stats(adapter); | 5195 | ixgbe_update_stats(adapter); |
4900 | adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; | 5196 | mutex_unlock(&ixgbe_watchdog_lock); |
4901 | } | 5197 | } |
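
Net effect of the scattered +/- lines in this function: the IN_WATCHDOG_TASK bit, a plain read-modify-write on adapter->flags that never actually excluded a concurrent run, is replaced by ixgbe_watchdog_lock, and the link_up/link_speed snapshot moves under the lock. The function's new shape, condensed into a kernel-context sketch (names as in the patch, body elided):

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(ixgbe_watchdog_lock);

static void ixgbe_watchdog_task(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     watchdog_task);
        u32 link_speed;
        bool link_up;

        mutex_lock(&ixgbe_watchdog_lock);

        /* snapshot link state under the lock so a concurrent update
         * cannot be observed half-written */
        link_up = adapter->link_up;
        link_speed = adapter->link_speed;

        /* ... link check, carrier handling, stats update ... */

        mutex_unlock(&ixgbe_watchdog_lock);
}
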
4902 | 5198 | ||
4903 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 5199 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
@@ -5343,8 +5639,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
5343 | return txq; | 5639 | return txq; |
5344 | } | 5640 | } |
5345 | #endif | 5641 | #endif |
5346 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 5642 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5347 | return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; | 5643 | if (skb->priority == TC_PRIO_CONTROL) |
5644 | txq = adapter->ring_feature[RING_F_DCB].indices-1; | ||
5645 | else | ||
5646 | txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) | ||
5647 | >> 13; | ||
5648 | return txq; | ||
5649 | } | ||
5348 | 5650 | ||
5349 | return skb_tx_hash(dev, skb); | 5651 | return skb_tx_hash(dev, skb); |
5350 | } | 5652 | } |
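
The DCB branch now special-cases control traffic: frames with skb->priority == TC_PRIO_CONTROL are pinned to the last DCB queue, while everything else keeps deriving its queue from the VLAN tag's 3-bit priority field. The shift by 13 works because of the 802.1Q TCI layout; a tiny standalone check (the mask constant is a stand-in for IXGBE_TX_FLAGS_VLAN_PRIO_MASK):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK  0xe000u
#define VLAN_PRIO_SHIFT 13

int main(void)
{
        /* 802.1Q TCI layout: PCP(3 bits) | DEI(1) | VID(12), so
         * masking then shifting by 13 isolates the priority */
        uint16_t vlan_tci = 0xA005;     /* PCP = 5, VID = 5 */
        unsigned int txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

        printf("tx queue %u\n", txq);   /* prints "tx queue 5" */
        return 0;
}
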
@@ -5371,17 +5673,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5371 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5673 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5372 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5674 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5373 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5675 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5374 | if (skb->priority != TC_PRIO_CONTROL) { | 5676 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
5375 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); | 5677 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5376 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5678 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5377 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | ||
5378 | } else { | ||
5379 | skb->queue_mapping = | ||
5380 | adapter->ring_feature[RING_F_DCB].indices-1; | ||
5381 | } | ||
5382 | } | 5679 | } |
5383 | 5680 | ||
5384 | tx_ring = &adapter->tx_ring[skb->queue_mapping]; | 5681 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
5385 | 5682 | ||
5386 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5683 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
5387 | (skb->protocol == htons(ETH_P_FCOE))) { | 5684 | (skb->protocol == htons(ETH_P_FCOE))) { |
@@ -5487,7 +5784,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) | |||
5487 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 5784 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
5488 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | 5785 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
5489 | 5786 | ||
5490 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 5787 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
5788 | IXGBE_RAH_AV); | ||
5491 | 5789 | ||
5492 | return 0; | 5790 | return 0; |
5493 | } | 5791 | } |
@@ -5624,6 +5922,61 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
5624 | #endif /* IXGBE_FCOE */ | 5922 | #endif /* IXGBE_FCOE */ |
5625 | }; | 5923 | }; |
5626 | 5924 | ||
5925 | static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | ||
5926 | const struct ixgbe_info *ii) | ||
5927 | { | ||
5928 | #ifdef CONFIG_PCI_IOV | ||
5929 | struct ixgbe_hw *hw = &adapter->hw; | ||
5930 | int err; | ||
5931 | |||
5932 | if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) | ||
5933 | return; | ||
5934 | |||
5935 | /* The 82599 supports up to 64 VFs per physical function | ||
5936 | * but this implementation limits allocation to 63 so that | ||
5937 | * basic networking resources are still available to the | ||
5938 | * physical function | ||
5939 | */ | ||
5940 | adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; | ||
5941 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; | ||
5942 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); | ||
5943 | if (err) { | ||
5944 | DPRINTK(PROBE, ERR, | ||
5945 | "Failed to enable PCI sriov: %d\n", err); | ||
5946 | goto err_novfs; | ||
5947 | } | ||
5948 | /* If call to enable VFs succeeded then allocate memory | ||
5949 | * for per VF control structures. | ||
5950 | */ | ||
5951 | adapter->vfinfo = | ||
5952 | kcalloc(adapter->num_vfs, | ||
5953 | sizeof(struct vf_data_storage), GFP_KERNEL); | ||
5954 | if (adapter->vfinfo) { | ||
5955 | /* Now that we're sure SR-IOV is enabled | ||
5956 | * and memory allocated set up the mailbox parameters | ||
5957 | */ | ||
5958 | ixgbe_init_mbx_params_pf(hw); | ||
5959 | memcpy(&hw->mbx.ops, ii->mbx_ops, | ||
5960 | sizeof(hw->mbx.ops)); | ||
5961 | |||
5962 | /* Disable RSC when in SR-IOV mode */ | ||
5963 | adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | | ||
5964 | IXGBE_FLAG2_RSC_ENABLED); | ||
5965 | return; | ||
5966 | } | ||
5967 | |||
5968 | /* Oh oh */ | ||
5969 | DPRINTK(PROBE, ERR, | ||
5970 | "Unable to allocate memory for VF " | ||
5971 | "Data Storage - SRIOV disabled\n"); | ||
5972 | pci_disable_sriov(adapter->pdev); | ||
5973 | |||
5974 | err_novfs: | ||
5975 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | ||
5976 | adapter->num_vfs = 0; | ||
5977 | #endif /* CONFIG_PCI_IOV */ | ||
5978 | } | ||
5979 | |||
5627 | /** | 5980 | /** |
5628 | * ixgbe_probe - Device Initialization Routine | 5981 | * ixgbe_probe - Device Initialization Routine |
5629 | * @pdev: PCI device information struct | 5982 | * @pdev: PCI device information struct |
@@ -5644,6 +5997,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5644 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; | 5997 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; |
5645 | static int cards_found; | 5998 | static int cards_found; |
5646 | int i, err, pci_using_dac; | 5999 | int i, err, pci_using_dac; |
6000 | unsigned int indices = num_possible_cpus(); | ||
5647 | #ifdef IXGBE_FCOE | 6001 | #ifdef IXGBE_FCOE |
5648 | u16 device_caps; | 6002 | u16 device_caps; |
5649 | #endif | 6003 | #endif |
@@ -5682,7 +6036,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5682 | pci_set_master(pdev); | 6036 | pci_set_master(pdev); |
5683 | pci_save_state(pdev); | 6037 | pci_save_state(pdev); |
5684 | 6038 | ||
5685 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); | 6039 | if (ii->mac == ixgbe_mac_82598EB) |
6040 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); | ||
6041 | else | ||
6042 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | ||
6043 | |||
6044 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); | ||
6045 | #ifdef IXGBE_FCOE | ||
6046 | indices += min_t(unsigned int, num_possible_cpus(), | ||
6047 | IXGBE_MAX_FCOE_INDICES); | ||
6048 | #endif | ||
6049 | indices = min_t(unsigned int, indices, MAX_TX_QUEUES); | ||
6050 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); | ||
5686 | if (!netdev) { | 6051 | if (!netdev) { |
5687 | err = -ENOMEM; | 6052 | err = -ENOMEM; |
5688 | goto err_alloc_etherdev; | 6053 | goto err_alloc_etherdev; |
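
The alloc_etherdev_mq() call above stops requesting a fixed MAX_TX_QUEUES and instead sizes the netdev from the CPU count, clamped per MAC generation and padded for FCoE. A standalone rerun of that arithmetic; the constants are taken from the ixgbe headers of this era and should be treated as illustrative:

#include <stdio.h>

#define IXGBE_MAX_RSS_INDICES    16
#define IXGBE_MAX_FDIR_INDICES   64
#define IXGBE_MAX_DCB_INDICES     8
#define IXGBE_MAX_FCOE_INDICES    8
#define MAX_TX_QUEUES           128

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
        unsigned int cpus = 32;         /* stand-in for num_possible_cpus() */
        unsigned int indices = cpus;
        int is_82598 = 0;               /* assume an 82599 with FCoE built in */

        indices = min_u(indices, is_82598 ? IXGBE_MAX_RSS_INDICES
                                          : IXGBE_MAX_FDIR_INDICES);
        indices = max_u(indices, IXGBE_MAX_DCB_INDICES);
        indices += min_u(cpus, IXGBE_MAX_FCOE_INDICES);
        indices = min_u(indices, MAX_TX_QUEUES);

        printf("alloc_etherdev_mq() gets %u queues\n", indices);   /* 40 */
        return 0;
}
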
@@ -5802,6 +6167,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5802 | goto err_sw_init; | 6167 | goto err_sw_init; |
5803 | } | 6168 | } |
5804 | 6169 | ||
6170 | ixgbe_probe_vf(adapter, ii); | ||
6171 | |||
5805 | netdev->features = NETIF_F_SG | | 6172 | netdev->features = NETIF_F_SG | |
5806 | NETIF_F_IP_CSUM | | 6173 | NETIF_F_IP_CSUM | |
5807 | NETIF_F_HW_VLAN_TX | | 6174 | NETIF_F_HW_VLAN_TX | |
@@ -5822,6 +6189,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5822 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; | 6189 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; |
5823 | netdev->vlan_features |= NETIF_F_SG; | 6190 | netdev->vlan_features |= NETIF_F_SG; |
5824 | 6191 | ||
6192 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6193 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | | ||
6194 | IXGBE_FLAG_DCB_ENABLED); | ||
5825 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 6195 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
5826 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 6196 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
5827 | 6197 | ||
@@ -5948,6 +6318,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5948 | ixgbe_setup_dca(adapter); | 6318 | ixgbe_setup_dca(adapter); |
5949 | } | 6319 | } |
5950 | #endif | 6320 | #endif |
6321 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
6322 | DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", | ||
6323 | adapter->num_vfs); | ||
6324 | for (i = 0; i < adapter->num_vfs; i++) | ||
6325 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); | ||
6326 | } | ||
6327 | |||
5951 | /* add san mac addr to netdev */ | 6328 | /* add san mac addr to netdev */ |
5952 | ixgbe_add_sanmac_netdev(netdev); | 6329 | ixgbe_add_sanmac_netdev(netdev); |
5953 | 6330 | ||
@@ -5960,6 +6337,8 @@ err_register: | |||
5960 | ixgbe_clear_interrupt_scheme(adapter); | 6337 | ixgbe_clear_interrupt_scheme(adapter); |
5961 | err_sw_init: | 6338 | err_sw_init: |
5962 | err_eeprom: | 6339 | err_eeprom: |
6340 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6341 | ixgbe_disable_sriov(adapter); | ||
5963 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 6342 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
5964 | del_timer_sync(&adapter->sfp_timer); | 6343 | del_timer_sync(&adapter->sfp_timer); |
5965 | cancel_work_sync(&adapter->sfp_task); | 6344 | cancel_work_sync(&adapter->sfp_task); |
@@ -6028,6 +6407,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6028 | if (netdev->reg_state == NETREG_REGISTERED) | 6407 | if (netdev->reg_state == NETREG_REGISTERED) |
6029 | unregister_netdev(netdev); | 6408 | unregister_netdev(netdev); |
6030 | 6409 | ||
6410 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6411 | ixgbe_disable_sriov(adapter); | ||
6412 | |||
6031 | ixgbe_clear_interrupt_scheme(adapter); | 6413 | ixgbe_clear_interrupt_scheme(adapter); |
6032 | 6414 | ||
6033 | ixgbe_release_hw_control(adapter); | 6415 | ixgbe_release_hw_control(adapter); |