author		David S. Miller <davem@davemloft.net>	2014-01-16 18:35:08 -0500
committer	David S. Miller <davem@davemloft.net>	2014-01-16 18:35:08 -0500
commit		f7cbdb7d7b4dc85dc96edf3a81e3b58151a72825 (patch)
tree		39d0ca38a98f9ca6690b5653f1dc79108e473c9d
parent		0864c158836c2d0edb61f7128475e192b09bc851 (diff)
parent		d3cec927ef9f5783b5658e27401425fda4a2d4d9 (diff)
Merge branch 'ixgbe-next'
Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to ixgbe and ixgbevf.

John adds rtnl lock / unlock semantics for ixgbe_reinit_locked(),
which was being called without the rtnl lock held.

Jacob corrects an issue where the ixgbevf_qv_disable function did not
set the disabled bit correctly.

From the community, Wei fixes ixgbevf_suspend() to use the correct
type of struct for the PCI driver-specific data.

Don changes the way we store ring arrays, in a manner that allows
support of multiple queues on multiple nodes, and creates new ring
initialization functions for work previously done across multiple
functions - making the code closer to ixgbe and hopefully more
readable. He also fixes incorrect fiber eeprom write logic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
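The central structural change in this series is Don's conversion of the
adapter's ring storage from one contiguous allocation to a fixed array of
pointers. A minimal sketch of the before/after layout (names abbreviated,
not the driver's declarations):

	struct ixgbevf_ring;	/* descriptor ring bookkeeping */

	/* Before: one kcalloc'd block; rings are addressed as
	 * &adapter->tx_ring[i] and cannot be allocated per node. */
	struct adapter_before {
		struct ixgbevf_ring *tx_ring;	/* kcalloc(num_queues, ...) */
	};

	/* After: an array of pointers; each ring is its own kzalloc,
	 * addressed as adapter->tx_ring[i], so rings can later be placed
	 * on the NUMA node that services the queue. */
	struct adapter_after {
		struct ixgbevf_ring *tx_ring[8];  /* 8 stands in for MAX_TX_QUEUES */
	};

This is why nearly every ixgbevf hunk below swaps "." for "->" and
"&adapter->tx_ring[i]" for "adapter->tx_ring[i]".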
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c		|   2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c		|   2
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/defines.h		|  17
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ethtool.c		|  30
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf.h		|   5
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	| 431
6 files changed, 274 insertions(+), 213 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636..edda6814108c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
 		goto out;
 	}
 
-	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
 
 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
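The fix above is a one-character read-modify-write bug: the rate-select
field was being cleared and then AND-ed (rather than OR-ed) with the new
value, so the intended bits were never written. The general idiom, sketched
with an illustrative mask:

	#include <stdint.h>

	#define FIELD_MASK 0x18	/* illustrative: the bits the field occupies */

	static uint8_t field_update(uint8_t reg, uint8_t new_bits)
	{
		/* correct: clear the field, then OR in the new bits */
		return (reg & ~FIELD_MASK) | new_bits;
	}

	/* The buggy form, (reg & ~FIELD_MASK) & new_bits, keeps only bits
	 * that are simultaneously outside the field and inside new_bits -
	 * it zeroes the field and corrupts the rest of the register too. */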
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3ca59d21d0b2..b445ad121de1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6392,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 	netdev_err(adapter->netdev, "Reset adapter\n");
 	adapter->tx_timeout_count++;
 
+	rtnl_lock();
 	ixgbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
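ixgbe_reinit_locked() expects the RTNL lock to be held by its caller, but
the reset subtask runs from a work item that does not hold it; the two
added lines close that gap. The pattern, as a sketch with a hypothetical
adapter type and callee:

	#include <linux/rtnetlink.h>

	struct example_adapter;			/* hypothetical */
	void example_reinit_locked(struct example_adapter *adapter);

	static void example_reset_subtask(struct example_adapter *adapter)
	{
		/* work-queue context: RTNL is not held here, so take it
		 * around any reinit that reconfigures the net_device */
		rtnl_lock();
		example_reinit_locked(adapter);
		rtnl_unlock();
	}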
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135..5426b2dee6a6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -277,4 +277,21 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ERR_RESET_FAILED			-2
 #define IXGBE_ERR_INVALID_ARGUMENT		-3
 
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT	16	   /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+
 #endif /* _IXGBEVF_DEFINES_H_ */
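These masks mirror the PF driver's definitions and are consumed by the new
ixgbevf_configure_tx_ring() further down; a TXDCTL value is composed by
OR-ing the thresholds into the enable bit. A standalone sketch using the
values from that function:

	#include <stdint.h>

	#define IXGBE_TXDCTL_ENABLE		0x02000000
	#define IXGBE_TXDCTL_WTHRESH_SHIFT	16

	static uint32_t example_txdctl(void)
	{
		uint32_t txdctl = IXGBE_TXDCTL_ENABLE;

		txdctl |= 8 << IXGBE_TXDCTL_WTHRESH_SHIFT; /* WTHRESH = 8 */
		txdctl |= (1 << 8) | 32;	/* HTHRESH = 1, PTHRESH = 32 */
		return txdctl;
	}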
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..515ba4e29760 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		/* clone ring and setup updated count */
-		tx_ring[i] = adapter->tx_ring[i];
+		tx_ring[i] = *adapter->tx_ring[i];
 		tx_ring[i].count = new_tx_count;
 		err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
 		if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		/* clone ring and setup updated count */
-		rx_ring[i] = adapter->rx_ring[i];
+		rx_ring[i] = *adapter->rx_ring[i];
 		rx_ring[i].count = new_rx_count;
 		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
 		if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Tx */
 	if (tx_ring) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-			adapter->tx_ring[i] = tx_ring[i];
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+			*adapter->tx_ring[i] = tx_ring[i];
 		}
 		adapter->tx_ring_count = new_tx_count;
 
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Rx */
 	if (rx_ring) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
-			adapter->rx_ring[i] = rx_ring[i];
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+			*adapter->rx_ring[i] = rx_ring[i];
 		}
 		adapter->rx_ring_count = new_rx_count;
 
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 		    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i].bp_yields;
-		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
+		rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i].bp_yields;
-		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
+		tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
 	}
 
 	adapter->bp_rx_yields = rx_yields;
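The set_ringparam changes above keep the driver's clone-and-swap resize:
build resized shadow rings first, and only after every allocation succeeds
free the old descriptor memory and copy the shadow state over the live
rings, so a failed resize leaves the device untouched. A compressed sketch
with hypothetical helpers:

	#include <stdlib.h>

	struct ring { int count; void *desc; };

	int setup_resources(struct ring *r);	/* hypothetical: allocate descriptors */
	void free_resources(struct ring *r);	/* hypothetical: release descriptors */

	static int resize_rings(struct ring *live[], int n, int new_count)
	{
		struct ring *shadow = calloc(n, sizeof(*shadow));
		int i, err = 0;

		if (!shadow)
			return -1;
		for (i = 0; i < n; i++) {
			shadow[i] = *live[i];		/* clone ring state */
			shadow[i].count = new_count;	/* apply the new size */
			err = setup_resources(&shadow[i]);
			if (err)
				goto out;	/* live rings never touched */
		}
		for (i = 0; i < n; i++) {
			free_resources(live[i]);  /* drop old descriptor memory */
			*live[i] = shadow[i];	  /* swap in the resized ring */
		}
	out:
		/* on error, resources built so far would be freed here (omitted) */
		free(shadow);
		return err;
	}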
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index bb76e96f8278..0547e40980cb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -260,6 +260,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBEVF_QV_OWNED)
 		rc = false;
+	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
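The added line is the substance of Jacob's fix: ixgbevf_qv_disable()
previously reported whether the vector could be disabled but never latched
the disabled state, so a busy-polling owner could re-acquire it. Reduced to
a sketch (lock calls elided, names illustrative):

	#include <stdbool.h>

	#define QV_OWNED	0x1	/* illustrative state bits */
	#define QV_DISABLED	0x8

	struct qv { unsigned int state; }; /* protected by a spinlock (elided) */

	static bool qv_disable(struct qv *q)
	{
		bool rc = true;

		/* spin_lock_bh(&q->lock); */
		if (q->state & QV_OWNED)
			rc = false;		/* still owned: caller retries */
		q->state |= QV_DISABLED;	/* but always latch DISABLED so
						 * no new owner can take it */
		/* spin_unlock_bh(&q->lock); */
		return rc;
	}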
@@ -326,7 +327,7 @@ struct ixgbevf_adapter {
 	u32 eims_other;
 
 	/* TX */
-	struct ixgbevf_ring *tx_ring;	/* One per active queue */
+	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_tx_queues;
 	u64 restart_queue;
 	u64 hw_csum_tx_good;
@@ -336,7 +337,7 @@ struct ixgbevf_adapter {
 	u32 tx_timeout_count;
 
 	/* RX */
-	struct ixgbevf_ring *rx_ring;	/* One per active queue */
+	struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a5d31674ff42..6cf41207a31d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -848,8 +848,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->rx_ring[r_idx].next = q_vector->rx.ring;
-	q_vector->rx.ring = &a->rx_ring[r_idx];
+	a->rx_ring[r_idx]->next = q_vector->rx.ring;
+	q_vector->rx.ring = a->rx_ring[r_idx];
 	q_vector->rx.count++;
 }
 
@@ -858,8 +858,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->tx_ring[t_idx].next = q_vector->tx.ring;
-	q_vector->tx.ring = &a->tx_ring[t_idx];
+	a->tx_ring[t_idx]->next = q_vector->tx.ring;
+	q_vector->tx.ring = a->tx_ring[t_idx];
 	q_vector->tx.count++;
 }
 
@@ -1087,6 +1087,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
 }
 
 /**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = IXGBE_TXDCTL_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+	/* disable head writeback */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	/* In order to avoid issues, WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+	txdctl |= (8 << 16);	/* WTHRESH = 8 */
+
+	/* Setting PTHRESH to 32 improves performance */
+	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
+		   32;		/* PTHRESH = 32 */
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		usleep_range(1000, 2000);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
  * @adapter: board private structure
  *
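The tail of ixgbevf_configure_tx_ring() is a bounded-poll MMIO idiom:
write the enable bit, then re-read the register until hardware reflects it
or the loop budget runs out. Its skeleton, with hypothetical accessors:

	#include <stdbool.h>
	#include <stdint.h>

	#define ENABLE_BIT 0x02000000

	uint32_t mmio_read(unsigned int reg);	/* hypothetical accessors */
	void mmio_write(unsigned int reg, uint32_t val);
	void sleep_range_us(unsigned int lo, unsigned int hi);

	static bool enable_queue(unsigned int reg, uint32_t val)
	{
		int wait_loop = 10;	/* same budget as the function above */

		mmio_write(reg, val | ENABLE_BIT);
		do {
			sleep_range_us(1000, 2000);
			if (mmio_read(reg) & ENABLE_BIT)
				return true;	/* hardware acknowledged */
		} while (--wait_loop);
		return false;		/* caller logs an error, as above */
	}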
@@ -1094,32 +1158,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
 **/
 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 {
-	u64 tdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i, j, tdlen, txctrl;
+	u32 i;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		tdba = ring->dma;
-		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
-				(tdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-		ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
-		ring->next_to_clean = 0;
-		ring->next_to_use = 0;
-		/* Disable Tx Head Writeback RO bit, since this hoses
-		 * bookkeeping if things aren't delivered in order.
-		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
-		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
-	}
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
@@ -1130,7 +1173,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 
-	rx_ring = &adapter->rx_ring[index];
+	rx_ring = adapter->rx_ring[index];
 
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
@@ -1188,7 +1231,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 		rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	ixgbevf_disable_rx_queue(adapter, ring);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	ixgbevf_configure_srrctl(adapter, reg_idx);
+
+	/* prevent DMA from exceeding buffer space available */
+	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	ixgbevf_rx_desc_queue_enable(adapter, ring);
+	ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
 }
 
 /**
@@ -1199,10 +1328,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 **/
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
-	u64 rdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j;
-	u32 rdlen;
+	int i;
 
 	ixgbevf_setup_psrtype(adapter);
 
@@ -1211,23 +1337,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-		rdba = ring->dma;
-		j = ring->reg_idx;
-		rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
-				(rdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-		ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
-		ring->next_to_clean = 0;
-		ring->next_to_use = 0;
-
-		ixgbevf_configure_srrctl(adapter, j);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1389,7 +1500,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 	if (num_tcs > 1) {
 		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
+		adapter->tx_ring[0]->reg_idx = def_q;
 
 		/* we need as many queues as traffic classes */
 		num_rx_queues = num_tcs;
@@ -1409,69 +1520,14 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-	int i;
-
 	ixgbevf_configure_dcb(adapter);
 
-	ixgbevf_set_rx_mode(netdev);
+	ixgbevf_set_rx_mode(adapter->netdev);
 
 	ixgbevf_restore_vlan(adapter);
 
 	ixgbevf_configure_tx(adapter);
 	ixgbevf_configure_rx(adapter);
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 ixgbevf_desc_unused(ring));
-	}
-}
-
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-					 int rxr)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	int j = adapter->rx_ring[rxr].reg_idx;
-
-	do {
-		usleep_range(1000, 2000);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-		       rxr);
-
-	ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
-}
-
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
-				     struct ixgbevf_ring *ring)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	u8 reg_idx = ring->reg_idx;
-
-	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
-	/* write value back with RXDCTL.ENABLE bit cleared */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
-
-	/* the hardware may take up to 100us to really disable the rx queue */
-	do {
-		udelay(10);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
-		       reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1536,37 +1592,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j = 0;
-	int num_rx_rings = adapter->num_rx_queues;
-	u32 txdctl, rxdctl;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-		txdctl |= (8 << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < num_rx_rings; i++) {
-		j = adapter->rx_ring[i].reg_idx;
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
-		if (hw->mac.type == ixgbe_mac_X540_vf) {
-			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
-				   IXGBE_RXDCTL_RLPML_EN);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-		ixgbevf_rx_desc_queue_enable(adapter, i);
-	}
 
 	ixgbevf_configure_msix(adapter);
 
@@ -1686,7 +1711,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+		ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -1698,22 +1723,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+		ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl;
-	int i, j;
+	int i;
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
 
 	/* disable all enabled rx queues */
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
 	netif_tx_disable(netdev);
 
@@ -1734,10 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
-				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+				IXGBE_TXDCTL_SWFLSH);
 	}
 
 	netif_carrier_off(netdev);
@@ -1875,40 +1899,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-	int i;
+	struct ixgbevf_ring *ring;
+	int rx = 0, tx = 0;
 
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err_tx_ring_allocation;
+	for (; tx < adapter->num_tx_queues; tx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
 
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err_rx_ring_allocation;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = tx;
+		ring->reg_idx = tx;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = adapter->tx_ring_count;
-		adapter->tx_ring[i].queue_index = i;
-		/* reg_idx may be remapped later by DCB config */
-		adapter->tx_ring[i].reg_idx = i;
-		adapter->tx_ring[i].dev = &adapter->pdev->dev;
-		adapter->tx_ring[i].netdev = adapter->netdev;
+		adapter->tx_ring[tx] = ring;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = adapter->rx_ring_count;
-		adapter->rx_ring[i].queue_index = i;
-		adapter->rx_ring[i].reg_idx = i;
-		adapter->rx_ring[i].dev = &adapter->pdev->dev;
-		adapter->rx_ring[i].netdev = adapter->netdev;
+	for (; rx < adapter->num_rx_queues; rx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
+
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rx;
+		ring->reg_idx = rx;
+
+		adapter->rx_ring[rx] = ring;
 	}
 
 	return 0;
 
-err_rx_ring_allocation:
-	kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+	while (tx) {
+		kfree(adapter->tx_ring[--tx]);
+		adapter->tx_ring[tx] = NULL;
+	}
+
+	while (rx) {
+		kfree(adapter->rx_ring[--rx]);
+		adapter->rx_ring[rx] = NULL;
+	}
 	return -ENOMEM;
 }
 
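The err_allocation unwind relies on tx and rx counting exactly how many
rings were successfully allocated, so "while (tx)" with a pre-decrement
frees precisely those entries and clears the slots. The same pattern in
isolation:

	#include <stdlib.h>

	struct ring { int queue_index; };

	static void unwind_rings(struct ring *rings[], int allocated)
	{
		/* "allocated" is how many slots were filled before the
		 * failure; pre-decrement walks back over exactly those. */
		while (allocated) {
			free(rings[--allocated]);
			rings[allocated] = NULL;	/* leave the array clean */
		}
	}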
@@ -2099,6 +2133,17 @@ err_set_interrupt:
 **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
+
 	adapter->num_tx_queues = 0;
 	adapter->num_rx_queues = 0;
 
@@ -2229,11 +2274,11 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i].hw_csum_rx_error;
+			adapter->rx_ring[i]->hw_csum_rx_error;
 		adapter->hw_csum_rx_good +=
-			adapter->rx_ring[i].hw_csum_rx_good;
-		adapter->rx_ring[i].hw_csum_rx_error = 0;
-		adapter->rx_ring[i].hw_csum_rx_good = 0;
+			adapter->rx_ring[i]->hw_csum_rx_good;
+		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		adapter->rx_ring[i]->hw_csum_rx_good = 0;
 	}
 }
 
@@ -2396,6 +2441,10 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
 			  tx_ring->dma);
 
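The added guard makes the free routine safe on a ring whose descriptor
block was never allocated or was already released; assuming (as the driver
does elsewhere) that desc is cleared after freeing, the call becomes
idempotent. In general form:

	#include <stdlib.h>

	struct ring { void *desc; };

	static void free_ring_desc(struct ring *r)
	{
		if (!r->desc)		/* if not set, then don't free */
			return;
		free(r->desc);
		r->desc = NULL;		/* repeat calls are harmless no-ops */
	}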
@@ -2413,10 +2462,8 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		if (adapter->tx_ring[i].desc)
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-
+		if (adapter->tx_ring[i]->desc)
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2471,7 +2518,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2533,7 +2580,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2577,9 +2624,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		if (adapter->rx_ring[i].desc)
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
+		if (adapter->rx_ring[i]->desc)
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3069,7 +3115,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	tx_ring = &adapter->tx_ring[r_idx];
+	tx_ring = adapter->tx_ring[r_idx];
 
 	/*
 	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3222,8 +3268,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 #ifdef CONFIG_PM
 static int ixgbevf_resume(struct pci_dev *pdev)
 {
-	struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
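Wei's fix works because probe stores the net_device, not the adapter, as
the PCI driver-private data; suspend/resume must therefore fetch the
netdev first and derive the adapter with netdev_priv(). A sketch of the
convention (adapter type hypothetical):

	#include <linux/pci.h>
	#include <linux/netdevice.h>

	struct example_adapter;		/* hypothetical private struct */

	/* probe side: the net_device is what gets stored as drvdata */
	static void example_store(struct pci_dev *pdev,
				  struct net_device *netdev)
	{
		pci_set_drvdata(pdev, netdev);
	}

	/* suspend/resume side: recover it in the same order */
	static struct example_adapter *example_adapter_of(struct pci_dev *pdev)
	{
		struct net_device *netdev = pci_get_drvdata(pdev);

		return netdev_priv(netdev); /* adapter lives in the priv area */
	}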
@@ -3282,7 +3328,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = &adapter->rx_ring[i];
+		ring = adapter->rx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			bytes = ring->total_bytes;
@@ -3293,7 +3339,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = &adapter->tx_ring[i];
+		ring = adapter->tx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			bytes = ring->total_bytes;
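The loops above are the reader half of the u64_stats_sync protocol:
re-read until the sequence count is stable. For completeness, the writer
half looks roughly like this (ring type schematic, not the driver's):

	#include <linux/u64_stats_sync.h>

	struct example_ring_stats {
		u64 total_bytes;
		u64 total_packets;
		struct u64_stats_sync syncp;
	};

	/* Writer side: bump the sequence around the 64-bit updates so a
	 * 32-bit reader never observes a torn value; on 64-bit kernels
	 * these helpers compile away. */
	static void example_stats_add(struct example_ring_stats *s,
				      unsigned int bytes)
	{
		u64_stats_update_begin(&s->syncp);
		s->total_bytes += bytes;
		s->total_packets++;
		u64_stats_update_end(&s->syncp);
	}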
@@ -3528,9 +3574,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
 	hw_dbg(&adapter->hw, "Remove complete\n");
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);