author     David S. Miller <davem@davemloft.net>   2014-01-17 22:16:02 -0500
committer  David S. Miller <davem@davemloft.net>   2014-01-17 22:16:02 -0500
commit     7b1e46c5a265b142dd05ff3463fa3e0a1f4e4172 (patch)
tree       5c141ddec63c0ad7bef6656d92c1bb68a83e3f74
parent     144651d1f3ea374781fe3aa016dd05d7c4bd5ac6 (diff)
parent     29d37fa162af3ba70229326f02831e24dcba64eb (diff)
Merge branch 'ixgbevf'
Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates from Emil to ixgbevf.

He cleans up the code by removing the adapter structure as a parameter
from multiple functions in favor of using the ixgbevf_ring structure, and
moves hot-path specific statistics into the ring structure for anticipated
performance gains. He also removes the Tx/Rx counters for checksum offload
and adds counters for tx_restart_queue and tx_timeout_count.

Next he makes the first tx_buffer structure act as a central storage
location for most of the skb info we are about to transmit, then takes
advantage of the DMA buffer always being present in the first descriptor
and mapped as single, allowing a call to dma_unmap_single() which
alleviates the need to check the DMA mapping type in
ixgbevf_clean_tx_irq().

Finally he merges the ixgbevf_tx_map call and the ixgbevf_tx_queue call
into a single function.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
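The central Tx-path change is that the first ixgbevf_tx_buffer of a packet now
carries the skb, byte/segment counts, and the head DMA mapping recorded with
the DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() helpers, so completion can
call dma_unmap_single() on it directly. The following is a minimal sketch of
that pattern, simplified from the patch below: the struct mirrors the new
ixgbevf_tx_buffer, while the two helper functions are illustrative stand-ins
for parts of ixgbevf_tx_map() and ixgbevf_clean_tx_irq(), with ring indexing,
descriptor writes and frag handling omitted.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Simplified from ixgbevf.h: the first buffer of a packet is the central
 * storage location for everything needed at completion time.
 */
struct ixgbevf_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

/* Transmit side: map the skb head as a single buffer and record the
 * mapping in the first tx_buffer (what ixgbevf_tx_map() does before
 * writing the descriptor).
 */
static int example_map_head(struct device *dev, struct ixgbevf_tx_buffer *first)
{
	unsigned int size = skb_headlen(first->skb);
	dma_addr_t dma;

	dma = dma_map_single(dev, first->skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* record length and DMA address for the cleanup path */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);
	return 0;
}

/* Completion side: the head is always mapped as single, so the cleanup
 * path can unmap it unconditionally (what ixgbevf_clean_tx_irq() now
 * does once next_to_watch indicates the packet is done).
 */
static void example_unmap_head(struct device *dev, struct ixgbevf_tx_buffer *first)
{
	dev_kfree_skb_any(first->skb);
	dma_unmap_single(dev, dma_unmap_addr(first, dma),
			 dma_unmap_len(first, len), DMA_TO_DEVICE);
	first->skb = NULL;
	dma_unmap_len_set(first, len, 0);
}

Because the head mapping is always dma_map_single() and is recorded in the
first buffer, the completion path no longer needs the old mapped_as_page flag
to choose between dma_unmap_single() and dma_unmap_page() for every buffer;
only the remaining frag buffers are unmapped as pages.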
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h      |   1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c      |  62
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  90
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 620
4 files changed, 403 insertions, 370 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5426b2dee6a6..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
183#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ 183#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
184#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ 184#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
185#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ 185#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
186#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
186 187
187/* Transmit Descriptor - Advanced */ 188/* Transmit Descriptor - Advanced */
188union ixgbe_adv_tx_desc { 189union ixgbe_adv_tx_desc {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 515ba4e29760..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
77 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, 77 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
78 stats.saved_reset_vfgotc)}, 78 stats.saved_reset_vfgotc)},
79 {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, 79 {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
80 {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
81 {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
80 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, 82 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
81 stats.saved_reset_vfmprc)}, 83 stats.saved_reset_vfmprc)},
82 {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
83 {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, 84 {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
84 {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
85#ifdef BP_EXTENDED_STATS 85#ifdef BP_EXTENDED_STATS
86 {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, 86 {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
87 {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, 87 {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -305,18 +305,18 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
305 /* clone ring and setup updated count */ 305 /* clone ring and setup updated count */
306 tx_ring[i] = *adapter->tx_ring[i]; 306 tx_ring[i] = *adapter->tx_ring[i];
307 tx_ring[i].count = new_tx_count; 307 tx_ring[i].count = new_tx_count;
308 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); 308 err = ixgbevf_setup_tx_resources(&tx_ring[i]);
309 if (!err) 309 if (err) {
310 continue; 310 while (i) {
311 while (i) { 311 i--;
312 i--; 312 ixgbevf_free_tx_resources(&tx_ring[i]);
313 ixgbevf_free_tx_resources(adapter, &tx_ring[i]); 313 }
314 }
315 314
316 vfree(tx_ring); 315 vfree(tx_ring);
317 tx_ring = NULL; 316 tx_ring = NULL;
318 317
319 goto clear_reset; 318 goto clear_reset;
319 }
320 } 320 }
321 } 321 }
322 322
@@ -331,18 +331,18 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
331 /* clone ring and setup updated count */ 331 /* clone ring and setup updated count */
332 rx_ring[i] = *adapter->rx_ring[i]; 332 rx_ring[i] = *adapter->rx_ring[i];
333 rx_ring[i].count = new_rx_count; 333 rx_ring[i].count = new_rx_count;
334 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 334 err = ixgbevf_setup_rx_resources(&rx_ring[i]);
335 if (!err) 335 if (err) {
336 continue; 336 while (i) {
337 while (i) { 337 i--;
338 i--; 338 ixgbevf_free_rx_resources(&rx_ring[i]);
339 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 339 }
340 }
341 340
342 vfree(rx_ring); 341 vfree(rx_ring);
343 rx_ring = NULL; 342 rx_ring = NULL;
344 343
345 goto clear_reset; 344 goto clear_reset;
345 }
346 } 346 }
347 } 347 }
348 348
@@ -352,7 +352,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
352 /* Tx */ 352 /* Tx */
353 if (tx_ring) { 353 if (tx_ring) {
354 for (i = 0; i < adapter->num_tx_queues; i++) { 354 for (i = 0; i < adapter->num_tx_queues; i++) {
355 ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]); 355 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
356 *adapter->tx_ring[i] = tx_ring[i]; 356 *adapter->tx_ring[i] = tx_ring[i];
357 } 357 }
358 adapter->tx_ring_count = new_tx_count; 358 adapter->tx_ring_count = new_tx_count;
@@ -364,7 +364,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
364 /* Rx */ 364 /* Rx */
365 if (rx_ring) { 365 if (rx_ring) {
366 for (i = 0; i < adapter->num_rx_queues; i++) { 366 for (i = 0; i < adapter->num_rx_queues; i++) {
367 ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]); 367 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
368 *adapter->rx_ring[i] = rx_ring[i]; 368 *adapter->rx_ring[i] = rx_ring[i];
369 } 369 }
370 adapter->rx_ring_count = new_rx_count; 370 adapter->rx_ring_count = new_rx_count;
@@ -380,7 +380,7 @@ clear_reset:
380 /* free Tx resources if Rx error is encountered */ 380 /* free Tx resources if Rx error is encountered */
381 if (tx_ring) { 381 if (tx_ring) {
382 for (i = 0; i < adapter->num_tx_queues; i++) 382 for (i = 0; i < adapter->num_tx_queues; i++)
383 ixgbevf_free_tx_resources(adapter, &tx_ring[i]); 383 ixgbevf_free_tx_resources(&tx_ring[i]);
384 vfree(tx_ring); 384 vfree(tx_ring);
385 } 385 }
386 386
@@ -411,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
411 tx_yields = 0, tx_cleaned = 0, tx_missed = 0; 411 tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
412 412
413 for (i = 0; i < adapter->num_rx_queues; i++) { 413 for (i = 0; i < adapter->num_rx_queues; i++) {
414 rx_yields += adapter->rx_ring[i]->bp_yields; 414 rx_yields += adapter->rx_ring[i]->stats.yields;
415 rx_cleaned += adapter->rx_ring[i]->bp_cleaned; 415 rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
416 rx_yields += adapter->rx_ring[i]->bp_yields; 416 rx_yields += adapter->rx_ring[i]->stats.yields;
417 } 417 }
418 418
419 for (i = 0; i < adapter->num_tx_queues; i++) { 419 for (i = 0; i < adapter->num_tx_queues; i++) {
420 tx_yields += adapter->tx_ring[i]->bp_yields; 420 tx_yields += adapter->tx_ring[i]->stats.yields;
421 tx_cleaned += adapter->tx_ring[i]->bp_cleaned; 421 tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
422 tx_yields += adapter->tx_ring[i]->bp_yields; 422 tx_yields += adapter->tx_ring[i]->stats.yields;
423 } 423 }
424 424
425 adapter->bp_rx_yields = rx_yields; 425 adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0547e40980cb..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
46/* wrapper around a pointer to a socket buffer, 46/* wrapper around a pointer to a socket buffer,
47 * so a DMA handle can be stored along with the buffer */ 47 * so a DMA handle can be stored along with the buffer */
48struct ixgbevf_tx_buffer { 48struct ixgbevf_tx_buffer {
49 struct sk_buff *skb;
50 dma_addr_t dma;
51 unsigned long time_stamp;
52 union ixgbe_adv_tx_desc *next_to_watch; 49 union ixgbe_adv_tx_desc *next_to_watch;
53 u16 length; 50 unsigned long time_stamp;
54 u16 mapped_as_page; 51 struct sk_buff *skb;
52 unsigned int bytecount;
53 unsigned short gso_segs;
54 __be16 protocol;
55 DEFINE_DMA_UNMAP_ADDR(dma);
56 DEFINE_DMA_UNMAP_LEN(len);
57 u32 tx_flags;
55}; 58};
56 59
57struct ixgbevf_rx_buffer { 60struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
59 dma_addr_t dma; 62 dma_addr_t dma;
60}; 63};
61 64
65struct ixgbevf_stats {
66 u64 packets;
67 u64 bytes;
68#ifdef BP_EXTENDED_STATS
69 u64 yields;
70 u64 misses;
71 u64 cleaned;
72#endif
73};
74
75struct ixgbevf_tx_queue_stats {
76 u64 restart_queue;
77 u64 tx_busy;
78 u64 tx_done_old;
79};
80
81struct ixgbevf_rx_queue_stats {
82 u64 non_eop_descs;
83 u64 alloc_rx_page_failed;
84 u64 alloc_rx_buff_failed;
85 u64 csum_err;
86};
87
62struct ixgbevf_ring { 88struct ixgbevf_ring {
63 struct ixgbevf_ring *next; 89 struct ixgbevf_ring *next;
64 struct net_device *netdev; 90 struct net_device *netdev;
@@ -70,22 +96,19 @@ struct ixgbevf_ring {
70 unsigned int next_to_use; 96 unsigned int next_to_use;
71 unsigned int next_to_clean; 97 unsigned int next_to_clean;
72 98
73 int queue_index; /* needed for multiqueue queue management */
74 union { 99 union {
75 struct ixgbevf_tx_buffer *tx_buffer_info; 100 struct ixgbevf_tx_buffer *tx_buffer_info;
76 struct ixgbevf_rx_buffer *rx_buffer_info; 101 struct ixgbevf_rx_buffer *rx_buffer_info;
77 }; 102 };
78 103
79 u64 total_bytes; 104 struct ixgbevf_stats stats;
80 u64 total_packets; 105 struct u64_stats_sync syncp;
81 struct u64_stats_sync syncp; 106 union {
107 struct ixgbevf_tx_queue_stats tx_stats;
108 struct ixgbevf_rx_queue_stats rx_stats;
109 };
110
82 u64 hw_csum_rx_error; 111 u64 hw_csum_rx_error;
83 u64 hw_csum_rx_good;
84#ifdef BP_EXTENDED_STATS
85 u64 bp_yields;
86 u64 bp_misses;
87 u64 bp_cleaned;
88#endif
89 u8 __iomem *tail; 112 u8 __iomem *tail;
90 113
91 u16 reg_idx; /* holds the special value that gets the hardware register 114 u16 reg_idx; /* holds the special value that gets the hardware register
@@ -93,6 +116,7 @@ struct ixgbevf_ring {
93 * for DCB and RSS modes */ 116 * for DCB and RSS modes */
94 117
95 u16 rx_buf_len; 118 u16 rx_buf_len;
119 int queue_index; /* needed for multiqueue queue management */
96}; 120};
97 121
98/* How many Rx Buffers do we bundle into one write to the hardware ? */ 122/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -123,8 +147,6 @@ struct ixgbevf_ring {
123#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) 147#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
124#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) 148#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
125#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) 149#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
126#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
127#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
128#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 150#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
129#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 151#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
130#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 152#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -186,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
186 q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; 208 q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
187 rc = false; 209 rc = false;
188#ifdef BP_EXTENDED_STATS 210#ifdef BP_EXTENDED_STATS
189 q_vector->tx.ring->bp_yields++; 211 q_vector->tx.ring->stats.yields++;
190#endif 212#endif
191 } else { 213 } else {
192 /* we don't care if someone yielded */ 214 /* we don't care if someone yielded */
@@ -221,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
221 q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; 243 q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
222 rc = false; 244 rc = false;
223#ifdef BP_EXTENDED_STATS 245#ifdef BP_EXTENDED_STATS
224 q_vector->rx.ring->bp_yields++; 246 q_vector->rx.ring->stats.yields++;
225#endif 247#endif
226 } else { 248 } else {
227 /* preserve yield marks */ 249 /* preserve yield marks */
@@ -314,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
314struct ixgbevf_adapter { 336struct ixgbevf_adapter {
315 struct timer_list watchdog_timer; 337 struct timer_list watchdog_timer;
316 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 338 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
317 u16 bd_number;
318 struct work_struct reset_task; 339 struct work_struct reset_task;
319 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 340 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
320 341
@@ -327,25 +348,18 @@ struct ixgbevf_adapter {
327 u32 eims_other; 348 u32 eims_other;
328 349
329 /* TX */ 350 /* TX */
330 struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
331 int num_tx_queues; 351 int num_tx_queues;
352 struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
332 u64 restart_queue; 353 u64 restart_queue;
333 u64 hw_csum_tx_good;
334 u64 lsc_int;
335 u64 hw_tso_ctxt;
336 u64 hw_tso6_ctxt;
337 u32 tx_timeout_count; 354 u32 tx_timeout_count;
338 355
339 /* RX */ 356 /* RX */
340 struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
341 int num_rx_queues; 357 int num_rx_queues;
358 struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
342 u64 hw_csum_rx_error; 359 u64 hw_csum_rx_error;
343 u64 hw_rx_no_dma_resources; 360 u64 hw_rx_no_dma_resources;
344 u64 hw_csum_rx_good;
345 u64 non_eop_descs; 361 u64 non_eop_descs;
346 int num_msix_vectors; 362 int num_msix_vectors;
347 struct msix_entry *msix_entries;
348
349 u32 alloc_rx_page_failed; 363 u32 alloc_rx_page_failed;
350 u32 alloc_rx_buff_failed; 364 u32 alloc_rx_buff_failed;
351 365
@@ -357,6 +371,8 @@ struct ixgbevf_adapter {
357#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) 371#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
358#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) 372#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
359 373
374 struct msix_entry *msix_entries;
375
360 /* OS defined structs */ 376 /* OS defined structs */
361 struct net_device *netdev; 377 struct net_device *netdev;
362 struct pci_dev *pdev; 378 struct pci_dev *pdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
364 /* structs defined in ixgbe_vf.h */ 380 /* structs defined in ixgbe_vf.h */
365 struct ixgbe_hw hw; 381 struct ixgbe_hw hw;
366 u16 msg_enable; 382 u16 msg_enable;
367 struct ixgbevf_hw_stats stats; 383 u16 bd_number;
368 /* Interrupt Throttle Rate */ 384 /* Interrupt Throttle Rate */
369 u32 eitr_param; 385 u32 eitr_param;
370 386
387 struct ixgbevf_hw_stats stats;
388
371 unsigned long state; 389 unsigned long state;
372 u64 tx_busy; 390 u64 tx_busy;
373 unsigned int tx_ring_count; 391 unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
386 u32 link_speed; 404 u32 link_speed;
387 bool link_up; 405 bool link_up;
388 406
389 struct work_struct watchdog_task;
390
391 spinlock_t mbx_lock; 407 spinlock_t mbx_lock;
408
409 struct work_struct watchdog_task;
392}; 410};
393 411
394enum ixbgevf_state_t { 412enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
420void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 438void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
421void ixgbevf_reset(struct ixgbevf_adapter *adapter); 439void ixgbevf_reset(struct ixgbevf_adapter *adapter);
422void ixgbevf_set_ethtool_ops(struct net_device *netdev); 440void ixgbevf_set_ethtool_ops(struct net_device *netdev);
423int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); 441int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
424int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); 442int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
425void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); 443void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
426void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); 444void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
427void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); 445void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
428int ethtool_ioctl(struct ifreq *ifr); 446int ethtool_ioctl(struct ifreq *ifr);
429 447
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c9291803fdd..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -145,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
145} 145}
146 146
147static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 147static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
148 struct ixgbevf_tx_buffer 148 struct ixgbevf_tx_buffer *tx_buffer)
149 *tx_buffer_info) 149{
150{ 150 if (tx_buffer->skb) {
151 if (tx_buffer_info->dma) { 151 dev_kfree_skb_any(tx_buffer->skb);
152 if (tx_buffer_info->mapped_as_page) 152 if (dma_unmap_len(tx_buffer, len))
153 dma_unmap_page(tx_ring->dev,
154 tx_buffer_info->dma,
155 tx_buffer_info->length,
156 DMA_TO_DEVICE);
157 else
158 dma_unmap_single(tx_ring->dev, 153 dma_unmap_single(tx_ring->dev,
159 tx_buffer_info->dma, 154 dma_unmap_addr(tx_buffer, dma),
160 tx_buffer_info->length, 155 dma_unmap_len(tx_buffer, len),
161 DMA_TO_DEVICE); 156 DMA_TO_DEVICE);
162 tx_buffer_info->dma = 0; 157 } else if (dma_unmap_len(tx_buffer, len)) {
158 dma_unmap_page(tx_ring->dev,
159 dma_unmap_addr(tx_buffer, dma),
160 dma_unmap_len(tx_buffer, len),
161 DMA_TO_DEVICE);
163 } 162 }
164 if (tx_buffer_info->skb) { 163 tx_buffer->next_to_watch = NULL;
165 dev_kfree_skb_any(tx_buffer_info->skb); 164 tx_buffer->skb = NULL;
166 tx_buffer_info->skb = NULL; 165 dma_unmap_len_set(tx_buffer, len, 0);
167 } 166 /* tx_buffer must be completely set up in the transmit path */
168 tx_buffer_info->time_stamp = 0;
169 /* tx_buffer_info must be completely set up in the transmit path */
170} 167}
171 168
172#define IXGBE_MAX_TXD_PWR 14 169#define IXGBE_MAX_TXD_PWR 14
@@ -187,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
187 struct ixgbevf_ring *tx_ring) 184 struct ixgbevf_ring *tx_ring)
188{ 185{
189 struct ixgbevf_adapter *adapter = q_vector->adapter; 186 struct ixgbevf_adapter *adapter = q_vector->adapter;
190 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 187 struct ixgbevf_tx_buffer *tx_buffer;
191 struct ixgbevf_tx_buffer *tx_buffer_info; 188 union ixgbe_adv_tx_desc *tx_desc;
192 unsigned int i, count = 0;
193 unsigned int total_bytes = 0, total_packets = 0; 189 unsigned int total_bytes = 0, total_packets = 0;
190 unsigned int budget = tx_ring->count / 2;
191 unsigned int i = tx_ring->next_to_clean;
194 192
195 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
196 return true; 194 return true;
197 195
198 i = tx_ring->next_to_clean; 196 tx_buffer = &tx_ring->tx_buffer_info[i];
199 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 197 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
200 eop_desc = tx_buffer_info->next_to_watch; 198 i -= tx_ring->count;
201 199
202 do { 200 do {
203 bool cleaned = false; 201 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
204 202
205 /* if next_to_watch is not set then there is no work pending */ 203 /* if next_to_watch is not set then there is no work pending */
206 if (!eop_desc) 204 if (!eop_desc)
@@ -214,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
214 break; 212 break;
215 213
216 /* clear next_to_watch to prevent false hangs */ 214 /* clear next_to_watch to prevent false hangs */
217 tx_buffer_info->next_to_watch = NULL; 215 tx_buffer->next_to_watch = NULL;
218 216
219 for ( ; !cleaned; count++) { 217 /* update the statistics for this packet */
220 struct sk_buff *skb; 218 total_bytes += tx_buffer->bytecount;
221 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 219 total_packets += tx_buffer->gso_segs;
222 cleaned = (tx_desc == eop_desc);
223 skb = tx_buffer_info->skb;
224
225 if (cleaned && skb) {
226 unsigned int segs, bytecount;
227
228 /* gso_segs is currently only valid for tcp */
229 segs = skb_shinfo(skb)->gso_segs ?: 1;
230 /* multiply data chunks by size of headers */
231 bytecount = ((segs - 1) * skb_headlen(skb)) +
232 skb->len;
233 total_packets += segs;
234 total_bytes += bytecount;
235 }
236 220
237 ixgbevf_unmap_and_free_tx_resource(tx_ring, 221 /* free the skb */
238 tx_buffer_info); 222 dev_kfree_skb_any(tx_buffer->skb);
239 223
240 tx_desc->wb.status = 0; 224 /* unmap skb header data */
225 dma_unmap_single(tx_ring->dev,
226 dma_unmap_addr(tx_buffer, dma),
227 dma_unmap_len(tx_buffer, len),
228 DMA_TO_DEVICE);
241 229
230 /* clear tx_buffer data */
231 tx_buffer->skb = NULL;
232 dma_unmap_len_set(tx_buffer, len, 0);
233
234 /* unmap remaining buffers */
235 while (tx_desc != eop_desc) {
236 tx_buffer++;
237 tx_desc++;
242 i++; 238 i++;
243 if (i == tx_ring->count) 239 if (unlikely(!i)) {
244 i = 0; 240 i -= tx_ring->count;
241 tx_buffer = tx_ring->tx_buffer_info;
242 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
243 }
245 244
246 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 245 /* unmap any remaining paged data */
246 if (dma_unmap_len(tx_buffer, len)) {
247 dma_unmap_page(tx_ring->dev,
248 dma_unmap_addr(tx_buffer, dma),
249 dma_unmap_len(tx_buffer, len),
250 DMA_TO_DEVICE);
251 dma_unmap_len_set(tx_buffer, len, 0);
252 }
253 }
254
255 /* move us one more past the eop_desc for start of next pkt */
256 tx_buffer++;
257 tx_desc++;
258 i++;
259 if (unlikely(!i)) {
260 i -= tx_ring->count;
261 tx_buffer = tx_ring->tx_buffer_info;
262 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
247 } 263 }
248 264
249 eop_desc = tx_buffer_info->next_to_watch; 265 /* issue prefetch for next Tx descriptor */
250 } while (count < tx_ring->count); 266 prefetch(tx_desc);
251 267
268 /* update budget accounting */
269 budget--;
270 } while (likely(budget));
271
272 i += tx_ring->count;
252 tx_ring->next_to_clean = i; 273 tx_ring->next_to_clean = i;
274 u64_stats_update_begin(&tx_ring->syncp);
275 tx_ring->stats.bytes += total_bytes;
276 tx_ring->stats.packets += total_packets;
277 u64_stats_update_end(&tx_ring->syncp);
278 q_vector->tx.total_bytes += total_bytes;
279 q_vector->tx.total_packets += total_packets;
253 280
254#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 281#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
255 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 282 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
256 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 283 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
257 /* Make sure that anybody stopping the queue after this 284 /* Make sure that anybody stopping the queue after this
258 * sees the new next_to_clean. 285 * sees the new next_to_clean.
259 */ 286 */
260 smp_mb(); 287 smp_mb();
288
261 if (__netif_subqueue_stopped(tx_ring->netdev, 289 if (__netif_subqueue_stopped(tx_ring->netdev,
262 tx_ring->queue_index) && 290 tx_ring->queue_index) &&
263 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 291 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264 netif_wake_subqueue(tx_ring->netdev, 292 netif_wake_subqueue(tx_ring->netdev,
265 tx_ring->queue_index); 293 tx_ring->queue_index);
266 ++adapter->restart_queue; 294 ++tx_ring->tx_stats.restart_queue;
267 } 295 }
268 } 296 }
269 297
270 u64_stats_update_begin(&tx_ring->syncp); 298 return !!budget;
271 tx_ring->total_bytes += total_bytes;
272 tx_ring->total_packets += total_packets;
273 u64_stats_update_end(&tx_ring->syncp);
274 q_vector->tx.total_bytes += total_bytes;
275 q_vector->tx.total_packets += total_packets;
276
277 return count < tx_ring->count;
278} 299}
279 300
280/** 301/**
@@ -343,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
343 /* if IP and error */ 364 /* if IP and error */
344 if ((status_err & IXGBE_RXD_STAT_IPCS) && 365 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
345 (status_err & IXGBE_RXDADV_ERR_IPE)) { 366 (status_err & IXGBE_RXDADV_ERR_IPE)) {
346 ring->hw_csum_rx_error++; 367 ring->rx_stats.csum_err++;
347 return; 368 return;
348 } 369 }
349 370
@@ -351,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
351 return; 372 return;
352 373
353 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 374 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
354 ring->hw_csum_rx_error++; 375 ring->rx_stats.csum_err++;
355 return; 376 return;
356 } 377 }
357 378
358 /* It must be a TCP or UDP packet with a valid checksum */ 379 /* It must be a TCP or UDP packet with a valid checksum */
359 skb->ip_summed = CHECKSUM_UNNECESSARY; 380 skb->ip_summed = CHECKSUM_UNNECESSARY;
360 ring->hw_csum_rx_good++;
361} 381}
362 382
363/** 383/**
364 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 384 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
365 * @adapter: address of board private structure 385 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
366 **/ 386 **/
367static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 387static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
368 struct ixgbevf_ring *rx_ring,
369 int cleaned_count) 388 int cleaned_count)
370{ 389{
371 struct pci_dev *pdev = adapter->pdev;
372 union ixgbe_adv_rx_desc *rx_desc; 390 union ixgbe_adv_rx_desc *rx_desc;
373 struct ixgbevf_rx_buffer *bi; 391 struct ixgbevf_rx_buffer *bi;
374 unsigned int i = rx_ring->next_to_use; 392 unsigned int i = rx_ring->next_to_use;
375 393
376 bi = &rx_ring->rx_buffer_info[i];
377
378 while (cleaned_count--) { 394 while (cleaned_count--) {
379 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 395 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
396 bi = &rx_ring->rx_buffer_info[i];
380 397
381 if (!bi->skb) { 398 if (!bi->skb) {
382 struct sk_buff *skb; 399 struct sk_buff *skb;
383 400
384 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 401 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
385 rx_ring->rx_buf_len); 402 rx_ring->rx_buf_len);
386 if (!skb) { 403 if (!skb)
387 adapter->alloc_rx_buff_failed++;
388 goto no_buffers; 404 goto no_buffers;
389 } 405
390 bi->skb = skb; 406 bi->skb = skb;
391 407
392 bi->dma = dma_map_single(&pdev->dev, skb->data, 408 bi->dma = dma_map_single(rx_ring->dev, skb->data,
393 rx_ring->rx_buf_len, 409 rx_ring->rx_buf_len,
394 DMA_FROM_DEVICE); 410 DMA_FROM_DEVICE);
395 if (dma_mapping_error(&pdev->dev, bi->dma)) { 411 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
396 dev_kfree_skb(skb); 412 dev_kfree_skb(skb);
397 bi->skb = NULL; 413 bi->skb = NULL;
398 dev_err(&pdev->dev, "RX DMA map failed\n"); 414 dev_err(rx_ring->dev, "Rx DMA map failed\n");
399 break; 415 break;
400 } 416 }
401 } 417 }
@@ -404,10 +420,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
404 i++; 420 i++;
405 if (i == rx_ring->count) 421 if (i == rx_ring->count)
406 i = 0; 422 i = 0;
407 bi = &rx_ring->rx_buffer_info[i];
408 } 423 }
409 424
410no_buffers: 425no_buffers:
426 rx_ring->rx_stats.alloc_rx_buff_failed++;
411 if (rx_ring->next_to_use != i) 427 if (rx_ring->next_to_use != i)
412 ixgbevf_release_rx_desc(rx_ring, i); 428 ixgbevf_release_rx_desc(rx_ring, i);
413} 429}
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
424 struct ixgbevf_ring *rx_ring, 440 struct ixgbevf_ring *rx_ring,
425 int budget) 441 int budget)
426{ 442{
427 struct ixgbevf_adapter *adapter = q_vector->adapter;
428 struct pci_dev *pdev = adapter->pdev;
429 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 443 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
430 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 444 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
431 struct sk_buff *skb; 445 struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
451 rx_buffer_info->skb = NULL; 465 rx_buffer_info->skb = NULL;
452 466
453 if (rx_buffer_info->dma) { 467 if (rx_buffer_info->dma) {
454 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 468 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
455 rx_ring->rx_buf_len, 469 rx_ring->rx_buf_len,
456 DMA_FROM_DEVICE); 470 DMA_FROM_DEVICE);
457 rx_buffer_info->dma = 0; 471 rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
471 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 485 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
472 skb->next = next_buffer->skb; 486 skb->next = next_buffer->skb;
473 IXGBE_CB(skb->next)->prev = skb; 487 IXGBE_CB(skb->next)->prev = skb;
474 adapter->non_eop_descs++; 488 rx_ring->rx_stats.non_eop_descs++;
475 goto next_desc; 489 goto next_desc;
476 } 490 }
477 491
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
503 * source pruning. 517 * source pruning.
504 */ 518 */
505 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 519 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
506 ether_addr_equal(adapter->netdev->dev_addr, 520 ether_addr_equal(rx_ring->netdev->dev_addr,
507 eth_hdr(skb)->h_source)) { 521 eth_hdr(skb)->h_source)) {
508 dev_kfree_skb_irq(skb); 522 dev_kfree_skb_irq(skb);
509 goto next_desc; 523 goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
516 530
517 /* return some buffers to hardware, one at a time is too slow */ 531 /* return some buffers to hardware, one at a time is too slow */
518 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 532 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
519 ixgbevf_alloc_rx_buffers(adapter, rx_ring, 533 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
520 cleaned_count);
521 cleaned_count = 0; 534 cleaned_count = 0;
522 } 535 }
523 536
@@ -532,11 +545,11 @@ next_desc:
532 cleaned_count = ixgbevf_desc_unused(rx_ring); 545 cleaned_count = ixgbevf_desc_unused(rx_ring);
533 546
534 if (cleaned_count) 547 if (cleaned_count)
535 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 548 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
536 549
537 u64_stats_update_begin(&rx_ring->syncp); 550 u64_stats_update_begin(&rx_ring->syncp);
538 rx_ring->total_packets += total_rx_packets; 551 rx_ring->stats.packets += total_rx_packets;
539 rx_ring->total_bytes += total_rx_bytes; 552 rx_ring->stats.bytes += total_rx_bytes;
540 u64_stats_update_end(&rx_ring->syncp); 553 u64_stats_update_end(&rx_ring->syncp);
541 q_vector->rx.total_packets += total_rx_packets; 554 q_vector->rx.total_packets += total_rx_packets;
542 q_vector->rx.total_bytes += total_rx_bytes; 555 q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
641 found = ixgbevf_clean_rx_irq(q_vector, ring, 4); 654 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
642#ifdef BP_EXTENDED_STATS 655#ifdef BP_EXTENDED_STATS
643 if (found) 656 if (found)
644 ring->bp_cleaned += found; 657 ring->stats.cleaned += found;
645 else 658 else
646 ring->bp_misses++; 659 ring->stats.misses++;
647#endif 660#endif
648 if (found) 661 if (found)
649 break; 662 break;
@@ -1317,7 +1330,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1317 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1330 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1318 1331
1319 ixgbevf_rx_desc_queue_enable(adapter, ring); 1332 ixgbevf_rx_desc_queue_enable(adapter, ring);
1320 ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring)); 1333 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1321} 1334}
1322 1335
1323/** 1336/**
@@ -1633,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
1633 1646
1634/** 1647/**
1635 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1648 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1636 * @adapter: board private structure
1637 * @rx_ring: ring to free buffers from 1649 * @rx_ring: ring to free buffers from
1638 **/ 1650 **/
1639static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1651static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
1640 struct ixgbevf_ring *rx_ring)
1641{ 1652{
1642 struct pci_dev *pdev = adapter->pdev;
1643 unsigned long size; 1653 unsigned long size;
1644 unsigned int i; 1654 unsigned int i;
1645 1655
@@ -1652,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1652 1662
1653 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1663 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1654 if (rx_buffer_info->dma) { 1664 if (rx_buffer_info->dma) {
1655 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1665 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
1656 rx_ring->rx_buf_len, 1666 rx_ring->rx_buf_len,
1657 DMA_FROM_DEVICE); 1667 DMA_FROM_DEVICE);
1658 rx_buffer_info->dma = 0; 1668 rx_buffer_info->dma = 0;
@@ -1677,11 +1687,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1677 1687
1678/** 1688/**
1679 * ixgbevf_clean_tx_ring - Free Tx Buffers 1689 * ixgbevf_clean_tx_ring - Free Tx Buffers
1680 * @adapter: board private structure
1681 * @tx_ring: ring to be cleaned 1690 * @tx_ring: ring to be cleaned
1682 **/ 1691 **/
1683static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1692static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1684 struct ixgbevf_ring *tx_ring)
1685{ 1693{
1686 struct ixgbevf_tx_buffer *tx_buffer_info; 1694 struct ixgbevf_tx_buffer *tx_buffer_info;
1687 unsigned long size; 1695 unsigned long size;
@@ -1711,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1711 int i; 1719 int i;
1712 1720
1713 for (i = 0; i < adapter->num_rx_queues; i++) 1721 for (i = 0; i < adapter->num_rx_queues; i++)
1714 ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]); 1722 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
1715} 1723}
1716 1724
1717/** 1725/**
@@ -1723,7 +1731,7 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1723 int i; 1731 int i;
1724 1732
1725 for (i = 0; i < adapter->num_tx_queues; i++) 1733 for (i = 0; i < adapter->num_tx_queues; i++)
1726 ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]); 1734 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
1727} 1735}
1728 1736
1729void ixgbevf_down(struct ixgbevf_adapter *adapter) 1737void ixgbevf_down(struct ixgbevf_adapter *adapter)
@@ -2275,10 +2283,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2275 for (i = 0; i < adapter->num_rx_queues; i++) { 2283 for (i = 0; i < adapter->num_rx_queues; i++) {
2276 adapter->hw_csum_rx_error += 2284 adapter->hw_csum_rx_error +=
2277 adapter->rx_ring[i]->hw_csum_rx_error; 2285 adapter->rx_ring[i]->hw_csum_rx_error;
2278 adapter->hw_csum_rx_good +=
2279 adapter->rx_ring[i]->hw_csum_rx_good;
2280 adapter->rx_ring[i]->hw_csum_rx_error = 0; 2286 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2281 adapter->rx_ring[i]->hw_csum_rx_good = 0;
2282 } 2287 }
2283} 2288}
2284 2289
@@ -2426,17 +2431,13 @@ pf_has_reset:
2426 2431
2427/** 2432/**
2428 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2433 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2429 * @adapter: board private structure
2430 * @tx_ring: Tx descriptor ring for a specific queue 2434 * @tx_ring: Tx descriptor ring for a specific queue
2431 * 2435 *
2432 * Free all transmit software resources 2436 * Free all transmit software resources
2433 **/ 2437 **/
2434void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2438void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2435 struct ixgbevf_ring *tx_ring)
2436{ 2439{
2437 struct pci_dev *pdev = adapter->pdev; 2440 ixgbevf_clean_tx_ring(tx_ring);
2438
2439 ixgbevf_clean_tx_ring(adapter, tx_ring);
2440 2441
2441 vfree(tx_ring->tx_buffer_info); 2442 vfree(tx_ring->tx_buffer_info);
2442 tx_ring->tx_buffer_info = NULL; 2443 tx_ring->tx_buffer_info = NULL;
@@ -2445,7 +2446,7 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2445 if (!tx_ring->desc) 2446 if (!tx_ring->desc)
2446 return; 2447 return;
2447 2448
2448 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2449 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2449 tx_ring->dma); 2450 tx_ring->dma);
2450 2451
2451 tx_ring->desc = NULL; 2452 tx_ring->desc = NULL;
@@ -2463,20 +2464,17 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2463 2464
2464 for (i = 0; i < adapter->num_tx_queues; i++) 2465 for (i = 0; i < adapter->num_tx_queues; i++)
2465 if (adapter->tx_ring[i]->desc) 2466 if (adapter->tx_ring[i]->desc)
2466 ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]); 2467 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2467} 2468}
2468 2469
2469/** 2470/**
2470 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2471 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2471 * @adapter: board private structure
2472 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2472 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2473 * 2473 *
2474 * Return 0 on success, negative on failure 2474 * Return 0 on success, negative on failure
2475 **/ 2475 **/
2476int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2476int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2477 struct ixgbevf_ring *tx_ring)
2478{ 2477{
2479 struct pci_dev *pdev = adapter->pdev;
2480 int size; 2478 int size;
2481 2479
2482 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2480 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2488,7 +2486,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2488 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2486 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2489 tx_ring->size = ALIGN(tx_ring->size, 4096); 2487 tx_ring->size = ALIGN(tx_ring->size, 4096);
2490 2488
2491 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2489 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2492 &tx_ring->dma, GFP_KERNEL); 2490 &tx_ring->dma, GFP_KERNEL);
2493 if (!tx_ring->desc) 2491 if (!tx_ring->desc)
2494 goto err; 2492 goto err;
@@ -2518,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2518 int i, err = 0; 2516 int i, err = 0;
2519 2517
2520 for (i = 0; i < adapter->num_tx_queues; i++) { 2518 for (i = 0; i < adapter->num_tx_queues; i++) {
2521 err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]); 2519 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2522 if (!err) 2520 if (!err)
2523 continue; 2521 continue;
2524 hw_dbg(&adapter->hw, 2522 hw_dbg(&adapter->hw,
@@ -2531,37 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2531 2529
2532/** 2530/**
2533 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2531 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2534 * @adapter: board private structure
2535 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2532 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2536 * 2533 *
2537 * Returns 0 on success, negative on failure 2534 * Returns 0 on success, negative on failure
2538 **/ 2535 **/
2539int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2536int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2540 struct ixgbevf_ring *rx_ring)
2541{ 2537{
2542 struct pci_dev *pdev = adapter->pdev;
2543 int size; 2538 int size;
2544 2539
2545 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2540 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2546 rx_ring->rx_buffer_info = vzalloc(size); 2541 rx_ring->rx_buffer_info = vzalloc(size);
2547 if (!rx_ring->rx_buffer_info) 2542 if (!rx_ring->rx_buffer_info)
2548 goto alloc_failed; 2543 goto err;
2549 2544
2550 /* Round up to nearest 4K */ 2545 /* Round up to nearest 4K */
2551 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2546 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2552 rx_ring->size = ALIGN(rx_ring->size, 4096); 2547 rx_ring->size = ALIGN(rx_ring->size, 4096);
2553 2548
2554 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2549 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2555 &rx_ring->dma, GFP_KERNEL); 2550 &rx_ring->dma, GFP_KERNEL);
2556 2551
2557 if (!rx_ring->desc) { 2552 if (!rx_ring->desc)
2558 vfree(rx_ring->rx_buffer_info); 2553 goto err;
2559 rx_ring->rx_buffer_info = NULL;
2560 goto alloc_failed;
2561 }
2562 2554
2563 return 0; 2555 return 0;
2564alloc_failed: 2556err:
2557 vfree(rx_ring->rx_buffer_info);
2558 rx_ring->rx_buffer_info = NULL;
2559 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2565 return -ENOMEM; 2560 return -ENOMEM;
2566} 2561}
2567 2562
@@ -2580,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2580 int i, err = 0; 2575 int i, err = 0;
2581 2576
2582 for (i = 0; i < adapter->num_rx_queues; i++) { 2577 for (i = 0; i < adapter->num_rx_queues; i++) {
2583 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); 2578 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2584 if (!err) 2579 if (!err)
2585 continue; 2580 continue;
2586 hw_dbg(&adapter->hw, 2581 hw_dbg(&adapter->hw,
@@ -2592,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2592 2587
2593/** 2588/**
2594 * ixgbevf_free_rx_resources - Free Rx Resources 2589 * ixgbevf_free_rx_resources - Free Rx Resources
2595 * @adapter: board private structure
2596 * @rx_ring: ring to clean the resources from 2590 * @rx_ring: ring to clean the resources from
2597 * 2591 *
2598 * Free all receive software resources 2592 * Free all receive software resources
2599 **/ 2593 **/
2600void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2594void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2601 struct ixgbevf_ring *rx_ring)
2602{ 2595{
2603 struct pci_dev *pdev = adapter->pdev; 2596 ixgbevf_clean_rx_ring(rx_ring);
2604
2605 ixgbevf_clean_rx_ring(adapter, rx_ring);
2606 2597
2607 vfree(rx_ring->rx_buffer_info); 2598 vfree(rx_ring->rx_buffer_info);
2608 rx_ring->rx_buffer_info = NULL; 2599 rx_ring->rx_buffer_info = NULL;
2609 2600
2610 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2601 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2611 rx_ring->dma); 2602 rx_ring->dma);
2612 2603
2613 rx_ring->desc = NULL; 2604 rx_ring->desc = NULL;
@@ -2625,7 +2616,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2625 2616
2626 for (i = 0; i < adapter->num_rx_queues; i++) 2617 for (i = 0; i < adapter->num_rx_queues; i++)
2627 if (adapter->rx_ring[i]->desc) 2618 if (adapter->rx_ring[i]->desc)
2628 ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]); 2619 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2629} 2620}
2630 2621
2631/** 2622/**
@@ -2789,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2789} 2780}
2790 2781
2791static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2782static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2792 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2783 struct ixgbevf_tx_buffer *first,
2784 u8 *hdr_len)
2793{ 2785{
2786 struct sk_buff *skb = first->skb;
2794 u32 vlan_macip_lens, type_tucmd; 2787 u32 vlan_macip_lens, type_tucmd;
2795 u32 mss_l4len_idx, l4len; 2788 u32 mss_l4len_idx, l4len;
2796 2789
@@ -2815,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2815 IPPROTO_TCP, 2808 IPPROTO_TCP,
2816 0); 2809 0);
2817 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2810 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2811 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2812 IXGBE_TX_FLAGS_CSUM |
2813 IXGBE_TX_FLAGS_IPV4;
2818 } else if (skb_is_gso_v6(skb)) { 2814 } else if (skb_is_gso_v6(skb)) {
2819 ipv6_hdr(skb)->payload_len = 0; 2815 ipv6_hdr(skb)->payload_len = 0;
2820 tcp_hdr(skb)->check = 2816 tcp_hdr(skb)->check =
2821 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2817 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2822 &ipv6_hdr(skb)->daddr, 2818 &ipv6_hdr(skb)->daddr,
2823 0, IPPROTO_TCP, 0); 2819 0, IPPROTO_TCP, 0);
2820 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2821 IXGBE_TX_FLAGS_CSUM;
2824 } 2822 }
2825 2823
2826 /* compute header lengths */ 2824 /* compute header lengths */
@@ -2828,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2828 *hdr_len += l4len; 2826 *hdr_len += l4len;
2829 *hdr_len = skb_transport_offset(skb) + l4len; 2827 *hdr_len = skb_transport_offset(skb) + l4len;
2830 2828
2829 /* update gso size and bytecount with header size */
2830 first->gso_segs = skb_shinfo(skb)->gso_segs;
2831 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2832
2831 /* mss_l4len_id: use 1 as index for TSO */ 2833 /* mss_l4len_id: use 1 as index for TSO */
2832 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2834 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2833 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2835 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2836,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2836 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2838 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2837 vlan_macip_lens = skb_network_header_len(skb); 2839 vlan_macip_lens = skb_network_header_len(skb);
2838 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2840 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2839 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2841 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2840 2842
2841 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2843 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2842 type_tucmd, mss_l4len_idx); 2844 type_tucmd, mss_l4len_idx);
@@ -2844,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2844 return 1; 2846 return 1;
2845} 2847}
2846 2848
2847static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2849static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2848 struct sk_buff *skb, u32 tx_flags) 2850 struct ixgbevf_tx_buffer *first)
2849{ 2851{
2852 struct sk_buff *skb = first->skb;
2850 u32 vlan_macip_lens = 0; 2853 u32 vlan_macip_lens = 0;
2851 u32 mss_l4len_idx = 0; 2854 u32 mss_l4len_idx = 0;
2852 u32 type_tucmd = 0; 2855 u32 type_tucmd = 0;
@@ -2867,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2867 if (unlikely(net_ratelimit())) { 2870 if (unlikely(net_ratelimit())) {
2868 dev_warn(tx_ring->dev, 2871 dev_warn(tx_ring->dev,
2869 "partial checksum but proto=%x!\n", 2872 "partial checksum but proto=%x!\n",
2870 skb->protocol); 2873 first->protocol);
2871 } 2874 }
2872 break; 2875 break;
2873 } 2876 }
@@ -2895,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2895 } 2898 }
2896 break; 2899 break;
2897 } 2900 }
2901
2902 /* update TX checksum flag */
2903 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
2898 } 2904 }
2899 2905
2900 /* vlan_macip_lens: MACLEN, VLAN tag */ 2906 /* vlan_macip_lens: MACLEN, VLAN tag */
2901 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2907 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2902 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2908 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2903 2909
2904 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2910 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2905 type_tucmd, mss_l4len_idx); 2911 type_tucmd, mss_l4len_idx);
2906
2907 return (skb->ip_summed == CHECKSUM_PARTIAL);
2908} 2912}
2909 2913
2910static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2914static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
2911 struct sk_buff *skb, u32 tx_flags)
2912{ 2915{
2913 struct ixgbevf_tx_buffer *tx_buffer_info; 2916 /* set type for advanced descriptor with frame checksum insertion */
2914 unsigned int len; 2917 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
2915 unsigned int total = skb->len; 2918 IXGBE_ADVTXD_DCMD_IFCS |
2916 unsigned int offset = 0, size; 2919 IXGBE_ADVTXD_DCMD_DEXT);
2917 int count = 0;
2918 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2919 unsigned int f;
2920 int i;
2921 2920
2922 i = tx_ring->next_to_use; 2921 /* set HW vlan bit if vlan is present */
2922 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2923 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
2923 2924
2924 len = min(skb_headlen(skb), total); 2925 /* set segmentation enable bits for TSO/FSO */
2925 while (len) { 2926 if (tx_flags & IXGBE_TX_FLAGS_TSO)
2926 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2927 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
2927 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2928
2929 tx_buffer_info->length = size;
2930 tx_buffer_info->mapped_as_page = false;
2931 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2932 skb->data + offset,
2933 size, DMA_TO_DEVICE);
2934 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2935 goto dma_error;
2936 2928
2937 len -= size; 2929 return cmd_type;
2938 total -= size; 2930}
2939 offset += size;
2940 count++;
2941 i++;
2942 if (i == tx_ring->count)
2943 i = 0;
2944 }
2945 2931
2946 for (f = 0; f < nr_frags; f++) { 2932static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
2947 const struct skb_frag_struct *frag; 2933 u32 tx_flags, unsigned int paylen)
2934{
2935 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
2948 2936
2949 frag = &skb_shinfo(skb)->frags[f]; 2937 /* enable L4 checksum for TSO and TX checksum offload */
2950 len = min((unsigned int)skb_frag_size(frag), total); 2938 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2951 offset = 0; 2939 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
2952 2940
2953 while (len) { 2941 /* enble IPv4 checksum for TSO */
2954 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2942 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2955 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2943 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
2956 2944
2957 tx_buffer_info->length = size; 2945 /* use index 1 context for TSO/FSO/FCOE */
2958 tx_buffer_info->dma = 2946 if (tx_flags & IXGBE_TX_FLAGS_TSO)
2959 skb_frag_dma_map(tx_ring->dev, frag, 2947 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
2960 offset, size, DMA_TO_DEVICE);
2961 if (dma_mapping_error(tx_ring->dev,
2962 tx_buffer_info->dma))
2963 goto dma_error;
2964 tx_buffer_info->mapped_as_page = true;
2965 2948
2966 len -= size; 2949 /* Check Context must be set if Tx switch is enabled, which it
2967 total -= size; 2950 * always is for case where virtual functions are running
2968 offset += size; 2951 */
2969 count++; 2952 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
2970 i++;
2971 if (i == tx_ring->count)
2972 i = 0;
2973 }
2974 if (total == 0)
2975 break;
2976 }
2977 2953
2978 if (i == 0) 2954 tx_desc->read.olinfo_status = olinfo_status;
2979 i = tx_ring->count - 1; 2955}
2980 else
2981 i = i - 1;
2982 tx_ring->tx_buffer_info[i].skb = skb;
2983 2956
2984 return count; 2957static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2958 struct ixgbevf_tx_buffer *first,
2959 const u8 hdr_len)
2960{
2961 dma_addr_t dma;
2962 struct sk_buff *skb = first->skb;
2963 struct ixgbevf_tx_buffer *tx_buffer;
2964 union ixgbe_adv_tx_desc *tx_desc;
2965 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
2966 unsigned int data_len = skb->data_len;
2967 unsigned int size = skb_headlen(skb);
2968 unsigned int paylen = skb->len - hdr_len;
2969 u32 tx_flags = first->tx_flags;
2970 __le32 cmd_type;
2971 u16 i = tx_ring->next_to_use;
2985 2972
2986dma_error: 2973 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2987 dev_err(tx_ring->dev, "TX DMA map failed\n");
2988 2974
2989 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2975 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
2990 tx_buffer_info->dma = 0; 2976 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
2991 count--;
2992 2977
2993 /* clear timestamp and dma mappings for remaining portion of packet */ 2978 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-        while (count >= 0) {
-                count--;
-                i--;
-                if (i < 0)
-                        i += tx_ring->count;
-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-        }
-
-        return count;
-}
-
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
-                             int count, unsigned int first, u32 paylen,
-                             u8 hdr_len)
-{
-        union ixgbe_adv_tx_desc *tx_desc = NULL;
-        struct ixgbevf_tx_buffer *tx_buffer_info;
-        u32 olinfo_status = 0, cmd_type_len = 0;
-        unsigned int i;
-
-        u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
-
-        cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
-
-        cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
-
-        if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
-        if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-                olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
-
-        if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-                cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-
-                /* use index 1 context for tso */
-                olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-                if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-                        olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-        }
-
-        /*
-         * Check Context must be set if Tx switch is enabled, which it
-         * always is for case where virtual functions are running
-         */
-        olinfo_status |= IXGBE_ADVTXD_CC;
-
-        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-        i = tx_ring->next_to_use;
-        while (count--) {
-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-                tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-                tx_desc->read.cmd_type_len =
-                        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-                i++;
-                if (i == tx_ring->count)
-                        i = 0;
-        }
-
-        tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
-
-        tx_ring->tx_buffer_info[first].time_stamp = jiffies;
-
-        /* Force memory writes to complete before letting h/w
-         * know there are new descriptors to fetch.  (Only
-         * applicable for weak-ordered memory model archs,
-         * such as IA-64).
-         */
-        wmb();
-
-        tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+        if (dma_mapping_error(tx_ring->dev, dma))
+                goto dma_error;
+
+        /* record length, and DMA address */
+        dma_unmap_len_set(first, len, size);
+        dma_unmap_addr_set(first, dma, dma);
+
+        tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+        for (;;) {
+                while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+                        tx_desc->read.cmd_type_len =
+                                cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+                        i++;
+                        tx_desc++;
+                        if (i == tx_ring->count) {
+                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+                                i = 0;
+                        }
+
+                        dma += IXGBE_MAX_DATA_PER_TXD;
+                        size -= IXGBE_MAX_DATA_PER_TXD;
+
+                        tx_desc->read.buffer_addr = cpu_to_le64(dma);
+                        tx_desc->read.olinfo_status = 0;
+                }
+
+                if (likely(!data_len))
+                        break;
+
+                tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+                i++;
+                tx_desc++;
+                if (i == tx_ring->count) {
+                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+                        i = 0;
+                }
+
+                size = skb_frag_size(frag);
+                data_len -= size;
+
+                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                                       DMA_TO_DEVICE);
+                if (dma_mapping_error(tx_ring->dev, dma))
+                        goto dma_error;
+
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                dma_unmap_len_set(tx_buffer, len, size);
+                dma_unmap_addr_set(tx_buffer, dma, dma);
+
+                tx_desc->read.buffer_addr = cpu_to_le64(dma);
+                tx_desc->read.olinfo_status = 0;
+
+                frag++;
+        }
+
+        /* write last descriptor with RS and EOP bits */
+        cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+        tx_desc->read.cmd_type_len = cmd_type;
+
+        /* set the timestamp */
+        first->time_stamp = jiffies;
+
+        /* Force memory writes to complete before letting h/w know there
+         * are new descriptors to fetch.  (Only applicable for weak-ordered
+         * memory model archs, such as IA-64).
+         *
+         * We also need this memory barrier (wmb) to make certain all of the
+         * status bits have been updated before next_to_watch is written.
+         */
+        wmb();
+
+        /* set next_to_watch value indicating a packet is present */
+        first->next_to_watch = tx_desc;
+
+        i++;
+        if (i == tx_ring->count)
+                i = 0;
+
+        tx_ring->next_to_use = i;
+
+        /* notify HW of packet */
+        writel(i, tx_ring->tail);
+
+        return;
+dma_error:
+        dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+        /* clear dma mappings for failed tx_buffer_info map */
+        for (;;) {
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+                if (tx_buffer == first)
+                        break;
+                if (i == 0)
+                        i = tx_ring->count;
+                i--;
+        }
+
         tx_ring->next_to_use = i;
 }
 
 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-        struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
         /* Herbert's original patch had:
          *  smp_mb__after_netif_stop_queue();
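The new ixgbevf_tx_map() above walks the skb head and its frags, splitting any DMA-mapped chunk larger than IXGBE_MAX_DATA_PER_TXD across several descriptors and wrapping the ring index as it goes. As a rough illustration of just that splitting and wrap-around logic, here is a standalone userspace sketch; RING_SIZE, MAX_PER_DESC and struct desc are invented for the example and are not ixgbevf definitions.

/*
 * Minimal userspace sketch (not driver code) of splitting one mapped
 * buffer across fixed-size descriptors with ring wrap-around, mirroring
 * the loop structure in the new ixgbevf_tx_map().
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE       8
#define MAX_PER_DESC    4096u

struct desc {
        uint64_t addr;
        uint32_t len;
};

static struct desc ring[RING_SIZE];

/* Fill descriptors starting at index i; return the next free index. */
static unsigned int fill_descs(unsigned int i, uint64_t dma, uint32_t size)
{
        for (;;) {
                uint32_t chunk = size > MAX_PER_DESC ? MAX_PER_DESC : size;

                ring[i].addr = dma;
                ring[i].len = chunk;

                dma += chunk;
                size -= chunk;

                /* advance and wrap, as the driver does with tx_ring->count */
                if (++i == RING_SIZE)
                        i = 0;

                if (!size)
                        return i;
        }
}

int main(void)
{
        unsigned int next = fill_descs(6, 0x10000, 10000);

        for (unsigned int i = 0; i < RING_SIZE; i++)
                printf("desc[%u]: addr=0x%llx len=%u\n", i,
                       (unsigned long long)ring[i].addr,
                       (unsigned)ring[i].len);
        printf("next_to_use = %u\n", next);
        return 0;
}

A 10000-byte buffer starting at slot 6 lands in slots 6, 7 and 0 (4096 + 4096 + 1808 bytes), leaving slot 1 as the next free descriptor, which is the same wrap behaviour the driver loop produces.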
@@ -3086,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
         /* A reprieve! - use start_queue because it doesn't call schedule */
         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-        ++adapter->restart_queue;
+        ++tx_ring->tx_stats.restart_queue;
+
         return 0;
 }
 
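The restart_queue increment above now lands in a per-ring tx_stats counter instead of a single adapter-wide field: each queue updates its own counter in the hot path, and totals are summed only when they are reported. A toy sketch of that aggregation idea, using purely illustrative types rather than the ixgbevf structures:

/* Illustrative only: per-ring counters summed at report time. */
#include <stdio.h>

struct tx_stats { unsigned long restart_queue, tx_busy; };
struct ring     { struct tx_stats tx_stats; };

static unsigned long total_restarts(struct ring *rings, int n)
{
        unsigned long sum = 0;

        for (int i = 0; i < n; i++)
                sum += rings[i].tx_stats.restart_queue;
        return sum;
}

int main(void)
{
        struct ring rings[2] = { { { 3, 0 } }, { { 1, 2 } } };

        printf("restart_queue total: %lu\n", total_restarts(rings, 2));
        return 0;
}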
@@ -3100,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+        struct ixgbevf_tx_buffer *first;
         struct ixgbevf_ring *tx_ring;
-        unsigned int first;
-        unsigned int tx_flags = 0;
-        u8 hdr_len = 0;
-        int r_idx = 0, tso;
+        int tso;
+        u32 tx_flags = 0;
         u16 count = TXD_USE_COUNT(skb_headlen(skb));
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
         unsigned short f;
 #endif
+        u8 hdr_len = 0;
         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
                 dev_kfree_skb(skb);
                 return NETDEV_TX_OK;
         }
 
-        tx_ring = adapter->tx_ring[r_idx];
+        tx_ring = adapter->tx_ring[skb->queue_mapping];
 
         /*
          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
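With the fixed r_idx variable gone, the transmit ring above is picked straight from skb->queue_mapping. A minimal standalone sketch of that indexing idea follows; the types, the defensive clamp and select_ring() are invented for the example, while the driver itself simply indexes the ring array with the queue mapping.

/* Illustrative only: pick a Tx ring by the packet's queue index. */
#include <stdio.h>

struct ring { int queue_index; };

static struct ring *select_ring(struct ring **rings, int num_rings,
                                unsigned int queue_mapping)
{
        /* clamp defensively for the example; the real driver trusts the
         * stack to hand out a valid queue index */
        return rings[queue_mapping % num_rings];
}

int main(void)
{
        struct ring r0 = { 0 }, r1 = { 1 };
        struct ring *rings[] = { &r0, &r1 };

        printf("queue 1 -> ring %d\n", select_ring(rings, 2, 1)->queue_index);
        return 0;
}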
@@ -3131,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         count += skb_shinfo(skb)->nr_frags;
 #endif
         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
-                adapter->tx_busy++;
+                tx_ring->tx_stats.tx_busy++;
                 return NETDEV_TX_BUSY;
         }
 
+        /* record the location of the first descriptor for this packet */
+        first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+        first->skb = skb;
+        first->bytecount = skb->len;
+        first->gso_segs = 1;
+
         if (vlan_tx_tag_present(skb)) {
                 tx_flags |= vlan_tx_tag_get(skb);
                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
         }
 
-        first = tx_ring->next_to_use;
+        /* record initial flags and protocol */
+        first->tx_flags = tx_flags;
+        first->protocol = vlan_get_protocol(skb);
 
-        if (skb->protocol == htons(ETH_P_IP))
-                tx_flags |= IXGBE_TX_FLAGS_IPV4;
-        tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
-        if (tso < 0) {
-                dev_kfree_skb_any(skb);
-                return NETDEV_TX_OK;
-        }
+        tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+        if (tso < 0)
+                goto out_drop;
+        else
+                ixgbevf_tx_csum(tx_ring, first);
 
-        if (tso)
-                tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
-        else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
-                tx_flags |= IXGBE_TX_FLAGS_CSUM;
+        ixgbevf_tx_map(tx_ring, first, hdr_len);
 
-        ixgbevf_tx_queue(tx_ring, tx_flags,
-                         ixgbevf_tx_map(tx_ring, skb, tx_flags),
-                         first, skb->len, hdr_len);
+        ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-        writel(tx_ring->next_to_use, tx_ring->tail);
+        return NETDEV_TX_OK;
 
-        ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+        dev_kfree_skb_any(first->skb);
+        first->skb = NULL;
 
         return NETDEV_TX_OK;
 }
@@ -3331,8 +3345,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                 ring = adapter->rx_ring[i];
                 do {
                         start = u64_stats_fetch_begin_bh(&ring->syncp);
-                        bytes = ring->total_bytes;
-                        packets = ring->total_packets;
+                        bytes = ring->stats.bytes;
+                        packets = ring->stats.packets;
                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                 stats->rx_bytes += bytes;
                 stats->rx_packets += packets;
@@ -3342,8 +3356,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                 ring = adapter->tx_ring[i];
                 do {
                         start = u64_stats_fetch_begin_bh(&ring->syncp);
-                        bytes = ring->total_bytes;
-                        packets = ring->total_packets;
+                        bytes = ring->stats.bytes;
+                        packets = ring->stats.packets;
                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                 stats->tx_bytes += bytes;
                 stats->tx_packets += packets;