author     Eric Dumazet <eric.dumazet@gmail.com>    2010-10-15 13:27:10 -0400
committer  David S. Miller <davem@davemloft.net>    2010-10-18 10:54:29 -0400
commit     12dcd86b75d571772512676ab301279952efc0b0 (patch)
tree       8214cabf20bfcba9fb58c642f5d1d2b544f4966e /drivers/net/igb
parent     dce87b960cf4794141f067d8c8180ccc6716513f (diff)
igb: fix stats handling
There are currently some problems with igb:

- On 32bit arches, 64bit counters are maintained without proper
  synchronization between writers and readers.

- Stats are updated only every two seconds, as reported by Jesper.
  (Jesper provided a patch for this.)

- There is a potential race between the worker thread and ethtool -S.

This patch uses u64_stats_sync and converts everything to be 64bit
safe and SMP safe, even on 32bit arches. It integrates Jesper's idea
of providing accurate stats at the time the user reads them.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Tested-by: Emil Tantilov <emil.s.tantilov@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
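For reference, a minimal sketch of the u64_stats_sync pattern this patch
adopts (the "my_ring" structure and helpers below are hypothetical, for
illustration only; the u64_stats_* calls are the real API from
<linux/u64_stats_sync.h>):

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct my_ring {
		u64 packets;
		u64 bytes;
		/* seqcount on 32bit SMP, compiles away on 64bit */
		struct u64_stats_sync syncp;
	};

	/* Writer side (e.g. NAPI poll): cheap, never blocks. */
	static void my_ring_account(struct my_ring *ring, unsigned int len)
	{
		u64_stats_update_begin(&ring->syncp);
		ring->packets++;
		ring->bytes += len;
		u64_stats_update_end(&ring->syncp);
	}

	/* Reader side (e.g. ndo_get_stats64): retries if it raced with
	 * a writer, so a 64bit counter is never seen half-updated on
	 * 32bit arches. The _bh variants also disable softirqs on 32bit
	 * so a BH writer on the local CPU cannot repeatedly invalidate
	 * the read. */
	static void my_ring_read(struct my_ring *ring, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			*packets = ring->packets;
			*bytes = ring->bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
	}

On 64bit arches the sync object adds no cost; the whole scheme only does
work where torn 64bit reads are actually possible.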
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/igb.h           9
-rw-r--r--  drivers/net/igb/igb_ethtool.c   52
-rw-r--r--  drivers/net/igb/igb_main.c      113
3 files changed, 129 insertions(+), 45 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 44e0ff1494e0..edab9c442399 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -159,6 +159,7 @@ struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 restart_queue;
+	u64 restart_queue2;
 };
 
 struct igb_rx_queue_stats {
@@ -210,11 +211,14 @@ struct igb_ring {
 		/* TX */
 		struct {
 			struct igb_tx_queue_stats tx_stats;
+			struct u64_stats_sync tx_syncp;
+			struct u64_stats_sync tx_syncp2;
 			bool detect_tx_hung;
 		};
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
+			struct u64_stats_sync rx_syncp;
 			u32 rx_buffer_len;
 		};
 	};
@@ -288,6 +292,9 @@ struct igb_adapter {
 	struct timecompare compare;
 	struct hwtstamp_config hwtstamp_config;
 
+	spinlock_t stats64_lock;
+	struct rtnl_link_stats64 stats64;
+
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
 	struct e1000_hw_stats stats;
@@ -357,7 +364,7 @@ extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
 					   struct igb_buffer *);
 extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-extern void igb_update_stats(struct igb_adapter *);
+extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 26bf6a13d1c1..a70e16bcfa7e 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -90,8 +90,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
 
 #define IGB_NETDEV_STAT(_net_stat) { \
 	.stat_string = __stringify(_net_stat), \
-	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
-	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
+	.sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+	.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
 }
 static const struct igb_stats igb_gstrings_net_stats[] = {
 	IGB_NETDEV_STAT(rx_errors),
@@ -111,8 +111,9 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
 	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
 #define IGB_RX_QUEUE_STATS_LEN \
 	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-#define IGB_TX_QUEUE_STATS_LEN \
-	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+
+#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+
 #define IGB_QUEUE_STATS_LEN \
 	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
 	  IGB_RX_QUEUE_STATS_LEN) + \
@@ -2070,12 +2071,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct net_device_stats *net_stats = &netdev->stats;
-	u64 *queue_stat;
-	int i, j, k;
+	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+	unsigned int start;
+	struct igb_ring *ring;
+	int i, j;
 	char *p;
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, net_stats);
 
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
 		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2088,15 +2091,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
-		for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
-			data[i] = queue_stat[k];
+		u64 restart2;
+
+		ring = adapter->tx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			data[i]   = ring->tx_stats.packets;
+			data[i+1] = ring->tx_stats.bytes;
+			data[i+2] = ring->tx_stats.restart_queue;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+			restart2 = ring->tx_stats.restart_queue2;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+		data[i+2] += restart2;
+
+		i += IGB_TX_QUEUE_STATS_LEN;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
-		for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
-			data[i] = queue_stat[k];
+		ring = adapter->rx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			data[i]   = ring->rx_stats.packets;
+			data[i+1] = ring->rx_stats.bytes;
+			data[i+2] = ring->rx_stats.drops;
+			data[i+3] = ring->rx_stats.csum_err;
+			data[i+4] = ring->rx_stats.alloc_failed;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		i += IGB_RX_QUEUE_STATS_LEN;
 	}
+	spin_unlock(&adapter->stats64_lock);
 }
 
 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5b04eff2fd23..b8dccc0ac089 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -96,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
 static int igb_sw_init(struct igb_adapter *);
@@ -113,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -1536,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -1689,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
-	.ndo_get_stats		= igb_get_stats,
+	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_multicast_list	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -2276,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	if (hw->mac.type == e1000_82576)
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
@@ -3483,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3550,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *ring;
+	unsigned int packets;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
@@ -3559,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 		goto set_itr_val;
 	}
 
-	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->rx_ring;
-		avg_wire_size = ring->total_bytes / ring->total_packets;
+	ring = q_vector->rx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = ring->total_bytes / packets;
 	}
 
-	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->tx_ring;
-		avg_wire_size = max_t(u32, avg_wire_size,
-				      (ring->total_bytes /
-				       ring->total_packets));
+	ring = q_vector->tx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = max_t(u32, avg_wire_size,
+					      ring->total_bytes / packets);
 	}
 
 	/* if avg_wire_size isn't set no work was done */
@@ -4077,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	tx_ring->tx_stats.restart_queue++;
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
 	return 0;
 }
 
@@ -4214,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
 }
 
 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+						 struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	memcpy(stats, &adapter->stats64, sizeof(*stats));
+	spin_unlock(&adapter->stats64_lock);
+
+	return stats;
 }
 
 /**
@@ -4305,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
  **/
 
-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+		      struct rtnl_link_stats64 *net_stats)
 {
-	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
+	unsigned int start;
+	u64 _bytes, _packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -4331,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		struct igb_ring *ring = adapter->rx_ring[i];
+
 		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += ring->rx_stats.bytes;
-		packets += ring->rx_stats.packets;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 
 	net_stats->rx_bytes = bytes;
@@ -4344,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
-		bytes += ring->tx_stats.bytes;
-		packets += ring->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
@@ -5397,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
 			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
 		}
 	}
 
@@ -5437,8 +5476,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
+	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->tx_syncp);
 	return count < tx_ring->count;
 }
 
5444 5485
@@ -5480,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 		 * packets, (aka let the stack check the crc32c)
 		 */
 		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
-
+			u64_stats_update_end(&ring->rx_syncp);
+		}
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -5669,8 +5712,10 @@ next_desc:
 
 	rx_ring->total_packets += total_packets;
 	rx_ring->total_bytes += total_bytes;
+	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
+	u64_stats_update_end(&rx_ring->rx_syncp);
 	return cleaned;
 }
 
@@ -5698,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = netdev_alloc_page(netdev);
-				if (!buffer_info->page) {
+				if (unlikely(!buffer_info->page)) {
+					u64_stats_update_begin(&rx_ring->rx_syncp);
 					rx_ring->rx_stats.alloc_failed++;
+					u64_stats_update_end(&rx_ring->rx_syncp);
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5714,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->page_dma)) {
 				buffer_info->page_dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -5722,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		skb = buffer_info->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			if (!skb) {
+			if (unlikely(!skb)) {
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 
@@ -5737,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->dma)) {
 				buffer_info->dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}