author     Alexander Duyck <alexander.h.duyck@intel.com>    2009-10-27 11:52:27 -0400
committer  David S. Miller <davem@davemloft.net>            2009-10-28 04:20:29 -0400
commit     04a5fcaaf0e12d066411aa54e42591952aa18da7
tree       e528d31a9aef81301340a9ef2d6367b2832ab2e7
parent     4c844851d15cc08d995179ab5118172711be6eb0
igb: move alloc_failed and csum_err stats into per rx-ring stat
The allocation-failed and checksum-error stats are currently kept as
global counters. If we end up allocating the queues to multiple netdevs,
then the global counters don't make much sense. For this reason I felt it
necessary to move the alloc_rx_buff_failed and hw_csum_err stats into the
rx_stats portion of the rx_ring.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
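
For context, a minimal sketch of how counters kept per ring can still be
rolled up into a single global value when one is wanted. This is
illustrative only and not part of the patch; the helper name
igb_sum_rx_alloc_failed is hypothetical, while the field names match the
ones added to igb.h below.

/* Illustrative sketch, not part of this patch: once a counter lives in
 * each ring's stats struct, a global value can be produced by summing
 * over the rings owned by a given netdev.
 */
static u64 igb_sum_rx_alloc_failed(struct igb_adapter *adapter)
{
	u64 total = 0;
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		total += adapter->rx_ring[i].rx_stats.alloc_failed;

	return total;
}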
-rw-r--r--   drivers/net/igb/igb.h          |  6
-rw-r--r--   drivers/net/igb/igb_ethtool.c  |  9
-rw-r--r--   drivers/net/igb/igb_main.c     | 17
3 files changed, 17 insertions, 15 deletions
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 00ff274b16db..6a67fa2e6007 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -145,12 +145,15 @@ struct igb_buffer {
 struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
+	u64 restart_queue;
 };
 
 struct igb_rx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
 };
 
 struct igb_q_vector {
@@ -241,7 +244,6 @@ struct igb_adapter {
 
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
-	unsigned int restart_queue;
 	unsigned long tx_queue_len;
 	u32 txd_cmd;
 	u32 gotc;
@@ -255,8 +257,6 @@ struct igb_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	u64 hw_csum_err;
-	u32 alloc_rx_buff_failed;
 	u32 gorc;
 	u64 gorc_old;
 	u32 max_frame_size;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index c48a555bda2c..f62430b1f759 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -84,7 +84,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
 	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
 	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
 	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
 	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -95,9 +94,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
 	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
 	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
 	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
 	{ "tx_smbus", IGB_STAT(stats.mgptc) },
 	{ "rx_smbus", IGB_STAT(stats.mgprc) },
 	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
@@ -2031,6 +2028,8 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
@@ -2039,6 +2038,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_drops", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 		/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 04e860d4e080..bdd7bf099363 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3562,8 +3562,6 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 static int __igb_maybe_stop_tx(struct net_device *netdev,
 			       struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
-
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
 	/* Herbert's original patch had:
@@ -3578,7 +3576,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
 
@@ -4734,7 +4732,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4801,7 +4799,8 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
+				       struct igb_adapter *adapter,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
@@ -4820,7 +4819,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 		 */
 		if (!((adapter->hw.mac.type == e1000_82576) &&
 		      (skb->len == 60)))
-			adapter->hw_csum_err++;
+			ring->rx_stats.csum_err++;
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -4979,7 +4978,7 @@ send_up:
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5046,7 +5045,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		if (!buffer_info->page) {
 			buffer_info->page = alloc_page(GFP_ATOMIC);
 			if (!buffer_info->page) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 			buffer_info->page_offset = 0;
@@ -5063,7 +5062,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		if (!buffer_info->skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 