author		Alexander Duyck <alexander.h.duyck@intel.com>	2010-11-16 22:26:50 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2010-11-16 22:26:50 -0500
commit		5b7da51547cc3ab5461e45a8ee0ca73051416fda (patch)
tree		bb50d8e80412310a4adec5765f53d94909ac6f30 /drivers/net
parent		b6ec895ecd32c0070c3b2b17918c030275cd834d (diff)
ixgbe: combine some stats into a union to allow for Tx/Rx stats overlap
This change moves some of the RX and TX stats into separate structures and
then places those structures in a union in order to help reduce the size of
the ring structure.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
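
The trick here is that a given ring carries either transmit or receive traffic, never both, so the Tx-only and Rx-only counters can safely share the same bytes. A minimal standalone sketch of the idea (struct and field names mirror the patch, but the program itself is illustrative; anonymous union members are C11, long available as the GNU extension the kernel relies on):

```c
#include <stdio.h>
#include <stdint.h>

struct tx_queue_stats {		/* Tx-only counters */
	uint64_t restart_queue;
	uint64_t tx_busy;
};

struct rx_queue_stats {		/* Rx-only counters */
	uint64_t rsc_count;
	uint64_t rsc_flush;
	uint64_t non_eop_descs;
	uint64_t alloc_rx_page_failed;
	uint64_t alloc_rx_buff_failed;
};

struct ring {
	/* Overlap the two views: the union occupies max(16, 40) = 40
	 * bytes rather than the 56 that side-by-side members would. */
	union {
		struct tx_queue_stats tx_stats;
		struct rx_queue_stats rx_stats;
	};
};

int main(void)
{
	printf("tx view:  %zu bytes\n", sizeof(struct tx_queue_stats));
	printf("rx view:  %zu bytes\n", sizeof(struct rx_queue_stats));
	printf("combined: %zu bytes\n", sizeof(struct ring));
	return 0;
}
```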
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h	|  24
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	|  68
2 files changed, 63 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 70ccab074658..3c63ee6be2ee 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -146,6 +146,19 @@ struct ixgbe_queue_stats {
 	u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+};
+
+struct ixgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
 	struct device *dev;		/* device for DMA mapping */
@@ -183,13 +196,12 @@ struct ixgbe_ring {
 
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
-	int numa_node;
+	union {
+		struct ixgbe_tx_queue_stats tx_stats;
+		struct ixgbe_rx_queue_stats rx_stats;
+	};
 	unsigned long reinit_state;
-	u64 rsc_count;			/* stat for coalesced packets */
-	u64 rsc_flush;			/* stats for flushed packets */
-	u32 restart_queue;		/* track tx queue restarts */
-	u32 non_eop_descs;		/* track hardware descriptor chaining */
-
+	int numa_node;
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
 	struct rcu_head rcu;
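
One subtlety of the anonymous union above: call sites name tx_stats or rx_stats as if they were plain members of the ring, and nothing at compile time stops a code path from touching the wrong view. A hypothetical helper (the function name is not from the patch) showing the access pattern the ixgbe_main.c changes below follow:

```c
/* Hypothetical illustration: bump the Tx restart counter on a ring.
 * Valid only for a Tx ring; on an Rx ring these same bytes belong to
 * rx_stats, and writing tx_stats would corrupt rsc_count/rsc_flush. */
static inline void ixgbe_count_tx_restart(struct ixgbe_ring *tx_ring)
{
	++tx_ring->tx_stats.restart_queue;
}
```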
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index be76dd9b94a9..a47e09098166 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -783,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++tx_ring->restart_queue;
+			++tx_ring->tx_stats.restart_queue;
 		}
 	}
 
@@ -1024,7 +1024,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			skb = netdev_alloc_skb_ip_align(adapter->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 			/* initialize queue mapping */
@@ -1038,7 +1038,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
 			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
 				bi->dma = 0;
 				goto no_buffers;
 			}
@@ -1048,7 +1048,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 		if (!bi->page) {
 			bi->page = netdev_alloc_page(adapter->netdev);
 			if (!bi->page) {
-				adapter->alloc_rx_page_failed++;
+				rx_ring->rx_stats.alloc_rx_page_failed++;
 				goto no_buffers;
 			}
 		}
@@ -1063,7 +1063,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 					    DMA_FROM_DEVICE);
 			if (dma_mapping_error(rx_ring->dev,
 					      bi->page_dma)) {
-				adapter->alloc_rx_page_failed++;
+				rx_ring->rx_stats.alloc_rx_page_failed++;
 				bi->page_dma = 0;
 				goto no_buffers;
 			}
@@ -1258,7 +1258,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (staterr & IXGBE_RXD_STAT_EOP) {
 			if (skb->prev)
 				skb = ixgbe_transform_rsc_queue(skb,
-						&(rx_ring->rsc_count));
+						&(rx_ring->rx_stats.rsc_count));
 			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(rx_ring->dev,
@@ -1269,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count +=
-						skb_shinfo(skb)->nr_frags;
+					rx_ring->rx_stats.rsc_count +=
+						skb_shinfo(skb)->nr_frags;
 				else
-					rx_ring->rsc_count++;
-				rx_ring->rsc_flush++;
+					rx_ring->rx_stats.rsc_count++;
+				rx_ring->rx_stats.rsc_flush++;
 			}
 			u64_stats_update_begin(&rx_ring->syncp);
 			rx_ring->stats.packets++;
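
The stats.packets/stats.bytes bump at the end of this hunk is guarded by u64_stats_update_begin() on &rx_ring->syncp so that 32-bit readers can take a torn-free snapshot of the 64-bit counters (on 64-bit kernels the sync object compiles away). A sketch of the matching reader side using the standard u64_stats_sync pattern — the helper name is illustrative, and ixgbe's real reader lives in its 64-bit stats path outside this diff:

```c
#include <linux/u64_stats_sync.h>

/* Illustrative reader: snapshot one ring's 64-bit counters without
 * tearing on 32-bit hosts, retrying if a writer raced with us. */
static void ring_stats_snapshot(struct ixgbe_ring *ring,
				u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}
```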
@@ -1289,7 +1289,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				skb->next = next_buffer->skb;
 				skb->next->prev = skb;
 			}
-			rx_ring->non_eop_descs++;
+			rx_ring->rx_stats.non_eop_descs++;
 			goto next_desc;
 		}
 
@@ -5406,10 +5406,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
 	u64 total_mpc = 0;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-	u64 non_eop_descs = 0, restart_queue = 0;
-	struct ixgbe_hw_stats *hwstats = &adapter->stats;
+	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 bytes = 0, packets = 0;
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5422,21 +5424,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 			adapter->hw_rx_no_dma_resources +=
 				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rsc_count += adapter->rx_ring[i]->rsc_count;
-			rsc_flush += adapter->rx_ring[i]->rsc_flush;
+			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
 		}
 		adapter->rsc_total_count = rsc_count;
 		adapter->rsc_total_flush = rsc_flush;
 	}
 
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		bytes += rx_ring->stats.bytes;
+		packets += rx_ring->stats.packets;
+	}
+	adapter->non_eop_descs = non_eop_descs;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	netdev->stats.rx_bytes = bytes;
+	netdev->stats.rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
 	/* gather some stats to the adapter struct that are per queue */
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		restart_queue += adapter->tx_ring[i]->restart_queue;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+		restart_queue += tx_ring->tx_stats.restart_queue;
+		tx_busy += tx_ring->tx_stats.tx_busy;
+		bytes += tx_ring->stats.bytes;
+		packets += tx_ring->stats.packets;
+	}
 	adapter->restart_queue = restart_queue;
-
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-	adapter->non_eop_descs = non_eop_descs;
+	adapter->tx_busy = tx_busy;
+	netdev->stats.tx_bytes = bytes;
+	netdev->stats.tx_packets = packets;
 
 	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 	for (i = 0; i < 8; i++) {
@@ -6223,7 +6245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
 	netif_start_subqueue(netdev, tx_ring->queue_index);
-	++tx_ring->restart_queue;
+	++tx_ring->tx_stats.restart_queue;
 	return 0;
 }
 
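
For context, the restart counted here pairs with a stop earlier in the same function: the queue is stopped first, then re-checked, because a cleanup interrupt may free descriptors between the initial check and the stop. A simplified sketch of that stop/re-check/restart dance (not the verbatim driver code; IXGBE_DESC_UNUSED is the driver's existing free-descriptor macro):

```c
/* Simplified sketch of __ixgbe_maybe_stop_tx's logic. */
static int maybe_stop_tx(struct net_device *netdev,
			 struct ixgbe_ring *tx_ring, int size)
{
	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Make the stop visible before re-checking: another CPU's
	 * Tx cleanup may have just made room available. */
	smp_mb();
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;
	/* A reprieve! - restart without scheduling, and count it. */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
```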
@@ -6339,7 +6361,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-		adapter->tx_busy++;
+		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
 