author		Stephen Hemminger <shemminger@vyatta.com>	2011-06-20 06:35:07 -0400
committer	David S. Miller <davem@davemloft.net>		2011-06-21 18:55:52 -0400
commit		62ea05577ed3ea4f542f9bc17cb716787316e2ea (patch)
tree		a94688d35de2f98fd07e027c8ef15d67906f1c42 /drivers
parent		3b0c9cbb6e5fe25a4162be68ed1459b6f7432da9 (diff)
vxge: fix 64 bit access on 32 bit platforms
Need to add a u64_stats_sync wrapper around the 64 bit statistic values
(the general pattern is sketched below, after the sign-offs).

Fix a wraparound bug in the lockup detector, which was unsafely comparing
a 64 bit value that is not updated atomically. Since we only care about
detecting activity, looking at the current low-order bits is sufficient.

Remove the unused entries in the old vxge_sw_stats structure.

Change the error counters to unsigned long, since they will never grow
large enough to need 64 bits.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
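For context, the sketch below shows the generic u64_stats_sync writer/reader pattern that this patch applies to the ring and fifo counters. It is illustrative only: the struct and function names are made up, and only the u64_stats_* helpers from <linux/u64_stats_sync.h> are the real kernel API. On 64 bit builds the helpers compile away; on 32 bit SMP builds they wrap the update in a seqcount so a reader retries its snapshot instead of observing a torn 64 bit counter.

	#include <linux/u64_stats_sync.h>

	/* Hypothetical per-queue counters, mirroring vxge_ring_stats/vxge_fifo_stats. */
	struct example_stats {
		struct u64_stats_sync syncp;
		u64 packets;
		u64 bytes;
	};

	/* Writer (datapath): bracket the 64 bit updates. */
	static void example_stats_add(struct example_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader (e.g. ndo_get_stats64): retry if a writer ran during the snapshot. */
	static void example_stats_read(struct example_stats *s, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*packets = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}

The lockup detector does not need the reader loop: it only has to know whether rx_frms changed at all, so the patch reads the counter once with ACCESS_ONCE() and compares just the machine-word-sized low-order bits.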
Diffstat (limited to 'drivers')

-rw-r--r--  drivers/net/vxge/vxge-main.c  51
-rw-r--r--  drivers/net/vxge/vxge-main.h  48

2 files changed, 59 insertions, 40 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index e658edd1c959..54ca74806bb6 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -296,11 +296,13 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 	skb_record_rx_queue(skb, ring->driver_id);
 	skb->protocol = eth_type_trans(skb, ring->ndev);
 
+	u64_stats_update_begin(&ring->stats.syncp);
 	ring->stats.rx_frms++;
 	ring->stats.rx_bytes += pkt_length;
 
 	if (skb->pkt_type == PACKET_MULTICAST)
 		ring->stats.rx_mcast++;
+	u64_stats_update_end(&ring->stats.syncp);
 
 	vxge_debug_rx(VXGE_TRACE,
 		"%s: %s:%d skb protocol = %d",
@@ -592,8 +594,10 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 
 		/* Updating the statistics block */
+		u64_stats_update_begin(&fifo->stats.syncp);
 		fifo->stats.tx_frms++;
 		fifo->stats.tx_bytes += skb->len;
+		u64_stats_update_end(&fifo->stats.syncp);
 
 		*done_skb++ = skb;
 
@@ -2630,11 +2634,16 @@ static void vxge_poll_vp_lockup(unsigned long data)
 	struct vxge_vpath *vpath;
 	struct vxge_ring *ring;
 	int i;
+	unsigned long rx_frms;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
+
+		/* Truncated to machine word size number of frames */
+		rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+
 		/* Did this vpath received any packets */
-		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
+		if (ring->stats.prev_rx_frms == rx_frms) {
 			status = vxge_hw_vpath_check_leak(ring->handle);
 
 			/* Did it received any packets last time */
@@ -2654,7 +2663,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
 				}
 			}
 		}
-		ring->stats.prev_rx_frms = ring->stats.rx_frms;
+		ring->stats.prev_rx_frms = rx_frms;
 		ring->last_status = status;
 	}
 
@@ -3125,14 +3134,36 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 
 	/* net_stats already zeroed by caller */
 	for (k = 0; k < vdev->no_of_vpath; k++) {
-		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
-		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
-		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
-		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
-		net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
-		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
-		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
-		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
+		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
+		struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
+		unsigned int start;
+		u64 packets, bytes, multicast;
+
+		do {
+			start = u64_stats_fetch_begin(&rxstats->syncp);
+
+			packets = rxstats->rx_frms;
+			multicast = rxstats->rx_mcast;
+			bytes = rxstats->rx_bytes;
+		} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+		net_stats->rx_packets += packets;
+		net_stats->rx_bytes += bytes;
+		net_stats->multicast += multicast;
+
+		net_stats->rx_errors += rxstats->rx_errors;
+		net_stats->rx_dropped += rxstats->rx_dropped;
+
+		do {
+			start = u64_stats_fetch_begin(&txstats->syncp);
+
+			packets = txstats->tx_frms;
+			bytes = txstats->tx_bytes;
+		} while (u64_stats_fetch_retry(&txstats->syncp, start));
+
+		net_stats->tx_packets += packets;
+		net_stats->tx_bytes += bytes;
+		net_stats->tx_errors += txstats->tx_errors;
 	}
 
 	return net_stats;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index ed120aba443d..66e6de86ff0c 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -201,30 +201,14 @@ struct vxge_msix_entry {
 /* Software Statistics */
 
 struct vxge_sw_stats {
-	/* Network Stats (interface stats) */
-
-	/* Tx */
-	u64 tx_frms;
-	u64 tx_errors;
-	u64 tx_bytes;
-	u64 txd_not_free;
-	u64 txd_out_of_desc;
 
 	/* Virtual Path */
-	u64 vpaths_open;
-	u64 vpath_open_fail;
-
-	/* Rx */
-	u64 rx_frms;
-	u64 rx_errors;
-	u64 rx_bytes;
-	u64 rx_mcast;
+	unsigned long vpaths_open;
+	unsigned long vpath_open_fail;
 
 	/* Misc. */
-	u64 link_up;
-	u64 link_down;
-	u64 pci_map_fail;
-	u64 skb_alloc_fail;
+	unsigned long link_up;
+	unsigned long link_down;
 };
 
 struct vxge_mac_addrs {
@@ -237,12 +221,14 @@ struct vxge_mac_addrs {
 struct vxgedev;
 
 struct vxge_fifo_stats {
+	struct u64_stats_sync syncp;
 	u64 tx_frms;
-	u64 tx_errors;
 	u64 tx_bytes;
-	u64 txd_not_free;
-	u64 txd_out_of_desc;
-	u64 pci_map_fail;
+
+	unsigned long tx_errors;
+	unsigned long txd_not_free;
+	unsigned long txd_out_of_desc;
+	unsigned long pci_map_fail;
 };
 
 struct vxge_fifo {
@@ -264,14 +250,16 @@ struct vxge_fifo {
 } ____cacheline_aligned;
 
 struct vxge_ring_stats {
-	u64 prev_rx_frms;
+	struct u64_stats_sync syncp;
 	u64 rx_frms;
-	u64 rx_errors;
-	u64 rx_dropped;
-	u64 rx_bytes;
 	u64 rx_mcast;
-	u64 pci_map_fail;
-	u64 skb_alloc_fail;
+	u64 rx_bytes;
+
+	unsigned long rx_errors;
+	unsigned long rx_dropped;
+	unsigned long prev_rx_frms;
+	unsigned long pci_map_fail;
+	unsigned long skb_alloc_fail;
 };
 
 struct vxge_ring {