author     Eric Dumazet <eric.dumazet@gmail.com>    2010-06-22 08:44:11 -0400
committer  David S. Miller <davem@davemloft.net>    2010-06-26 00:33:16 -0400
commit     5eaa0bd81f93225b6d1972b373ed00ca763052b2
tree       44b8ddc198dfe097de65b5a13ffb9c9a65499fa1
parent     4b4194c40f4ac8d03a700845f8978cba53246b5a
loopback: use u64_stats_sync infrastructure
Commit 6b10de38f0ef (loopback: Implement 64bit stats on 32bit arches)
introduced 64-bit stats in the loopback driver, using a private seqcount
and private helpers.

David suggested introducing a generic infrastructure instead; it was added
in (net: Introduce u64_stats_sync infrastructure).

This patch reimplements the loopback 64-bit stats using the u64_stats_sync
infrastructure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/loopback.c | 62
1 file changed, 16 insertions(+), 46 deletions(-)
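For context before the diff, the u64_stats_sync API pairs a writer-side
update_begin/update_end section around the 64-bit counter updates with a
reader-side fetch/retry loop. Below is a minimal sketch of that pattern in
isolation; the struct pcpu_mystats type and the two helper functions are
invented for illustration, while the u64_stats_* calls are the ones the
patch itself uses.

```c
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counters, mirroring the loopback pcpu_lstats layout. */
struct pcpu_mystats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/*
 * Writer side: the caller must guarantee that only one writer touches this
 * per-CPU structure at a time (loopback runs this with BHs disabled).
 */
static void mystats_account(struct pcpu_mystats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->bytes += len;
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry if a writer updated the counters while we were reading. */
static void mystats_read(const struct pcpu_mystats *s, u64 *packets, u64 *bytes)
{
	u64 tpackets, tbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		tpackets = s->packets;
		tbytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	*packets += tpackets;
	*bytes += tbytes;
}
```

This is exactly the shape loopback_xmit() and loopback_get_stats64() take in
the diff below.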
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 09334f8f148b..4dd0510d7a99 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,53 +58,15 @@
 #include <linux/tcp.h>
 #include <linux/percpu.h>
 #include <net/net_namespace.h>
+#include <linux/u64_stats_sync.h>
 
 struct pcpu_lstats {
 	u64 packets;
 	u64 bytes;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	seqcount_t seq;
-#endif
+	struct u64_stats_sync syncp;
 	unsigned long drops;
 };
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-	write_seqcount_begin(&lstats->seq);
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-	write_seqcount_end(&lstats->seq);
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	u64 tpackets, tbytes;
-	unsigned int seq;
-
-	do {
-		seq = read_seqcount_begin(&lstats->seq);
-		tpackets = lstats->packets;
-		tbytes = lstats->bytes;
-	} while (read_seqcount_retry(&lstats->seq, seq));
-
-	*packets += tpackets;
-	*bytes += tbytes;
-}
-#else
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	*packets += lstats->packets;
-	*bytes += lstats->bytes;
-}
-#endif
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -126,10 +88,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
-		lstats_update_begin(lb_stats);
+		u64_stats_update_begin(&lb_stats->syncp);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
-		lstats_update_end(lb_stats);
+		u64_stats_update_end(&lb_stats->syncp);
 	} else
 		lb_stats->drops++;
 
@@ -148,10 +110,18 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
 	for_each_possible_cpu(i) {
 		const struct pcpu_lstats *lb_stats;
+		u64 tbytes, tpackets;
+		unsigned int start;
 
 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		lstats_fetch_and_add(&packets, &bytes, lb_stats);
+		do {
+			start = u64_stats_fetch_begin(&lb_stats->syncp);
+			tbytes = lb_stats->bytes;
+			tpackets = lb_stats->packets;
+		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
 		drops += lb_stats->drops;
+		bytes += tbytes;
+		packets += tpackets;
 	}
 	stats->rx_packets = packets;
 	stats->tx_packets = packets;
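The driver-private helpers removed above could go away because the generic
infrastructure provides the same mechanism in one place: on 32-bit SMP
kernels the sync object wraps a seqcount so a reader never observes a torn
64-bit counter, and on 64-bit (or 32-bit UP) kernels it reduces to nothing.
The sketch below approximates the helpers from <linux/u64_stats_sync.h> as
introduced by the companion commit; it is a paraphrase for illustration, not
a verbatim copy of the header.

```c
#include <linux/types.h>
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;	/* only needed where a 64-bit load can tear */
#endif
};

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}
```

So on 64-bit kernels the syncp member adds no storage and all four calls
compile away, while 32-bit SMP kernels keep the same seqcount protection the
driver previously open-coded.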