author     Eric Dumazet <eric.dumazet@gmail.com>    2010-06-23 20:54:06 -0400
committer  David S. Miller <davem@davemloft.net>    2010-06-29 02:24:30 -0400
commit     33d91f00c73ba0012bce18c1690cb8313ca7adaa (patch)
tree       71a927098b248685af4dede30d443ef73c344d16 /include/linux
parent     7a9b2d59507d85569b8a456688ef40cf2ac73e48 (diff)
net: u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh()
- Must disable preemption in case of 32bit UP in u64_stats_fetch_begin()
  and u64_stats_fetch_retry()

- Add new u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh() for
  network usage, disabling BH on 32bit UP only.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/u64_stats_sync.h  |  59
 1 file changed, 44 insertions(+), 15 deletions(-)
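For context, a minimal sketch of the producer pattern this header's "Usage" comment describes, against which the patch below is easiest to read. The names struct mystats and mystats_update() are hypothetical; only the u64_stats_* helpers come from u64_stats_sync.h.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stats block: 64bit counters guarded by a u64_stats_sync. */
struct mystats {
	u64			bytes;
	u64			packets;
	struct u64_stats_sync	syncp;
};

/* Writer (e.g. a softirq/NAPI path): bracket the 64bit updates so that
 * 32bit SMP readers can detect a concurrent update via the seqcount.
 * On 64bit and on 32bit UP these calls compile away. */
static void mystats_update(struct mystats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->bytes += len;
	stats->packets++;
	u64_stats_update_end(&stats->syncp);
}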
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index b38e3a58de83..fa261a0da280 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,6 +27,9 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ * u64_stats_fetch_retry_bh() helpers
+ *
  * Usage :
  *
  * Stats producer (writer) should use following template granted it already got
@@ -58,54 +61,80 @@
  */
 #include <linux/seqlock.h>
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	seqcount_t	seq;
+#endif
 };
 
 static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
+#endif
 }
 
 static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
+#endif
 }
 
 static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	preempt_disable();
+#endif
+	return 0;
+#endif
 }
 
 static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
-}
-
 #else
-struct u64_stats_sync {
-};
-
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
-{
-}
-
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
-{
+#if BITS_PER_LONG==32
+	preempt_enable();
+#endif
+	return false;
+#endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	local_bh_disable();
+#endif
 	return 0;
+#endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+	local_bh_enable();
+#endif
 	return false;
-}
 #endif
+}
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
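A matching reader sketch, reusing the hypothetical struct mystats from above. The retry loop is the pattern the new helpers target: u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() use the seqcount on 32bit SMP, disable BH on 32bit UP so a softirq writer cannot interrupt the snapshot, and compile down to nothing on 64bit.

/* Reader: loop until a consistent snapshot of the counters is observed.
 * (Hypothetical example, not part of the patch.) */
static void mystats_read(const struct mystats *stats, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&stats->syncp);
		*bytes = stats->bytes;
		*packets = stats->packets;
	} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
}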