author     Frederic Weisbecker <fweisbec@gmail.com>  2016-09-25 20:29:19 -0400
committer  Ingo Molnar <mingo@kernel.org>            2016-09-30 05:46:40 -0400
commit     68107df5f2cb5dc3785be40162bfe2f19a178bbb (patch)
tree       12b6a4660458f192213eeac3071c4acb90dbbe63
parent     2810f611f908112ea1b30bc016d25205acb3d486 (diff)
u64_stats: Introduce IRQs disabled helpers
Introduce light versions of the u64_stats helpers for contexts where
either preemption or IRQs are already disabled. This way we can make
this library usable by the scheduler irqtime accounting, which
currently implements its own ad-hoc version.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1474849761-12678-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/u64_stats_sync.h | 45
1 file changed, 24 insertions(+), 21 deletions(-)
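
A usage sketch of the new light helpers, for orientation (this is not
part of the patch: struct foo_stats, its bytes field and
foo_read_bytes() are made-up names). A reader that already runs with
preemption or IRQs disabled, as the scheduler irqtime accounting
converted later in this series does, can take the seqcount snapshot
directly, without the preempt/irq toggling the plain wrappers add on
32-bit UP:

#include <linux/u64_stats_sync.h>

/* Sketch only: foo_stats, bytes and foo_read_bytes() are hypothetical. */
struct foo_stats {
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* Caller must already have preemption or IRQs disabled. */
static u64 foo_read_bytes(const struct foo_stats *stats)
{
	unsigned int seq;
	u64 bytes;

	do {
		/* On 32-bit SMP this still takes the seqcount snapshot,
		 * guarding against a torn 64-bit read; on other configs
		 * it is (nearly) free, and nothing extra is disabled. */
		seq = __u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
	} while (__u64_stats_fetch_retry(&stats->syncp, seq));

	return bytes;
}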
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index d3a2bb712af3..650f3dd6b800 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
 #endif
 }
 
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
-#if BITS_PER_LONG==32
-	preempt_disable();
-#endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+	preempt_disable();
+#endif
+	return __u64_stats_fetch_begin(syncp);
+}
+
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
-#if BITS_PER_LONG==32
-	preempt_enable();
-#endif
 	return false;
 #endif
 }
 
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+	preempt_enable();
+#endif
+	return __u64_stats_fetch_retry(syncp, start);
+}
+
 /*
  * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
 	local_irq_disable();
 #endif
-	return 0;
-#endif
+	return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
 	local_irq_enable();
 #endif
-	return false;
-#endif
+	return __u64_stats_fetch_retry(syncp, start);
 }
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
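
For contrast, a sketch of the unchanged public entry points around the
same hypothetical struct as above: writers still bracket updates with
u64_stats_update_begin()/u64_stats_update_end(), and a process-context
reader racing with hard-IRQ updaters keeps using the _irq pair, which
after this patch merely disables IRQs on 32-bit UP and delegates to the
shared inner helpers:

/* Writer side, e.g. from an irq handler (hypothetical struct as above). */
static void foo_account(struct foo_stats *stats, u64 len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader in process context that may race with irq-context writers. */
static u64 foo_read_bytes_irqsafe(const struct foo_stats *stats)
{
	unsigned int seq;
	u64 bytes;

	do {
		seq = u64_stats_fetch_begin_irq(&stats->syncp);
		bytes = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, seq));

	return bytes;
}

The split keeps every seqcount path in __u64_stats_fetch_begin() and
__u64_stats_fetch_retry(); the wrappers differ only in what they
disable on 32-bit UP, so behavior on 64-bit and 32-bit SMP is
unchanged.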