path: root/include/linux
author     Eric W. Biederman <ebiederm@xmission.com>   2014-03-14 00:26:42 -0400
committer  David S. Miller <davem@davemloft.net>       2014-03-14 22:41:36 -0400
commit     57a7744e09867ebcfa0ccf1d6d529caa7728d552 (patch)
tree       6407fee7138787a24bf9251abfeeae69a239028a /include/linux
parent     85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (diff)
net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq
Replace the bh safe variant with the hard irq safe variant.

We need a hard irq safe variant to deal with netpoll transmitting
packets from hard irq context, and we need it in most if not all of
the places using the bh safe variant.

Except on 32bit uni-processor the code is exactly the same so don't
bother with a bh variant, just have a hard irq safe variant that
everyone can use.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
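For reference, a minimal sketch of the reader-side pattern that callers of these helpers end up with after the rename (the struct and field names below are hypothetical; only the u64_stats_* helpers come from include/linux/u64_stats_sync.h):

/* Hypothetical per-CPU counter block protected by u64_stats_sync. */
struct pcpu_tx_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/*
 * Reader: loop until a consistent snapshot is observed.  Using the
 * hard irq safe helpers means the snapshot stays consistent even
 * against writers that run from hard irq context (e.g. netpoll tx).
 */
static void read_tx_stats(const struct pcpu_tx_stats *stats,
			  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*packets = stats->packets;
		*bytes   = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}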
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/u64_stats_sync.h  16
1 file changed, 8 insertions, 8 deletions
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 7bfabd20204c..4b4439e75f45 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,8 +27,8 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- *    u64_stats_fetch_retry_bh() helpers
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ *    u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
  *
@@ -114,31 +114,31 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-	local_bh_disable();
+	local_irq_disable();
 #endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					    unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-	local_bh_enable();
+	local_irq_enable();
 #endif
 	return false;
 #endif
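The fetch helpers above pair with a writer-side update section. A hedged sketch of such an update path, reusing the hypothetical struct pcpu_tx_stats from the example after the commit message (the function name is likewise made up):

/*
 * Writer (hypothetical tx accounting path): on 32bit SMP the seqcount
 * write section makes the two 64bit updates appear atomic to readers;
 * on 64bit the sync object is empty and these calls compile away.
 */
static void account_tx(struct pcpu_tx_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}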