author    Nicolas Pitre <nico@cam.org>  2008-11-09 00:27:53 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-11-09 14:17:33 -0500
commit    058e3739f6b0753696db1952378de9e8d2a11735 (patch)
tree      2261c23bf8c77e51628310057b4dde718e487c53 /include
parent    02cabab4a8a7ef2d51189d5dda84516d36662910 (diff)
clarify usage expectations for cnt32_to_63()
Currently, all existing users of cnt32_to_63() are fine since the CPU architectures where it is used don't do read access reordering, and user mode preemption is disabled already. It is nevertheless a good idea to better elaborate usage requirements wrt preemption, and to use an explicit memory barrier on SMP to avoid different CPUs accessing the counter value in the wrong order. On UP a simple compiler barrier is sufficient.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
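For context, the typical caller of cnt32_to_63() is a platform sched_clock() built on a free-running 32-bit timer. The sketch below is illustrative only: read_hw_counter() and NSEC_PER_TICK are hypothetical stand-ins for a platform's counter read-out and tick scaling, and are not part of this patch.

    #include <linux/cnt32_to_63.h>

    /* Hypothetical platform hook: free-running 32-bit hardware counter. */
    extern u32 read_hw_counter(void);

    /* Hypothetical scaling factor: nanoseconds per counter tick. */
    #define NSEC_PER_TICK 10

    unsigned long long sched_clock(void)
    {
            /* Extend the counter to 63 bits; bit 63 of the result is
             * garbage, so the caller masks it off before scaling. */
            unsigned long long v = cnt32_to_63(read_hw_counter());

            return (v & 0x7fffffffffffffffULL) * NSEC_PER_TICK;
    }

Per the restrictions added by this patch, such a function (or a periodic kernel timer standing in for it) must run at least once per half period of the hardware counter, and must not be preempted for longer than the margin described in the updated comment below, so that __m_cnt_hi is brought up to date in time.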
Diffstat (limited to 'include')
-rw-r--r-- include/linux/cnt32_to_63.h | 22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 8c0f9505b48c..7605fdd1eb65 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <asm/system.h>
 
 /* this is used only to give gcc a clue about good code generation */
 union cnt32_to_63 {
@@ -53,11 +54,19 @@ union cnt32_to_63 {
  * needed increment. And any race in updating the value in memory is harmless
  * as the same value would simply be stored more than once.
  *
- * The only restriction for the algorithm to work properly is that this
- * code must be executed at least once per each half period of the 32-bit
- * counter to properly update the state bit in memory. This is usually not a
- * problem in practice, but if it is then a kernel timer could be scheduled
- * to manage for this code to be executed often enough.
+ * The restrictions for the algorithm to work properly are:
+ *
+ * 1) this code must be called at least once per each half period of the
+ *    32-bit counter;
+ *
+ * 2) this code must not be preempted for a duration longer than the
+ *    32-bit counter half period minus the longest period between two
+ *    calls to this code.
+ *
+ * Those requirements ensure proper update to the state bit in memory.
+ * This is usually not a problem in practice, but if it is then a kernel
+ * timer should be scheduled to manage for this code to be executed often
+ * enough.
  *
  * Note that the top bit (bit 63) in the returned value should be considered
  * as garbage. It is not cleared here because callers are likely to use a
@@ -68,9 +77,10 @@ union cnt32_to_63 {
  */
 #define cnt32_to_63(cnt_lo) \
 ({ \
-        static volatile u32 __m_cnt_hi; \
+        static u32 __m_cnt_hi; \
         union cnt32_to_63 __x; \
         __x.hi = __m_cnt_hi; \
+        smp_rmb(); \
         __x.lo = (cnt_lo); \
         if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
                 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
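To see the half-period logic in isolation, here is a minimal user-space model of the same algorithm (an illustration under stated assumptions, not the kernel macro: it drops the smp_rmb() and the per-call-site static, and uses <stdint.h> types):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t m_cnt_hi;  /* upper word plus state bit (bit 31) */

    static uint64_t model_cnt32_to_63(uint32_t lo)
    {
            uint32_t hi = m_cnt_hi;

            /* When bit 31 of lo stops matching the state bit in hi, the
             * counter crossed a half-period boundary: flip the state bit,
             * and carry into the upper word if lo wrapped past zero. */
            if ((int32_t)(hi ^ lo) < 0)
                    m_cnt_hi = hi = (hi ^ 0x80000000) + (hi >> 31);

            return ((uint64_t)hi << 32) | lo;  /* bit 63 is garbage */
    }

    int main(void)
    {
            /* Samples walking the 32-bit counter across a wraparound. */
            uint32_t samples[] = { 0x7ffffff0, 0x80000010,
                                   0xfffffff0, 0x00000010 };
            for (int i = 0; i < 4; i++) {
                    uint64_t v = model_cnt32_to_63(samples[i]);
                    printf("%016llx\n",
                           (unsigned long long)(v & ~(1ULL << 63)));
            }
            return 0;
    }

With these samples the masked output is 0x7ffffff0, 0x80000010, 0xfffffff0 and finally 0x100000010: the upper word increments exactly once, at the point where the low word wraps around.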