-rw-r--r--  Documentation/local_ops.txt | 163
1 file changed, 163 insertions(+), 0 deletions(-)
diff --git a/Documentation/local_ops.txt b/Documentation/local_ops.txt
new file mode 100644
index 000000000000..b0aca0705d1e
--- /dev/null
+++ b/Documentation/local_ops.txt
@@ -0,0 +1,163 @@
Semantics and Behavior of Local Atomic Operations

Mathieu Desnoyers


This document explains the purpose of the local atomic operations, how to
implement them for any given architecture, and shows how they can be used
properly. It also stresses the precautions that must be taken when reading
those local variables across CPUs when the order of memory writes matters.


* Purpose of local atomic operations

Local atomic operations are meant to provide fast and highly reentrant per CPU
counters. They minimize the performance cost of standard atomic operations by
removing the LOCK prefix and memory barriers normally required to synchronize
across CPUs.

Having fast per CPU atomic counters is interesting in many cases: they do not
require disabling interrupts to protect from interrupt handlers and they permit
coherent counters in NMI handlers. They are especially useful for tracing
purposes and for various performance monitoring counters.

Local atomic operations only guarantee variable modification atomicity wrt the
CPU which owns the data. Therefore, care must be taken to make sure that only
one CPU writes to the local_t data. This is done by using per cpu data and
making sure that we modify it from within a preemption safe context. It is
however permitted to read local_t data from any CPU: it will then appear to be
written out of order wrt other memory writes on the owner CPU.


* Implementation for a given architecture

It can be done by slightly modifying the standard atomic operations: only
their UP variant must be kept. It typically means removing the LOCK prefix (on
i386 and x86_64) and any SMP synchronization barrier. If the architecture does
not have a different behavior between SMP and UP, including asm-generic/local.h
in your architecture's local.h is sufficient.

The local_t type is defined as an opaque signed long by embedding an
atomic_long_t inside a structure. This is made so that a cast from this type to
a long fails. The definition looks like:

typedef struct { atomic_long_t a; } local_t;

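
As an illustration, on x86_64 the local_inc() operation can boil down to a
single increment of the embedded counter with no LOCK prefix. The following is
only a simplified sketch in the spirit of the x86_64 implementation, not the
exact kernel source:

        static inline void local_inc(local_t *l)
        {
                /* Plain increment, no "lock" prefix: atomic only wrt the
                 * CPU that owns the variable. */
                asm volatile("incq %0"
                             : "+m" (l->a.counter));
        }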

* How to use local atomic operations

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);


* Counting

Counting is done on all the bits of a signed long.

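
The operations themselves mirror the familiar atomic_long_t API. A few of the
most common ones, shown here as prototypes for clarity (in the kernel they are
macros or inline functions provided by asm-generic/local.h or the
architecture's local.h):

        long local_read(local_t *l);          /* read the counter */
        void local_set(local_t *l, long i);   /* set the counter to i */
        void local_inc(local_t *l);           /* increment by one */
        void local_dec(local_t *l);           /* decrement by one */
        void local_add(long i, local_t *l);   /* add i to the counter */
        void local_sub(long i, local_t *l);   /* subtract i from the counter */
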
In preemptible context, use get_cpu_var() and put_cpu_var() around local atomic
operations: it makes sure that preemption is disabled around write access to
the per cpu variable. For instance:

        local_inc(&get_cpu_var(counters));
        put_cpu_var(counters);

If you are already in a preemption-safe context, you can directly use
__get_cpu_var() instead:

        local_inc(&__get_cpu_var(counters));



* Reading the counters

Those local counters can be read from foreign CPUs to sum the count. Note that
the data seen by local_read across CPUs must be considered to be out of order
relative to other memory writes happening on the CPU that owns the data.

        long sum = 0;
        for_each_online_cpu(cpu)
                sum += local_read(&per_cpu(counters, cpu));

If you want to use a remote local_read to synchronize access to a resource
between CPUs, explicit smp_wmb() and smp_rmb() memory barriers must be used
respectively on the writer and the reader CPUs. This would be the case, for
instance, if you use the local_t variable as a counter of bytes written to a
buffer: there should be an smp_wmb() between the buffer write and the counter
increment, and an smp_rmb() between the counter read and the buffer read.

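A sketch of such a scheme is shown below, assuming a per cpu buffer buf and a
per cpu bytes_written counter. All names used here (buf, bytes_written,
BUFSIZE, offset, data, nbytes, count, snapshot, cpu) are purely illustrative,
not part of an existing API:

        /* Illustrative declarations; BUFSIZE, offset, count, data, nbytes,
         * snapshot and cpu are assumed to exist. */
        static DEFINE_PER_CPU(char, buf[BUFSIZE]);
        static DEFINE_PER_CPU(local_t, bytes_written) = LOCAL_INIT(0);

        /* Writer side, on the CPU that owns the data (preemption disabled) */
        offset = local_read(&__get_cpu_var(bytes_written));
        memcpy(__get_cpu_var(buf) + offset, data, nbytes);
        smp_wmb();      /* order the buffer write before the counter update */
        local_add(nbytes, &__get_cpu_var(bytes_written));

        /* Reader side, possibly running on another CPU */
        count = local_read(&per_cpu(bytes_written, cpu));
        smp_rmb();      /* order the counter read before the buffer read */
        memcpy(snapshot, per_cpu(buf, cpu), count);
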
Here is a sample module which implements a basic per cpu counter using local.h.

--- BEGIN ---
/* test-local.c
 *
 * Sample module for local.h usage.
 */


#include <asm/local.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/timer.h>

static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);

static struct timer_list test_timer;

/* IPI called on each CPU. */
static void test_each(void *info)
{
        /* Increment the counter from a non preemptible context */
        printk("Increment on cpu %d\n", smp_processor_id());
        local_inc(&__get_cpu_var(counters));

        /* This is what incrementing the variable would look like within a
         * preemptible context (it disables preemption):
         *
         * local_inc(&get_cpu_var(counters));
         * put_cpu_var(counters);
         */
}

static void do_test_timer(unsigned long data)
{
        int cpu;

        /* Increment the counters */
        on_each_cpu(test_each, NULL, 0, 1);
        /* Read all the counters */
        printk("Counters read from CPU %d\n", smp_processor_id());
        for_each_online_cpu(cpu) {
                printk("Read : CPU %d, count %ld\n", cpu,
                        local_read(&per_cpu(counters, cpu)));
        }
        del_timer(&test_timer);
        test_timer.expires = jiffies + 1000;
        add_timer(&test_timer);
}

static int __init test_init(void)
{
        /* initialize the timer that will increment the counter */
        init_timer(&test_timer);
        test_timer.function = do_test_timer;
        test_timer.expires = jiffies + 1;
        add_timer(&test_timer);

        return 0;
}

static void __exit test_exit(void)
{
        del_timer_sync(&test_timer);
}

module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Local Atomic Ops");
--- END ---