| | | |
| --- | --- | --- |
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-06 12:30:52 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-06 12:30:52 -0400 |
| commit | 4aed2fd8e3181fea7c09ba79cf64e7e3f4413bf9 (patch) | |
| tree | 1f69733e5daab4915a76a41de0e4d1dc61e12cfb /include/asm-generic | |
| parent | 3a3527b6461b1298cc53ce72f336346739297ac8 (diff) | |
| parent | fc9ea5a1e53ee54f681e226d735008e2a6f8f470 (diff) | |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
tracing/kprobes: unregister_trace_probe needs to be called under mutex
perf: expose event__process function
perf events: Fix mmap offset determination
perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
perf, powerpc: Convert the FSL driver to use local64_t
perf tools: Don't keep unreferenced maps when unmaps are detected
perf session: Invalidate last_match when removing threads from rb_tree
perf session: Free the ref_reloc_sym memory at the right place
x86,mmiotrace: Add support for tracing STOS instruction
perf, sched migration: Librarize task states and event headers helpers
perf, sched migration: Librarize the GUI class
perf, sched migration: Make the GUI class client agnostic
perf, sched migration: Make it vertically scrollable
perf, sched migration: Parameterize cpu height and spacing
perf, sched migration: Fix key bindings
perf, sched migration: Ignore unhandled task states
perf, sched migration: Handle ignored migrate out events
perf: New migration tool overview
tracing: Drop cpparg() macro
perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
...
Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
Diffstat (limited to 'include/asm-generic')
| | | |
| --- | --- | --- |
| -rw-r--r-- | include/asm-generic/local64.h | 96 |
| -rw-r--r-- | include/asm-generic/vmlinux.lds.h | 4 |
2 files changed, 96 insertions, 4 deletions
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t. Which is
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
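As the header's own comment explains, local64_t exists so that architectures which can do IRQ-safe per-CPU arithmetic cheaply are not forced into full atomic operations. A minimal sketch of how this API is typically used with a per-CPU counter follows; the variable and function names here are hypothetical and not part of this patch:

```c
#include <linux/types.h>
#include <linux/percpu.h>
#include <asm/local64.h>

/* Hypothetical per-CPU event counter, initialised with the header's macro. */
static DEFINE_PER_CPU(local64_t, sample_count) = LOCAL64_INIT(0);

/* Fast path: runs on the local CPU, e.g. from an interrupt handler.
 * local64_add() is atomic with respect to IRQs on this CPU, so no lock
 * is needed. */
static void note_samples(unsigned long n)
{
	local64_add(n, this_cpu_ptr(&sample_count));
}

/* Slow path: another context sums up a given CPU's counter. */
static u64 read_samples(int cpu)
{
	return local64_read(per_cpu_ptr(&sample_count, cpu));
}
```

On 64-bit builds these calls resolve to local_t operations; on 32-bit builds the identical source falls back to atomic64_t, as the #else branch of the header shows.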
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 4e7ae6002056..8a92a170fb7d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -156,10 +156,6 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___markers) = .;				\
-	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;				\
 	. = ALIGN(32);							\
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
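The surviving block only lays out the __tracepoints section and marks its bounds with __start___tracepoints/__stop___tracepoints; C code then walks the section as an array. The removed __markers block followed the same pattern for the old kernel markers, which tracepoints have since superseded. A minimal sketch of how such linker-provided bounds are typically consumed (the walker function is illustrative, not an actual kernel helper):

```c
#include <linux/tracepoint.h>

/* Section bounds emitted by the linker script above via VMLINUX_SYMBOL(). */
extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];

/* Visit every tracepoint built into the kernel image by treating the
 * __tracepoints section as an array of struct tracepoint. */
static void walk_builtin_tracepoints(void (*fn)(struct tracepoint *tp))
{
	struct tracepoint *iter;

	for (iter = __start___tracepoints; iter < __stop___tracepoints; iter++)
		fn(iter);
}
```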