author     travis@sgi.com <travis@sgi.com>           2008-01-30 07:32:52 -0500
committer  Ingo Molnar <mingo@elte.hu>               2008-01-30 07:32:52 -0500
commit     acdac87202a408133ee8f7985076de9d2e0dc5ab (patch)
tree       55948448c7f179713fc8d1d1ae01ad53adec9c2b /include/asm-generic
parent     5280e004fc22314122c84978c0b6a741cf96dc0f (diff)
percpu: make the asm-generic/percpu.h more "generic"
- add support for PER_CPU_ATTRIBUTES
- fix generic smp percpu_modcopy to use per_cpu_offset() macro.
Add the ability to use asm-generic/percpu.h even if the arch needs to override
several aspects of its operation. This will enable the use of the generic
percpu.h for all arches.

An arch may define:

__per_cpu_offset	Do not use the generic pointer array. The arch must
			define per_cpu_offset(cpu) itself (used by x86_64, s390).

__my_cpu_offset		Can be defined to provide an optimized way to determine
			the offset for the per-cpu variables of the currently
			executing processor. Used by ia64, x86_64, x86_32,
			sparc64, s/390.

SHIFT_PERCPU_PTR(ptr, offset)
			If an arch defines it, special handling of the pointer
			arithmetic may be implemented. Used by s/390.

(Some of these special percpu arch implementations may later be consolidated
so that there are fewer cases to deal with.)
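As an illustration of these hooks, here is a rough sketch of what a hypothetical
arch header could look like once the generic header gains the #ifndef guards
introduced by this patch. The arch name "foo", the file path and the comments are
invented for the example (the register-variable trick is loosely modeled on what
sparc64 does); no arch header from the tree is quoted here:

/* Hypothetical arch/foo/include/asm/percpu.h -- illustration only */
#ifndef _ASM_FOO_PERCPU_H_
#define _ASM_FOO_PERCPU_H_

#ifdef CONFIG_SMP

/*
 * Pretend this arch keeps the offset of the running CPU's percpu area
 * in a fixed global register (sparc64 reserves %g5 for this).
 */
register unsigned long __local_per_cpu_offset asm("g5");

/*
 * Telling asm-generic/percpu.h about it: the #ifndef __my_cpu_offset
 * block in the generic header is then skipped, so __get_cpu_var() and
 * __raw_get_cpu_var() use this register instead of indexing
 * __per_cpu_offset[] with smp_processor_id().
 */
#define __my_cpu_offset __local_per_cpu_offset

/*
 * An arch that cannot do plain pointer arithmetic on percpu addresses
 * (s390) would also define SHIFT_PERCPU_PTR() here; everyone else gets
 * the generic RELOC_HIDE() based version.
 */

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_FOO_PERCPU_H_ */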
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/percpu.h	74
1 file changed, 63 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index e038f13594e5..c41b1a731129 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -3,35 +3,87 @@
 #include <linux/compiler.h>
 #include <linux/threads.h>
 
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
 #ifdef CONFIG_SMP
 
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
 extern unsigned long __per_cpu_offset[NR_CPUS];
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
+
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add a offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+#define SHIFT_PERCPU_PTR(__p, __offset)	RELOC_HIDE((__p), (__offset))
+#endif
 
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({				\
-	extern int simple_identifier_##var(void);	\
-	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
+/*
+ * A percpu variable may point to a discarded reghions. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+
+
+#ifdef CONFIG_ARCH_SETS_UP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
 	for_each_possible_cpu(__i)				\
-		memcpy((pcpudst)+__per_cpu_offset[__i],		\
+		memcpy((pcpudst)+per_cpu_offset(__i),		\
 		       (src), (size));				\
 } while (0)
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var)			per_cpu__##var
-#define __raw_get_cpu_var(var)			per_cpu__##var
+#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var)			per_cpu_var(var)
+#define __raw_get_cpu_var(var)			per_cpu_var(var)
 
 #endif /* SMP */
 
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
+					__typeof__(type) per_cpu_var(name)
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
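
For context, a minimal sketch of how kernel code of this era consumes the
accessors this header provides (through linux/percpu.h). The variable name
foo_events and both functions are hypothetical, made up for illustration;
DEFINE_PER_CPU, get_cpu_var()/put_cpu_var() and for_each_possible_cpu() come
from linux/percpu.h and linux/cpumask.h rather than from this file:

/* Illustration only: a hypothetical per-cpu event counter. */
#include <linux/percpu.h>	/* pulls in asm/percpu.h -> asm-generic/percpu.h */
#include <linux/cpumask.h>

DEFINE_PER_CPU(unsigned long, foo_events);	/* real symbol: per_cpu__foo_events */

static void foo_count_event(void)
{
	/*
	 * get_cpu_var() disables preemption and then uses __get_cpu_var(),
	 * which offsets &per_cpu__foo_events by my_cpu_offset through
	 * SHIFT_PERCPU_PTR().
	 */
	get_cpu_var(foo_events)++;
	put_cpu_var(foo_events);
}

static unsigned long foo_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu(var, cpu) adds per_cpu_offset(cpu) to reach each CPU's copy. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(foo_events, cpu);
	return sum;
}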