author     travis@sgi.com <travis@sgi.com>    2008-01-30 07:32:52 -0500
committer  Ingo Molnar <mingo@elte.hu>        2008-01-30 07:32:52 -0500
commit     0af8a5ccc51ee2269712c90ab09c91b0150f4224 (patch)
tree       449771ee3ee0a681712f41378ce6ff8d4433edef
parent     acdac87202a408133ee8f7985076de9d2e0dc5ab (diff)
x86_32: use generic percpu.h
x86_32 only provides a special way to obtain the local per-cpu area offset,
via x86_read_percpu. Otherwise it can fully use the generic handling.
Cc: ak@suse.de
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
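
Background note (not part of the commit): the generic percpu.h derives its
accessors from a single arch hook, the local CPU's offset, which x86_32 now
supplies as __my_cpu_offset via x86_read_percpu. The standalone userspace
sketch below only mimics that split; everything with a _demo suffix, and the
use of array indices instead of byte offsets and an %fs-relative read, is a
made-up illustration, not the kernel's actual implementation.

/* Hypothetical userspace sketch of the arch-hook/generic split, for
 * illustration only.  Real kernel code uses byte offsets into per-cpu
 * sections and an %fs-relative read; here the "offset" is an array index
 * and this_cpu_off is an ordinary variable we set by hand. */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long this_cpu_off;                /* stand-in for this_cpu_off */
#define x86_read_percpu_demo(var)   (var)         /* the real one reads %fs:var */

/* The only arch-specific piece: how to find the local CPU's offset. */
#define __my_cpu_offset_demo        x86_read_percpu_demo(this_cpu_off)

/* "Generic" accessors derived from the offset alone. */
#define per_cpu_demo(arr, cpu)      ((arr)[(cpu)])
#define get_cpu_var_demo(arr)       ((arr)[__my_cpu_offset_demo])

static unsigned long demo_counter[NR_CPUS];       /* one copy per CPU */

int main(void)
{
	for (unsigned long cpu = 0; cpu < NR_CPUS; cpu++) {
		this_cpu_off = cpu;               /* pretend we run on 'cpu' */
		get_cpu_var_demo(demo_counter) += cpu + 1;
	}
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %lu\n", cpu, per_cpu_demo(demo_counter, cpu));
	return 0;
}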
-rw-r--r--  include/asm-x86/percpu_32.h | 30
1 file changed, 9 insertions(+), 21 deletions(-)
diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h
index 77bd0045f331..e62ce2fe2c9c 100644
--- a/include/asm-x86/percpu_32.h
+++ b/include/asm-x86/percpu_32.h
@@ -42,26 +42,7 @@
  */
 #ifdef CONFIG_SMP
 
-/* This is used for other cpus to find our section. */
-extern unsigned long __per_cpu_offset[];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({				\
-	extern int simple_indentifier_##var(void);	\
-	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-
-#define __raw_get_cpu_var(var) (*({					\
-	extern int simple_indentifier_##var(void);			\
-	RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));	\
-}))
-
-#define __get_cpu_var(var) __raw_get_cpu_var(var)
+#define __my_cpu_offset x86_read_percpu(this_cpu_off)
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)		\
@@ -74,11 +55,18 @@ do {					\
 
 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
 #define __percpu_seg "%%fs:"
+
 #else  /* !SMP */
-#include <asm-generic/percpu.h>
+
 #define __percpu_seg ""
+
 #endif	/* SMP */
 
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);
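
For readers unfamiliar with the offset scheme the surviving comment refers to
("fs segment starts at (positive) offset == __per_cpu_offset[cpu]"): each CPU
has its own copy of the per-cpu data, and a variable is reached by adding that
CPU's byte offset to the address of the reference copy. The snippet below is a
hypothetical, self-contained illustration of that pointer arithmetic;
SHIFT_PTR_DEMO and the *_demo arrays are invented for this sketch and only
loosely mirror RELOC_HIDE() and __per_cpu_offset[].

/* Hypothetical illustration of per-cpu addressing by byte offset.
 * One copy of the "per-cpu section" exists per CPU, and the byte distance
 * of each copy from the reference copy is recorded; accessing a per-cpu
 * variable for CPU n means "address of the variable + CPU n's offset". */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 2

struct pcpu_copy { unsigned long counter; };      /* our whole "section" */

static struct pcpu_copy pcpu_area[NR_CPUS];       /* copies laid out back to back */
static ptrdiff_t __per_cpu_offset_demo[NR_CPUS];  /* byte distance from copy 0 */

/* Loose analogue of RELOC_HIDE(): add a byte offset to a typed pointer. */
#define SHIFT_PTR_DEMO(ptr, off) \
	((__typeof__(ptr))((char *)(ptr) + (off)))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		__per_cpu_offset_demo[cpu] =
			(char *)&pcpu_area[cpu] - (char *)&pcpu_area[0];

	/* "per_cpu(counter, 1)": reference copy's address plus CPU 1's offset. */
	unsigned long *p = SHIFT_PTR_DEMO(&pcpu_area[0].counter,
					  __per_cpu_offset_demo[1]);
	*p = 42;

	printf("cpu1 counter = %lu\n", pcpu_area[1].counter);  /* prints 42 */
	return 0;
}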