author    Ingo Molnar <mingo@elte.hu>  2009-03-26 16:39:17 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-03-27 12:28:43 -0400
commit    6e15cf04860074ad032e88c306bea656bbdd0f22 (patch)
tree      c346383bb7563e8d66b2f4a502f875b259c34870 /include/asm-generic
parent    be0ea69674ed95e1e98cb3687a241badc756d228 (diff)
parent    60db56422043aaa455ac7f858ce23c273220f9d9 (diff)
Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
	arch/parisc/kernel/irq.c
	arch/x86/include/asm/fixmap_64.h
	arch/x86/include/asm/setup.h
	kernel/irq/handle.c

Semantic merge:
	arch/x86/include/asm/fixmap.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/percpu.h       | 52
-rw-r--r--  include/asm-generic/sections.h     |  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  | 55
3 files changed, 104 insertions, 5 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b0e63c672ebd..00f45ff081a6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
 	__typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to a lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var)						\
+  ({									\
+	typeof(per_cpu_var(var)) __tmp_var__;				\
+	__tmp_var__ = get_cpu_var(var);					\
+	put_cpu_var(var);						\
+	__tmp_var__;							\
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op)				\
+do {									\
+	get_cpu_var(var) op val;					\
+	put_cpu_var(var);						\
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
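
A minimal usage sketch of the new generic accessors (not part of the patch; the per-cpu variable and helper functions below are hypothetical). percpu_read() yields a preemption-safe snapshot of this CPU's copy, while percpu_write()/percpu_add() and friends perform the read-modify-write under the get_cpu_var()/put_cpu_var() pair shown above, so callers need no explicit preemption handling:

#include <linux/percpu.h>

/* hypothetical per-cpu counter, defined the usual way */
static DEFINE_PER_CPU(unsigned long, foo_events);

static void foo_note_event(void)
{
	/* preemption-safe increment of this CPU's copy */
	percpu_add(foo_events, 1);
}

static unsigned long foo_events_this_cpu(void)
{
	/* preemption-safe snapshot of this CPU's copy */
	return percpu_read(foo_events);
}

static void foo_reset_this_cpu(void)
{
	/* preemption-safe store to this CPU's copy */
	percpu_write(foo_events, 0);
}

On architectures that override these macros (e.g. with a single gs-relative instruction on x86), the same call sites get the optimized form automatically.
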
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 79a7ff925bf8..4ce48e878530 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aca40b93bd28..a654d724d3b0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -427,12 +427,59 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.  If @vaddr
+ * is not blank, it specifies explicit base address and all percpu
+ * symbols will be offset from the given address.  If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank.  Be warned that
+ * output PHDR is sticky.  If @phdr is specified, the next output
+ * section in the linker script will go there too.  @phdr should have
+ * a leading colon.
+ *
+ * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr)					\
+	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
+	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
+				- LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
+		*(.data.percpu.page_aligned)				\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	} phdr								\
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Align to @align and outputs output section for percpu area.  This
+ * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
+ */
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
+		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
 		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
-	}								\
-	VMLINUX_SYMBOL(__per_cpu_end) = .;
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	}
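
For orientation, a hedged sketch of how an architecture's vmlinux.lds.S might invoke the two linker macros. The zero base address and the dedicated :percpu PHDR mirror the zero-based x86_64 layout this merge enables; the fragment is illustrative, not a copy of any real linker script:

/* illustrative vmlinux.lds.S fragment (not part of this diff) */
#include <asm-generic/vmlinux.lds.h>

PHDRS {
	text   PT_LOAD FLAGS(5);	/* R_E */
	data   PT_LOAD FLAGS(7);	/* RWE */
	percpu PT_LOAD FLAGS(7);	/* RWE: dedicated PHDR for the percpu area */
}

SECTIONS
{
	/* regular text/data output sections go here */

	/* explicit zero base plus dedicated PHDR, as on zero-based x86_64 */
	PERCPU_VADDR(0, :percpu)

	/* architectures without special placement needs would instead use:
	 *
	 *	PERCPU(PAGE_SIZE)
	 */
}

PERCPU_VADDR() keeps the link address (@vaddr) and the load address (__per_cpu_load) separate, which is what lets an arch link the percpu area at 0 while still loading it inside the kernel image; PERCPU() collapses the two, and defines __per_cpu_load relative to .data.percpu as needed by relocatable x86_32.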