author		Tejun Heo <tj@kernel.org>	2011-05-24 03:59:36 -0400
committer	Tejun Heo <tj@kernel.org>	2011-05-24 03:59:36 -0400
commit		6988f20fe04e9ef3aea488cb8ab57fbeb78e12f0 (patch)
tree		c9d7fc50a2e2147a5ca07e3096e7eeb916ad2da9 /include/asm-generic
parent		0415b00d175e0d8945e6785aad21b5f157976ce0 (diff)
parent		6ea0c34dac89611126455537552cffe6c7e832ad (diff)

Merge branch 'fixes-2.6.39' into for-2.6.40
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/bug.h		28
-rw-r--r--	include/asm-generic/vmlinux.lds.h	44
2 files changed, 51 insertions(+), 21 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index c2c9ba032d46..f2d2faf4d9ae 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -165,10 +165,36 @@ extern void warn_slowpath_null(const char *file, const int line);
 #define WARN_ON_RATELIMIT(condition, state)			\
 		WARN_ON((condition) && __ratelimit(state))
 
+/*
+ * WARN_ON_SMP() is for cases that the warning is either
+ * meaningless for !SMP or may even cause failures.
+ * This is usually used for cases that we have
+ * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
+ * returns 0 for uniprocessor settings.
+ * It can also be used with values that are only defined
+ * on SMP:
+ *
+ * struct foo {
+ *	[...]
+ * #ifdef CONFIG_SMP
+ *	int bar;
+ * #endif
+ * };
+ *
+ * void func(struct foo *zoot)
+ * {
+ *	WARN_ON_SMP(!zoot->bar);
+ *
+ * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
+ * and should be a nop and return false for uniprocessor.
+ *
+ * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
+ * and x is true.
+ */
 #ifdef CONFIG_SMP
 # define WARN_ON_SMP(x)			WARN_ON(x)
 #else
-# define WARN_ON_SMP(x)			do { } while (0)
+# define WARN_ON_SMP(x)			({0;})
 #endif
 
 #endif
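The !SMP stub changes from do { } while (0) to the statement expression ({0;}) precisely so WARN_ON_SMP() can sit in an if () condition on both configurations, as the new comment describes. A minimal sketch of that pattern follows; WARN_ON_SMP() and spin_is_locked() are the real kernel interfaces, while the helper frob_foo() is hypothetical:

#include <linux/bug.h>
#include <linux/spinlock.h>

/* Illustrative only: on SMP this warns (and returns true) when the
 * caller does not hold the lock.  On UP, spin_is_locked() always
 * returns 0, so the stub must evaluate to false rather than expand
 * to an empty statement, or the if () below would not compile.
 */
static void frob_foo(spinlock_t *lock)
{
	if (WARN_ON_SMP(!spin_is_locked(lock)))
		return;		/* SMP: warned, caller forgot the lock */

	/* ... safe to touch data protected by *lock ... */
}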
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f301cea5ca2d..ebdaafa866a7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -688,6 +688,28 @@
 }
 
 /**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline)					\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+	*(.data..percpu..first)					\
+	. = ALIGN(PAGE_SIZE);					\
+	*(.data..percpu..page_aligned)				\
+	. = ALIGN(cacheline);					\
+	*(.data..percpu..readmostly)				\
+	. = ALIGN(cacheline);					\
+	*(.data..percpu)					\
+	*(.data..percpu..shared_aligned)			\
+	VMLINUX_SYMBOL(__per_cpu_end) = .;
+
+/**
  * PERCPU_VADDR - define output section for percpu area
  * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
@@ -715,16 +737,7 @@
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
 	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
 				- LOAD_OFFSET) {			\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data..percpu..first)					\
-		. = ALIGN(PAGE_SIZE);					\
-		*(.data..percpu..page_aligned)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu..readmostly)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu)					\
-		*(.data..percpu..shared_aligned)			\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+		PERCPU_INPUT(cacheline)					\
 	} phdr								\
 	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
730 743
@@ -744,16 +757,7 @@
 	. = ALIGN(PAGE_SIZE);						\
 	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data..percpu..first)					\
-		. = ALIGN(PAGE_SIZE);					\
-		*(.data..percpu..page_aligned)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu..readmostly)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu)					\
-		*(.data..percpu..shared_aligned)			\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+		PERCPU_INPUT(cacheline)					\
 	}
 
 
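For illustration, this is roughly what the flat PERCPU()-style output section above looks like once the preprocessor has expanded PERCPU_INPUT(); a sketch assuming a 64-byte cacheline, not text from the patch itself:

	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {
		VMLINUX_SYMBOL(__per_cpu_load) = .;
		VMLINUX_SYMBOL(__per_cpu_start) = .;
		*(.data..percpu..first)			/* must come first */
		. = ALIGN(PAGE_SIZE);
		*(.data..percpu..page_aligned)		/* page-aligned variables */
		. = ALIGN(64);				/* assumed cacheline size */
		*(.data..percpu..readmostly)		/* read-mostly kept apart */
		. = ALIGN(64);
		*(.data..percpu)
		*(.data..percpu..shared_aligned)
		VMLINUX_SYMBOL(__per_cpu_end) = .;
	}

Keeping the read-mostly subsection on its own cachelines is what the @cacheline argument buys: frequently written percpu data cannot drag read-mostly data into the same bouncing cacheline.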