Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/bitops/ext2-non-atomic.h   |  2
-rw-r--r--   include/asm-generic/bitops/le.h                 |  4
-rw-r--r--   include/asm-generic/bug.h                       | 17
-rw-r--r--   include/asm-generic/percpu.h                    | 97
-rw-r--r--   include/asm-generic/resource.h                  |  5
-rw-r--r--   include/asm-generic/tlb.h                       |  1
-rw-r--r--   include/asm-generic/vmlinux.lds.h               | 90
7 files changed, 169 insertions, 47 deletions
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
index 1697404afa05..63cf822431a2 100644
--- a/include/asm-generic/bitops/ext2-non-atomic.h
+++ b/include/asm-generic/bitops/ext2-non-atomic.h
@@ -14,5 +14,7 @@
 	generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
 #define ext2_find_next_zero_bit(addr, size, off) \
 	generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+#define ext2_find_next_bit(addr, size, off) \
+	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
 
 #endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index b9c7e5d2d2ad..80e3bf13b2b9 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -20,6 +20,8 @@
 #define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
 
 #define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
+#define generic_find_next_le_bit(addr, size, offset) \
+			find_next_bit(addr, size, offset)
 
 #elif defined(__BIG_ENDIAN)
 
@@ -42,6 +44,8 @@
 
 extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
 		unsigned long size, unsigned long offset);
+extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
+		unsigned long size, unsigned long offset);
 
 #else
 #error "Please fix <asm/byteorder.h>"
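
A hedged usage sketch (not part of the patch): on a little-endian configuration the new ext2_find_next_bit() collapses to find_next_bit() through the two macro layers above, so a filesystem could walk the set bits of an on-disk bitmap as below. The bitmap buffer, its size and the per-block handler are made-up names for illustration.

	static void scan_used_blocks(const void *bitmap, unsigned long nbits)
	{
		unsigned long bit = 0;

		/* visit every set bit, in little-endian on-disk bit order */
		while ((bit = ext2_find_next_bit(bitmap, nbits, bit)) < nbits) {
			process_block(bit);	/* hypothetical per-block handler */
			bit++;			/* resume the scan after this bit */
		}
	}
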
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index d56fedbb457a..2632328d8646 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -31,14 +31,19 @@ struct bug_entry {
 #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 #endif
 
-#ifndef HAVE_ARCH_WARN_ON
+#ifndef __WARN
+#ifndef __ASSEMBLY__
+extern void warn_on_slowpath(const char *file, const int line);
+#define WANT_WARN_ON_SLOWPATH
+#endif
+#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
+#endif
+
+#ifndef WARN_ON
 #define WARN_ON(condition) ({					\
 	int __ret_warn_on = !!(condition);			\
-	if (unlikely(__ret_warn_on)) {				\
-		printk("WARNING: at %s:%d %s()\n", __FILE__,	\
-			__LINE__, __FUNCTION__);		\
-		dump_stack();					\
-	}							\
+	if (unlikely(__ret_warn_on))				\
+		__WARN();					\
 	unlikely(__ret_warn_on);				\
 })
 #endif
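
A hedged sketch (not part of the patch): WARN_ON() still evaluates to the truth value of its condition, so callers can keep using it inline in error paths; the difference is that the default slow path is now a single out-of-line warn_on_slowpath() call, unless an architecture supplies its own __WARN() or its own WARN_ON(). The caller below is hypothetical.

	static int example_bind(struct device *dev)
	{
		/* prints the file:line warning and dumps the stack once,
		 * then yields the (unlikely-annotated) condition value */
		if (WARN_ON(dev == NULL))
			return -EINVAL;
		return 0;
	}
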
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d85172e9ed45..4b8d31cda1a0 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -3,54 +3,79 @@
 #include <linux/compiler.h>
 #include <linux/threads.h>
 
-#define __GENERIC_PER_CPU
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
 #ifdef CONFIG_SMP
 
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
 extern unsigned long __per_cpu_offset[NR_CPUS];
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({ \
-	extern int simple_identifier_##var(void); \
-	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size) \
-do { \
-	unsigned int __i; \
-	for_each_possible_cpu(__i) \
-		memcpy((pcpudst)+__per_cpu_offset[__i], \
-			(src), (size)); \
-} while (0)
-#else /* ! SMP */
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add a offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#endif
 
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
+/*
+ * A percpu variable may point to a discarded regions. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
 
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var) per_cpu__##var
-#define __raw_get_cpu_var(var) per_cpu__##var
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var) per_cpu_var(var)
+#define __raw_get_cpu_var(var) per_cpu_var(var)
 
 #endif /* SMP */
 
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
+					__typeof__(type) per_cpu_var(name)
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
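
A hedged sketch (not part of the patch) of what the reworked accessors expand to on a generic SMP build; the per-CPU counter and its users below are hypothetical, and DEFINE_PER_CPU itself is now supplied by the linux/arch percpu headers rather than this file.

	DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical per-CPU counter */

	static void count_hit(void)
	{
		/* __get_cpu_var(hits) becomes
		 *   *SHIFT_PERCPU_PTR(&per_cpu__hits, my_cpu_offset)
		 * i.e. RELOC_HIDE() plus this CPU's offset, unless the arch
		 * overrides SHIFT_PERCPU_PTR or __my_cpu_offset. */
		__get_cpu_var(hits)++;
	}

	static unsigned long total_hits(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(hits, cpu);	/* instance for 'cpu' */
		return sum;
	}
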
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index a4a22cc35898..587566f95f6c 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -44,8 +44,8 @@
 #define RLIMIT_NICE		13	/* max nice prio allowed to raise to
 					   0-39 for nice level 19 .. -20 */
 #define RLIMIT_RTPRIO		14	/* maximum realtime priority */
-
-#define RLIM_NLIMITS		15
+#define RLIMIT_RTTIME		15	/* timeout for RT tasks in us */
+#define RLIM_NLIMITS		16
 
 /*
  * SuS says limits have to be unsigned.
@@ -86,6 +86,7 @@
 	[RLIMIT_MSGQUEUE]	= { MQ_BYTES_MAX, MQ_BYTES_MAX },	\
 	[RLIMIT_NICE]		= { 0, 0 },				\
 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+	[RLIMIT_RTTIME]		= { RLIM_INFINITY, RLIM_INFINITY },	\
 }
 
 #endif /* __KERNEL__ */
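
A hedged user-space sketch (not part of the patch): a SCHED_FIFO/SCHED_RR task opting in to the new RLIMIT_RTTIME budget, assuming the C library exports the constant. The values are illustrative; the limit is expressed in microseconds of CPU time consumed without sleeping.

	#include <sys/resource.h>

	static int limit_rt_runtime(void)
	{
		struct rlimit rl = {
			.rlim_cur = 200000,	/* soft limit: 200 ms */
			.rlim_max = 500000,	/* hard limit: 500 ms */
		};

		/* returns 0 on success, -1 with errno set on failure */
		return setrlimit(RLIMIT_RTTIME, &rl);
	}
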
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 75f2bfab614f..6ce9f3ab928d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,7 +15,6 @@
 
 #include <linux/swap.h>
 #include <linux/quicklist.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
 /*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9f584cc5c5fb..f784d2f34149 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -9,10 +9,46 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
+/* The actual configuration determine if the init/exit sections
+ * are handled as text/data or they can be discarded (which
+ * often happens at runtime)
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec)    *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec)    *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec)    *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
 	*(.data.init.refok)						\
+	*(.ref.data)							\
+	DEV_KEEP(init.data)						\
+	DEV_KEEP(exit.data)						\
+	CPU_KEEP(init.data)						\
+	CPU_KEEP(exit.data)						\
+	MEM_KEEP(init.data)						\
+	MEM_KEEP(exit.data)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___markers) = .;				\
 	*(__markers)							\
@@ -132,14 +168,25 @@
 		*(__ksymtab_strings)					\
 	}								\
 									\
+	/* __*init sections */						\
+	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
+		*(.ref.rodata)						\
+		DEV_KEEP(init.rodata)					\
+		DEV_KEEP(exit.rodata)					\
+		CPU_KEEP(init.rodata)					\
+		CPU_KEEP(exit.rodata)					\
+		MEM_KEEP(init.rodata)					\
+		MEM_KEEP(exit.rodata)					\
+	}								\
+									\
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
 		*(__param)						\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
+		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
-									\
 	. = ALIGN((align));
 
 /* RODATA provided for backward compatibility.
@@ -158,8 +205,16 @@
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
 		*(.text)						\
+		*(.ref.text)						\
 		*(.text.init.refok)					\
-		*(.exit.text.refok)
+		*(.exit.text.refok)					\
+		DEV_KEEP(init.text)					\
+		DEV_KEEP(exit.text)					\
+		CPU_KEEP(init.text)					\
+		CPU_KEEP(exit.text)					\
+		MEM_KEEP(init.text)					\
+		MEM_KEEP(exit.text)
+
 
 /* sched.text is aling to function alignment to secure we have same
  * address even at second ld pass when generating System.map */
@@ -183,6 +238,37 @@
 		*(.kprobes.text)					\
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+/* init and exit section handling */
+#define INIT_DATA							\
+	*(.init.data)							\
+	DEV_DISCARD(init.data)						\
+	DEV_DISCARD(init.rodata)					\
+	CPU_DISCARD(init.data)						\
+	CPU_DISCARD(init.rodata)					\
+	MEM_DISCARD(init.data)						\
+	MEM_DISCARD(init.rodata)
+
+#define INIT_TEXT							\
+	*(.init.text)							\
+	DEV_DISCARD(init.text)						\
+	CPU_DISCARD(init.text)						\
+	MEM_DISCARD(init.text)
+
+#define EXIT_DATA							\
+	*(.exit.data)							\
+	DEV_DISCARD(exit.data)						\
+	DEV_DISCARD(exit.rodata)					\
+	CPU_DISCARD(exit.data)						\
+	CPU_DISCARD(exit.rodata)					\
+	MEM_DISCARD(exit.data)						\
+	MEM_DISCARD(exit.rodata)
+
+#define EXIT_TEXT							\
+	*(.exit.text)							\
+	DEV_DISCARD(exit.text)						\
+	CPU_DISCARD(exit.text)						\
+	MEM_DISCARD(exit.text)
+
 	/* DWARF debug sections.
 	   Symbols in the DWARF debugging sections are relative to
 	   the beginning of the section so we begin them at 0. */
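
A hedged sketch (not part of the patch) of how the new section names are meant to be produced on the C side; the attribute wrappers below are illustrative stand-ins for the real definitions in linux/init.h. With CONFIG_HOTPLUG set, DEV_KEEP(init.text) expands to *(.devinit.text) and keeps such code in the image; without it, DEV_DISCARD(init.text) routes the same input section to the discard list.

	/* illustrative attribute wrappers (the real ones live in linux/init.h) */
	#define __example_ref		__attribute__((__section__(".ref.text")))
	#define __example_devinit	__attribute__((__section__(".devinit.text")))

	/* kept or discarded by DEV_KEEP(init.text)/DEV_DISCARD(init.text),
	 * depending on CONFIG_HOTPLUG */
	static int __example_devinit example_probe(void)
	{
		return 0;
	}

	/* always kept in .text via the new *(.ref.text) entry in TEXT_TEXT */
	int __example_ref example_init_caller(void)
	{
		return example_probe();
	}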