Diffstat (limited to 'include/asm-generic')
 include/asm-generic/atomic64.h    |  42 +
 include/asm-generic/errno.h       |   2 +
 include/asm-generic/kmap_types.h  |   7 +-
 include/asm-generic/vmlinux.lds.h | 241 ++++++++-
 4 files changed, 280 insertions(+), 12 deletions(-)
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 000000000000..b18ce4f9ee3d
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,42 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+extern long long atomic64_read(const atomic64_t *v);
+extern void	 atomic64_set(atomic64_t *v, long long i);
+extern void	 atomic64_add(long long a, atomic64_t *v);
+extern long long atomic64_add_return(long long a, atomic64_t *v);
+extern void	 atomic64_sub(long long a, atomic64_t *v);
+extern long long atomic64_sub_return(long long a, atomic64_t *v);
+extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long atomic64_xchg(atomic64_t *v, long long new);
+extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#endif  /*  _ASM_GENERIC_ATOMIC64_H  */
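
These declarations push the actual work out of line; the series backs them
with a spinlock-protected implementation (lib/atomic64.c). A minimal sketch
of the idea, assuming a single global lock where the real code would likely
hash the variable's address across several locks to reduce contention:

	/* Sketch only, not the commit's lib/atomic64.c: serialize every
	 * 64-bit operation behind one IRQ-safe spinlock. */
	#include <linux/spinlock.h>
	#include <asm-generic/atomic64.h>

	static DEFINE_SPINLOCK(atomic64_lock);	/* assumed single lock */

	long long atomic64_add_return(long long a, atomic64_t *v)
	{
		unsigned long flags;
		long long val;

		/* IRQ-safe: the ops may be used from interrupt context */
		spin_lock_irqsave(&atomic64_lock, flags);
		val = v->counter += a;
		spin_unlock_irqrestore(&atomic64_lock, flags);
		return val;
	}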
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index e8852c092fea..28cc03bf19e6 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -106,4 +106,6 @@
 #define	EOWNERDEAD	130	/* Owner died */
 #define	ENOTRECOVERABLE	131	/* State not recoverable */
 
+#define ERFKILL		132	/* Operation not possible due to RF-kill */
+
 #endif
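
The new code gives drivers a dedicated error for operations refused because
of rfkill. A hypothetical driver-side use (illustrative only; priv->rfkill
and the call site are assumptions, rfkill_blocked() is the core's query
helper):

	/* Hypothetical: refuse to transmit while the radio is blocked */
	if (rfkill_blocked(priv->rfkill))
		return -ERFKILL;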
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 58c33055c304..eddbce0f9fb9 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_GENERIC_KMAP_TYPES_H
 #define _ASM_GENERIC_KMAP_TYPES_H
 
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef __WITH_KM_FENCE
 # define D(n) __KM_FENCE_##n ,
 #else
 # define D(n)
@@ -24,7 +24,10 @@ D(12) KM_SOFTIRQ1,
 D(13) KM_SYNC_ICACHE,
 D(14) KM_SYNC_DCACHE,
 D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_TYPE_NR
+D(16) KM_IRQ_PTE,
+D(17) KM_NMI,
+D(18) KM_NMI_PTE,
+D(19) KM_TYPE_NR
 };
 
 #undef D
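
The D() indirection is what makes the fence variant work: each slot can be
preceded by a guard enumerator, so debug builds shift every slot index and
catch code that reuses a hardcoded slot. A sketch of the expansion, plus the
era's explicit-slot API (the call site is illustrative, not part of this
patch):

	/* With __WITH_KM_FENCE defined, D(16) KM_IRQ_PTE, expands to
	 *	__KM_FENCE_16 , KM_IRQ_PTE,
	 * interleaving guards with real slots; without it, only the
	 * real slots remain. Callers of the time named slots directly: */
	void *vaddr = kmap_atomic(page, KM_NMI);	/* map in NMI context */
	/* ... access the highmem page through vaddr ... */
	kunmap_atomic(vaddr, KM_NMI);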
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f1736ca7922c..6bdba10fef4a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,4 +1,57 @@
1#include <linux/section-names.h> 1/*
2 * Helper macros to support writing architecture specific
3 * linker scripts.
4 *
5 * A minimal linker scripts has following content:
6 * [This is a sample, architectures may have special requiriements]
7 *
8 * OUTPUT_FORMAT(...)
9 * OUTPUT_ARCH(...)
10 * ENTRY(...)
11 * SECTIONS
12 * {
13 * . = START;
14 * __init_begin = .;
15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU(PAGE_SIZE)
19 * __init_end = .;
20 *
21 * _stext = .;
22 * TEXT_SECTION = 0
23 * _etext = .;
24 *
25 * _sdata = .;
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
28 * _edata = .;
29 *
30 * EXCEPTION_TABLE(...)
31 * NOTES
32 *
33 * __bss_start = .;
34 * BSS_SECTION(0, 0)
35 * __bss_stop = .;
36 * _end = .;
37 *
38 * /DISCARD/ : {
39 * EXIT_TEXT
40 * EXIT_DATA
41 * EXIT_CALL
42 * }
43 * STABS_DEBUG
44 * DWARF_DEBUG
45 * }
46 *
47 * [__init_begin, __init_end] is the init section that may be freed after init
48 * [_stext, _etext] is the text section
49 * [_sdata, _edata] is the data section
50 *
51 * Some of the included output section have their own set of constants.
52 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
53 * [__nosave_begin, __nosave_end] for the nosave data
54 */
2 55
3#ifndef LOAD_OFFSET 56#ifndef LOAD_OFFSET
4#define LOAD_OFFSET 0 57#define LOAD_OFFSET 0
@@ -116,7 +169,36 @@
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()
 
-#define RO_DATA(align)							\
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__nosave_begin) = .;				\
+	*(.data.nosave)							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__nosave_end) = .;
+
+#define PAGE_ALIGNED_DATA(page_align)					\
+	. = ALIGN(page_align);						\
+	*(.data.page_aligned)
+
+#define READ_MOSTLY_DATA(align)						\
+	. = ALIGN(align);						\
+	*(.data.read_mostly)
+
+#define CACHELINE_ALIGNED_DATA(align)					\
+	. = ALIGN(align);						\
+	*(.data.cacheline_aligned)
+
+#define INIT_TASK(align)						\
+	. = ALIGN(align);						\
+	*(.data.init_task)
+
+/*
+ * Read only Data
+ */
+#define RO_DATA_SECTION(align)						\
 	. = ALIGN((align));						\
 	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
@@ -270,9 +352,10 @@
 	}								\
 	. = ALIGN((align));
 
-/* RODATA provided for backward compatibility.
+/* RODATA & RO_DATA provided for backward compatibility.
  * All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA(4096)
+#define RODATA		RO_DATA_SECTION(4096)
+#define RO_DATA(align)	RO_DATA_SECTION(align)
 
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -330,16 +413,42 @@
 #endif
 
 /* Section used for early init (in .S files) */
-#define HEAD_TEXT  *(HEAD_TEXT_SECTION)
+#define HEAD_TEXT  *(.head.text)
+
+#define HEAD_TEXT_SECTION						\
+	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
+		HEAD_TEXT						\
+	}
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align)						\
+	. = ALIGN(align);						\
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start___ex_table) = .;			\
+		*(__ex_table)						\
+		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
+	}
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA(align)						\
+	. = ALIGN(align);						\
+	.data.init_task : {						\
+		INIT_TASK						\
+	}
 
 /* init and exit section handling */
 #define INIT_DATA							\
 	*(.init.data)							\
 	DEV_DISCARD(init.data)						\
-	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.data)						\
-	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.data)						\
+	*(.init.rodata)							\
+	DEV_DISCARD(init.rodata)					\
+	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.rodata)
 
 #define INIT_TEXT							\
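
The __start___ex_table/__stop___ex_table brackets added above are what the
fault path searches to find a fixup for a faulting kernel access. A
simplified sketch of the consuming side (the real lookup lives in
kernel/extable.c and uses a sorted binary search; field names follow the
common arch layout of the era):

	/* Simplified linear search over the linker-provided bounds */
	extern struct exception_table_entry __start___ex_table[];
	extern struct exception_table_entry __stop___ex_table[];

	const struct exception_table_entry *ex_lookup_sketch(unsigned long addr)
	{
		const struct exception_table_entry *e;

		for (e = __start___ex_table; e < __stop___ex_table; e++)
			if (e->insn == addr)	/* faulting instruction */
				return e;	/* e->fixup holds recovery address */
		return NULL;
	}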
@@ -363,9 +472,35 @@
 	CPU_DISCARD(exit.text)						\
 	MEM_DISCARD(exit.text)
 
-	/* DWARF debug sections.
-	   Symbols in the DWARF debugging sections are relative to
-	   the beginning of the section so we begin them at 0.  */
+#define EXIT_CALL							\
+	*(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS								\
+	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
+		*(.sbss)						\
+		*(.scommon)						\
+	}
+
+#define BSS(bss_align)							\
+	. = ALIGN(bss_align);						\
+	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
+		VMLINUX_SYMBOL(__bss_start) = .;			\
+		*(.bss.page_aligned)					\
+		*(.dynbss)						\
+		*(.bss)							\
+		*(COMMON)						\
+		VMLINUX_SYMBOL(__bss_stop) = .;				\
+	}
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
 #define DWARF_DEBUG							\
 	/* DWARF 1 */							\
 	.debug		0 : { *(.debug) }				\
@@ -432,6 +567,12 @@
 		VMLINUX_SYMBOL(__stop_notes) = .;			\
 	}
 
+#define INIT_SETUP(initsetup_align)					\
+	. = ALIGN(initsetup_align);					\
+	VMLINUX_SYMBOL(__setup_start) = .;				\
+	*(.init.setup)							\
+	VMLINUX_SYMBOL(__setup_end) = .;
+
 #define INITCALLS							\
 	*(.initcallearly.init)						\
 	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
@@ -453,6 +594,31 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+#define INIT_CALLS							\
+	VMLINUX_SYMBOL(__initcall_start) = .;				\
+	INITCALLS							\
+	VMLINUX_SYMBOL(__initcall_end) = .;
+
+#define CON_INITCALL							\
+	VMLINUX_SYMBOL(__con_initcall_start) = .;			\
+	*(.con_initcall.init)						\
+	VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+#define SECURITY_INITCALL						\
+	VMLINUX_SYMBOL(__security_initcall_start) = .;			\
+	*(.security_initcall.init)					\
+	VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__initramfs_start) = .;				\
+	*(.init.ramfs)							\
+	VMLINUX_SYMBOL(__initramfs_end) = .;
+#else
+#define INIT_RAM_FS
+#endif
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @vaddr: explicit base address (optional)
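
The INIT_CALLS/CON_INITCALL/SECURITY_INITCALL brackets exist so init/main.c
can walk each span as an array of function pointers. A rough sketch of the
consuming pattern (illustrative; the real do_initcalls() adds error
reporting and tracing around each call):

	/* The linker-provided brackets behave as array bounds */
	typedef int (*initcall_t)(void);
	extern initcall_t __initcall_start[], __initcall_end[];

	static void do_initcalls_sketch(void)
	{
		initcall_t *call;

		for (call = __initcall_start; call < __initcall_end; call++)
			(*call)();	/* run initcalls in link order */
	}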
@@ -509,3 +675,58 @@
 		*(.data.percpu.shared_aligned)				\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	}
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will fit only a subset of the architectures
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically smaller than PAGE_SIZE, so the sections
+ * that have this restriction (or a similar one) are located before
+ * the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * use 0 as page_align if page_aligned data is not used */
+#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask)	\
+	. = ALIGN(PAGE_SIZE);						\
+	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
+		INIT_TASK(inittask)					\
+		CACHELINE_ALIGNED_DATA(cacheline)			\
+		READ_MOSTLY_DATA(cacheline)				\
+		DATA_DATA						\
+		CONSTRUCTORS						\
+		NOSAVE_DATA						\
+		PAGE_ALIGNED_DATA(pagealigned)				\
+	}
+
+#define INIT_TEXT_SECTION(inittext_align)				\
+	. = ALIGN(inittext_align);					\
+	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(_sinittext) = .;				\
+		INIT_TEXT						\
+		VMLINUX_SYMBOL(_einittext) = .;				\
+	}
+
+#define INIT_DATA_SECTION(initsetup_align)				\
+	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
+		INIT_DATA						\
+		INIT_SETUP(initsetup_align)				\
+		INIT_CALLS						\
+		CON_INITCALL						\
+		SECURITY_INITCALL					\
+		INIT_RAM_FS						\
+	}
+
+#define BSS_SECTION(sbss_align, bss_align)				\
+	SBSS								\
+	BSS(bss_align)							\
+	. = ALIGN(4);
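
Taken together, a hypothetical arch/xxx/kernel/vmlinux.lds.S built on the
new high-level macros might look like the following (arch name, constants,
and alignments are assumptions, mirroring the sample in the file's header
comment):

	/* Hypothetical fragment; START and alignments are placeholders */
	SECTIONS
	{
		. = START;
		__init_begin = .;
		HEAD_TEXT_SECTION
		INIT_TEXT_SECTION(PAGE_SIZE)
		INIT_DATA_SECTION(16)
		PERCPU(PAGE_SIZE)
		__init_end = .;

		_stext = .;
		.text : { TEXT_TEXT SCHED_TEXT LOCK_TEXT }
		_etext = .;

		_sdata = .;
		RO_DATA_SECTION(PAGE_SIZE)
		RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE, THREAD_SIZE)
		_edata = .;

		EXCEPTION_TABLE(16)
		NOTES

		BSS_SECTION(0, PAGE_SIZE)
		_end = .;

		STABS_DEBUG
		DWARF_DEBUG
	}

The point of the series: each architecture keeps only its quirks, while the
shared section layout and bracketing symbols come from one place.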