| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
|---|---|---|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
| commit | 3dd41424090a0ca3a660218d06afe6ff4441bad3 (patch) | |
| tree | 511ef1bb1799027fc5aad574adce49120ecadd87 /include/asm-generic | |
| parent | 5c5456402d467969b217d7fdd6670f8c8600f5a8 (diff) | |
| parent | f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff) | |
Merge commit 'v2.6.36' into wip-merge-2.6.36
Conflicts:
Makefile
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/syscall_table_32.S
kernel/sched.c
kernel/time/tick-sched.c
Relevant API and function changes (resolved in this commit):
- (API) .enqueue_task() (enqueue_task_litmus) and
.dequeue_task() (dequeue_task_litmus)
[litmus/sched_litmus.c]
- (API) .select_task_rq() (select_task_rq_litmus)
[litmus/sched_litmus.c]
- (API) sysrq_dump_trace_buffer() and sysrq_handle_kill_rt_tasks()
[litmus/sched_trace.c]
- struct kfifo internal buffer name changed (buffer -> buf)
[litmus/sched_trace.c]
- add_wait_queue_exclusive_locked() -> __add_wait_queue_tail_exclusive()
[litmus/fmlp.c] (see the sketch after this list)
- syscall numbers for both x86_32 and x86_64
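
For the wait-queue rename in the list above, here is a minimal, hypothetical sketch of the before/after pattern as it would appear in code like litmus/fmlp.c; the function and variable names are illustrative, only the two queueing helpers come from the kernel API.

```c
#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative blocking path modelled on litmus/fmlp.c: the caller is
 * assumed to already hold wq->lock, which is why the "locked" helpers
 * (no internal locking) are required here. */
static void example_enqueue_waiter(wait_queue_head_t *wq, wait_queue_t *wait)
{
	init_waitqueue_entry(wait, current);
	set_current_state(TASK_UNINTERRUPTIBLE);

	/* Before the merge this was:
	 *     add_wait_queue_exclusive_locked(wq, wait);
	 * against 2.6.36 the equivalent helper is: */
	__add_wait_queue_tail_exclusive(wq, wait);
}
```
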
Diffstat (limited to 'include/asm-generic')
23 files changed, 425 insertions, 152 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index eb62334cda29..53f91b1ae53a 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -3,6 +3,8 @@ header-y += bitsperlong.h | |||
3 | header-y += errno-base.h | 3 | header-y += errno-base.h |
4 | header-y += errno.h | 4 | header-y += errno.h |
5 | header-y += fcntl.h | 5 | header-y += fcntl.h |
6 | header-y += int-l64.h | ||
7 | header-y += int-ll64.h | ||
6 | header-y += ioctl.h | 8 | header-y += ioctl.h |
7 | header-y += ioctls.h | 9 | header-y += ioctls.h |
8 | header-y += ipcbuf.h | 10 | header-y += ipcbuf.h |
@@ -12,10 +14,12 @@ header-y += msgbuf.h | |||
12 | header-y += param.h | 14 | header-y += param.h |
13 | header-y += poll.h | 15 | header-y += poll.h |
14 | header-y += posix_types.h | 16 | header-y += posix_types.h |
17 | header-y += resource.h | ||
15 | header-y += sembuf.h | 18 | header-y += sembuf.h |
16 | header-y += setup.h | 19 | header-y += setup.h |
17 | header-y += shmbuf.h | 20 | header-y += shmbuf.h |
18 | header-y += shmparam.h | 21 | header-y += shmparam.h |
22 | header-y += siginfo.h | ||
19 | header-y += signal-defs.h | 23 | header-y += signal-defs.h |
20 | header-y += signal.h | 24 | header-y += signal.h |
21 | header-y += socket.h | 25 | header-y += socket.h |
@@ -28,8 +32,3 @@ header-y += termios.h | |||
28 | header-y += types.h | 32 | header-y += types.h |
29 | header-y += ucontext.h | 33 | header-y += ucontext.h |
30 | header-y += unistd.h | 34 | header-y += unistd.h |
31 | |||
32 | unifdef-y += int-l64.h | ||
33 | unifdef-y += int-ll64.h | ||
34 | unifdef-y += resource.h | ||
35 | unifdef-y += siginfo.h | ||
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 96d7c9804dc1..c5d2e5dd871b 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,5 +1,5 @@ | |||
1 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ | 1 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ |
2 | $(srctree)/include/asm-$(SRCARCH)/kvm.h),) | 2 | $(srctree)/include/asm-$(SRCARCH)/kvm.h),) |
3 | header-y += kvm.h | 3 | header-y += kvm.h |
4 | endif | 4 | endif |
5 | 5 | ||
@@ -9,36 +9,37 @@ header-y += kvm_para.h | |||
9 | endif | 9 | endif |
10 | 10 | ||
11 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ | 11 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ |
12 | $(srctree)/include/asm-$(SRCARCH)/a.out.h),) | 12 | $(srctree)/include/asm-$(SRCARCH)/a.out.h),) |
13 | unifdef-y += a.out.h | 13 | header-y += a.out.h |
14 | endif | 14 | endif |
15 | unifdef-y += auxvec.h | 15 | |
16 | unifdef-y += byteorder.h | 16 | header-y += auxvec.h |
17 | unifdef-y += bitsperlong.h | 17 | header-y += bitsperlong.h |
18 | unifdef-y += errno.h | 18 | header-y += byteorder.h |
19 | unifdef-y += fcntl.h | 19 | header-y += errno.h |
20 | unifdef-y += ioctl.h | 20 | header-y += fcntl.h |
21 | unifdef-y += ioctls.h | 21 | header-y += ioctl.h |
22 | unifdef-y += ipcbuf.h | 22 | header-y += ioctls.h |
23 | unifdef-y += mman.h | 23 | header-y += ipcbuf.h |
24 | unifdef-y += msgbuf.h | 24 | header-y += mman.h |
25 | unifdef-y += param.h | 25 | header-y += msgbuf.h |
26 | unifdef-y += poll.h | 26 | header-y += param.h |
27 | unifdef-y += posix_types.h | 27 | header-y += poll.h |
28 | unifdef-y += ptrace.h | 28 | header-y += posix_types.h |
29 | unifdef-y += resource.h | 29 | header-y += ptrace.h |
30 | unifdef-y += sembuf.h | 30 | header-y += resource.h |
31 | unifdef-y += setup.h | 31 | header-y += sembuf.h |
32 | unifdef-y += shmbuf.h | 32 | header-y += setup.h |
33 | unifdef-y += sigcontext.h | 33 | header-y += shmbuf.h |
34 | unifdef-y += siginfo.h | 34 | header-y += sigcontext.h |
35 | unifdef-y += signal.h | 35 | header-y += siginfo.h |
36 | unifdef-y += socket.h | 36 | header-y += signal.h |
37 | unifdef-y += sockios.h | 37 | header-y += socket.h |
38 | unifdef-y += stat.h | 38 | header-y += sockios.h |
39 | unifdef-y += statfs.h | 39 | header-y += stat.h |
40 | unifdef-y += swab.h | 40 | header-y += statfs.h |
41 | unifdef-y += termbits.h | 41 | header-y += swab.h |
42 | unifdef-y += termios.h | 42 | header-y += termbits.h |
43 | unifdef-y += types.h | 43 | header-y += termios.h |
44 | unifdef-y += unistd.h | 44 | header-y += types.h |
45 | header-y += unistd.h | ||
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c99c64dc5f3d..e53347fbf1da 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,18 +30,16 @@ | |||
30 | * atomic_read - read atomic variable | 30 | * atomic_read - read atomic variable |
31 | * @v: pointer of type atomic_t | 31 | * @v: pointer of type atomic_t |
32 | * | 32 | * |
33 | * Atomically reads the value of @v. Note that the guaranteed | 33 | * Atomically reads the value of @v. |
34 | * useful range of an atomic_t is only 24 bits. | ||
35 | */ | 34 | */ |
36 | #define atomic_read(v) ((v)->counter) | 35 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
37 | 36 | ||
38 | /** | 37 | /** |
39 | * atomic_set - set atomic variable | 38 | * atomic_set - set atomic variable |
40 | * @v: pointer of type atomic_t | 39 | * @v: pointer of type atomic_t |
41 | * @i: required value | 40 | * @i: required value |
42 | * | 41 | * |
43 | * Atomically sets the value of @v to @i. Note that the guaranteed | 42 | * Atomically sets the value of @v to @i. |
44 | * useful range of an atomic_t is only 24 bits. | ||
45 | */ | 43 | */ |
46 | #define atomic_set(v, i) (((v)->counter) = (i)) | 44 | #define atomic_set(v, i) (((v)->counter) = (i)) |
47 | 45 | ||
@@ -53,18 +51,17 @@ | |||
53 | * @v: pointer of type atomic_t | 51 | * @v: pointer of type atomic_t |
54 | * | 52 | * |
55 | * Atomically adds @i to @v and returns the result | 53 | * Atomically adds @i to @v and returns the result |
56 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
57 | */ | 54 | */ |
58 | static inline int atomic_add_return(int i, atomic_t *v) | 55 | static inline int atomic_add_return(int i, atomic_t *v) |
59 | { | 56 | { |
60 | unsigned long flags; | 57 | unsigned long flags; |
61 | int temp; | 58 | int temp; |
62 | 59 | ||
63 | local_irq_save(flags); | 60 | raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ |
64 | temp = v->counter; | 61 | temp = v->counter; |
65 | temp += i; | 62 | temp += i; |
66 | v->counter = temp; | 63 | v->counter = temp; |
67 | local_irq_restore(flags); | 64 | raw_local_irq_restore(flags); |
68 | 65 | ||
69 | return temp; | 66 | return temp; |
70 | } | 67 | } |
@@ -75,18 +72,17 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
75 | * @v: pointer of type atomic_t | 72 | * @v: pointer of type atomic_t |
76 | * | 73 | * |
77 | * Atomically subtracts @i from @v and returns the result | 74 | * Atomically subtracts @i from @v and returns the result |
78 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
79 | */ | 75 | */ |
80 | static inline int atomic_sub_return(int i, atomic_t *v) | 76 | static inline int atomic_sub_return(int i, atomic_t *v) |
81 | { | 77 | { |
82 | unsigned long flags; | 78 | unsigned long flags; |
83 | int temp; | 79 | int temp; |
84 | 80 | ||
85 | local_irq_save(flags); | 81 | raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ |
86 | temp = v->counter; | 82 | temp = v->counter; |
87 | temp -= i; | 83 | temp -= i; |
88 | v->counter = temp; | 84 | v->counter = temp; |
89 | local_irq_restore(flags); | 85 | raw_local_irq_restore(flags); |
90 | 86 | ||
91 | return temp; | 87 | return temp; |
92 | } | 88 | } |
@@ -139,9 +135,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
139 | unsigned long flags; | 135 | unsigned long flags; |
140 | 136 | ||
141 | mask = ~mask; | 137 | mask = ~mask; |
142 | local_irq_save(flags); | 138 | raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ |
143 | *addr &= mask; | 139 | *addr &= mask; |
144 | local_irq_restore(flags); | 140 | raw_local_irq_restore(flags); |
145 | } | 141 | } |
146 | 142 | ||
147 | #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) | 143 | #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) |
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..6a211f40665c
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | static inline unsigned int __arch_hweight32(unsigned int w) | ||
7 | { | ||
8 | return __sw_hweight32(w); | ||
9 | } | ||
10 | |||
11 | static inline unsigned int __arch_hweight16(unsigned int w) | ||
12 | { | ||
13 | return __sw_hweight16(w); | ||
14 | } | ||
15 | |||
16 | static inline unsigned int __arch_hweight8(unsigned int w) | ||
17 | { | ||
18 | return __sw_hweight8(w); | ||
19 | } | ||
20 | |||
21 | static inline unsigned long __arch_hweight64(__u64 w) | ||
22 | { | ||
23 | return __sw_hweight64(w); | ||
24 | } | ||
25 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | ||
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..fa2a50b7ee66
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ | ||
3 | |||
4 | /* | ||
5 | * Compile time versions of __arch_hweightN() | ||
6 | */ | ||
7 | #define __const_hweight8(w) \ | ||
8 | ( (!!((w) & (1ULL << 0))) + \ | ||
9 | (!!((w) & (1ULL << 1))) + \ | ||
10 | (!!((w) & (1ULL << 2))) + \ | ||
11 | (!!((w) & (1ULL << 3))) + \ | ||
12 | (!!((w) & (1ULL << 4))) + \ | ||
13 | (!!((w) & (1ULL << 5))) + \ | ||
14 | (!!((w) & (1ULL << 6))) + \ | ||
15 | (!!((w) & (1ULL << 7))) ) | ||
16 | |||
17 | #define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) | ||
18 | #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) | ||
19 | #define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) | ||
20 | |||
21 | /* | ||
22 | * Generic interface. | ||
23 | */ | ||
24 | #define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) | ||
25 | #define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) | ||
26 | #define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) | ||
27 | #define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) | ||
28 | |||
29 | /* | ||
30 | * Interface for known constant arguments | ||
31 | */ | ||
32 | #define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) | ||
33 | #define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) | ||
34 | #define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) | ||
35 | #define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) | ||
36 | |||
37 | /* | ||
38 | * Type invariant interface to the compile time constant hweight functions. | ||
39 | */ | ||
40 | #define HWEIGHT(w) HWEIGHT64((u64)w) | ||
41 | |||
42 | #endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ | ||
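
A small, hypothetical use of the new hweight interface added above; EXAMPLE_LANE_MASK and the helper function are made up, while hweight32() and HWEIGHT8() are the macros defined in const_hweight.h.

```c
#include <linux/kernel.h>
#include <linux/bitops.h>

#define EXAMPLE_LANE_MASK 0xf0u	/* made-up hardware mask */

static int example_count_active_lanes(u32 hw_status)
{
	/* Constant argument: folded at compile time via __const_hweight8(),
	 * and HWEIGHT8() rejects non-constant arguments at build time. */
	BUILD_BUG_ON(HWEIGHT8(EXAMPLE_LANE_MASK) != 4);

	/* Runtime argument: dispatched to __arch_hweight32(), which the
	 * generic arch_hweight.h maps onto __sw_hweight32(). */
	return hweight32(hw_status & EXAMPLE_LANE_MASK);
}
```

The split lets popcounts of compile-time constants fold away entirely while runtime values still use the architecture's best implementation.
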
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383771da..a94d6519c7ed 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -1,11 +1,7 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ | 1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ | 2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm-generic/bitops/arch_hweight.h> |
5 | 5 | #include <asm-generic/bitops/const_hweight.h> | |
6 | extern unsigned int hweight32(unsigned int w); | ||
7 | extern unsigned int hweight16(unsigned int w); | ||
8 | extern unsigned int hweight8(unsigned int w); | ||
9 | extern unsigned long hweight64(__u64 w); | ||
10 | 6 | ||
11 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | 7 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18c435d7c082..c2c9ba032d46 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -25,7 +25,10 @@ struct bug_entry { | |||
25 | }; | 25 | }; |
26 | #endif /* __ASSEMBLY__ */ | 26 | #endif /* __ASSEMBLY__ */ |
27 | 27 | ||
28 | #define BUGFLAG_WARNING (1<<0) | 28 | #define BUGFLAG_WARNING (1 << 0) |
29 | #define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8)) | ||
30 | #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) | ||
31 | |||
29 | #endif /* CONFIG_GENERIC_BUG */ | 32 | #endif /* CONFIG_GENERIC_BUG */ |
30 | 33 | ||
31 | /* | 34 | /* |
@@ -56,17 +59,25 @@ struct bug_entry { | |||
56 | * appear at runtime. Use the versions with printk format strings | 59 | * appear at runtime. Use the versions with printk format strings |
57 | * to provide better diagnostics. | 60 | * to provide better diagnostics. |
58 | */ | 61 | */ |
59 | #ifndef __WARN | 62 | #ifndef __WARN_TAINT |
60 | #ifndef __ASSEMBLY__ | 63 | #ifndef __ASSEMBLY__ |
61 | extern void warn_slowpath_fmt(const char *file, const int line, | 64 | extern void warn_slowpath_fmt(const char *file, const int line, |
62 | const char *fmt, ...) __attribute__((format(printf, 3, 4))); | 65 | const char *fmt, ...) __attribute__((format(printf, 3, 4))); |
66 | extern void warn_slowpath_fmt_taint(const char *file, const int line, | ||
67 | unsigned taint, const char *fmt, ...) | ||
68 | __attribute__((format(printf, 4, 5))); | ||
63 | extern void warn_slowpath_null(const char *file, const int line); | 69 | extern void warn_slowpath_null(const char *file, const int line); |
64 | #define WANT_WARN_ON_SLOWPATH | 70 | #define WANT_WARN_ON_SLOWPATH |
65 | #endif | 71 | #endif |
66 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) | 72 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) |
67 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) | 73 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) |
74 | #define __WARN_printf_taint(taint, arg...) \ | ||
75 | warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) | ||
68 | #else | 76 | #else |
77 | #define __WARN() __WARN_TAINT(TAINT_WARN) | ||
69 | #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) | 78 | #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) |
79 | #define __WARN_printf_taint(taint, arg...) \ | ||
80 | do { printk(arg); __WARN_TAINT(taint); } while (0) | ||
70 | #endif | 81 | #endif |
71 | 82 | ||
72 | #ifndef WARN_ON | 83 | #ifndef WARN_ON |
@@ -87,6 +98,13 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
87 | }) | 98 | }) |
88 | #endif | 99 | #endif |
89 | 100 | ||
101 | #define WARN_TAINT(condition, taint, format...) ({ \ | ||
102 | int __ret_warn_on = !!(condition); \ | ||
103 | if (unlikely(__ret_warn_on)) \ | ||
104 | __WARN_printf_taint(taint, format); \ | ||
105 | unlikely(__ret_warn_on); \ | ||
106 | }) | ||
107 | |||
90 | #else /* !CONFIG_BUG */ | 108 | #else /* !CONFIG_BUG */ |
91 | #ifndef HAVE_ARCH_BUG | 109 | #ifndef HAVE_ARCH_BUG |
92 | #define BUG() do {} while(0) | 110 | #define BUG() do {} while(0) |
@@ -110,6 +128,8 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
110 | }) | 128 | }) |
111 | #endif | 129 | #endif |
112 | 130 | ||
131 | #define WARN_TAINT(condition, taint, format...) WARN_ON(condition) | ||
132 | |||
113 | #endif | 133 | #endif |
114 | 134 | ||
115 | #define WARN_ON_ONCE(condition) ({ \ | 135 | #define WARN_ON_ONCE(condition) ({ \ |
@@ -132,6 +152,16 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
132 | unlikely(__ret_warn_once); \ | 152 | unlikely(__ret_warn_once); \ |
133 | }) | 153 | }) |
134 | 154 | ||
155 | #define WARN_TAINT_ONCE(condition, taint, format...) ({ \ | ||
156 | static bool __warned; \ | ||
157 | int __ret_warn_once = !!(condition); \ | ||
158 | \ | ||
159 | if (unlikely(__ret_warn_once)) \ | ||
160 | if (WARN_TAINT(!__warned, taint, format)) \ | ||
161 | __warned = true; \ | ||
162 | unlikely(__ret_warn_once); \ | ||
163 | }) | ||
164 | |||
135 | #define WARN_ON_RATELIMIT(condition, state) \ | 165 | #define WARN_ON_RATELIMIT(condition, state) \ |
136 | WARN_ON((condition) && __ratelimit(state)) | 166 | WARN_ON((condition) && __ratelimit(state)) |
137 | 167 | ||
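
A brief, hypothetical example of the tainting warnings added to bug.h above; the revision check and message are invented, while WARN_TAINT_ONCE() and the TAINT_FIRMWARE_WORKAROUND flag exist in 2.6.36.

```c
#include <linux/kernel.h>
#include <linux/bug.h>

static void example_apply_quirk(unsigned int chip_rev)
{
	/* Warn once and mark the kernel tainted, instead of open-coding
	 * WARN() plus add_taint(). */
	WARN_TAINT_ONCE(chip_rev < 2, TAINT_FIRMWARE_WORKAROUND,
			"applying workaround for pre-rev2 firmware (rev %u)\n",
			chip_rev);
}
```
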
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index 82cd0cb1c3fe..ccf7b4f34a3c 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -72,9 +72,6 @@ dma_set_mask(struct device *dev, u64 mask); | |||
72 | extern int | 72 | extern int |
73 | dma_get_cache_alignment(void); | 73 | dma_get_cache_alignment(void); |
74 | 74 | ||
75 | extern int | ||
76 | dma_is_consistent(struct device *dev, dma_addr_t dma_handle); | ||
77 | |||
78 | extern void | 75 | extern void |
79 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 76 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
80 | enum dma_data_direction direction); | 77 | enum dma_data_direction direction); |
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72c..0c80bb38773f 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, | |||
123 | size_t size, | 123 | size_t size, |
124 | enum dma_data_direction dir) | 124 | enum dma_data_direction dir) |
125 | { | 125 | { |
126 | struct dma_map_ops *ops = get_dma_ops(dev); | 126 | dma_sync_single_for_cpu(dev, addr + offset, size, dir); |
127 | |||
128 | BUG_ON(!valid_dma_direction(dir)); | ||
129 | if (ops->sync_single_range_for_cpu) { | ||
130 | ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
131 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
132 | |||
133 | } else | ||
134 | dma_sync_single_for_cpu(dev, addr + offset, size, dir); | ||
135 | } | 127 | } |
136 | 128 | ||
137 | static inline void dma_sync_single_range_for_device(struct device *dev, | 129 | static inline void dma_sync_single_range_for_device(struct device *dev, |
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, | |||
140 | size_t size, | 132 | size_t size, |
141 | enum dma_data_direction dir) | 133 | enum dma_data_direction dir) |
142 | { | 134 | { |
143 | struct dma_map_ops *ops = get_dma_ops(dev); | 135 | dma_sync_single_for_device(dev, addr + offset, size, dir); |
144 | |||
145 | BUG_ON(!valid_dma_direction(dir)); | ||
146 | if (ops->sync_single_range_for_device) { | ||
147 | ops->sync_single_range_for_device(dev, addr, offset, size, dir); | ||
148 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | ||
149 | |||
150 | } else | ||
151 | dma_sync_single_for_device(dev, addr + offset, size, dir); | ||
152 | } | 136 | } |
153 | 137 | ||
154 | static inline void | 138 | static inline void |
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index fcd268ce0674..a70b2d2bfc14 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,6 +3,18 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* | ||
7 | * FMODE_EXEC is 0x20 | ||
8 | * FMODE_NONOTIFY is 0x1000000 | ||
9 | * These cannot be used by userspace O_* until internal and external open | ||
10 | * flags are split. | ||
11 | * -Eric Paris | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * When introducing new O_* bits, please check its uniqueness in fcntl_init(). | ||
16 | */ | ||
17 | |||
6 | #define O_ACCMODE 00000003 | 18 | #define O_ACCMODE 00000003 |
7 | #define O_RDONLY 00000000 | 19 | #define O_RDONLY 00000000 |
8 | #define O_WRONLY 00000001 | 20 | #define O_WRONLY 00000001 |
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f1..8ca18e26d7e3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,21 +16,34 @@ | |||
16 | * While the GPIO programming interface defines valid GPIO numbers | 16 | * While the GPIO programming interface defines valid GPIO numbers |
17 | * to be in the range 0..MAX_INT, this library restricts them to the | 17 | * to be in the range 0..MAX_INT, this library restricts them to the |
18 | * smaller range 0..ARCH_NR_GPIOS-1. | 18 | * smaller range 0..ARCH_NR_GPIOS-1. |
19 | * | ||
20 | * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of | ||
21 | * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is | ||
22 | * actually an estimate of a board-specific value. | ||
19 | */ | 23 | */ |
20 | 24 | ||
21 | #ifndef ARCH_NR_GPIOS | 25 | #ifndef ARCH_NR_GPIOS |
22 | #define ARCH_NR_GPIOS 256 | 26 | #define ARCH_NR_GPIOS 256 |
23 | #endif | 27 | #endif |
24 | 28 | ||
29 | /* | ||
30 | * "valid" GPIO numbers are nonnegative and may be passed to | ||
31 | * setup routines like gpio_request(). only some valid numbers | ||
32 | * can successfully be requested and used. | ||
33 | * | ||
34 | * Invalid GPIO numbers are useful for indicating no-such-GPIO in | ||
35 | * platform data and other tables. | ||
36 | */ | ||
37 | |||
25 | static inline int gpio_is_valid(int number) | 38 | static inline int gpio_is_valid(int number) |
26 | { | 39 | { |
27 | /* only some non-negative numbers are valid */ | ||
28 | return ((unsigned)number) < ARCH_NR_GPIOS; | 40 | return ((unsigned)number) < ARCH_NR_GPIOS; |
29 | } | 41 | } |
30 | 42 | ||
31 | struct device; | 43 | struct device; |
32 | struct seq_file; | 44 | struct seq_file; |
33 | struct module; | 45 | struct module; |
46 | struct device_node; | ||
34 | 47 | ||
35 | /** | 48 | /** |
36 | * struct gpio_chip - abstract a GPIO controller | 49 | * struct gpio_chip - abstract a GPIO controller |
@@ -60,7 +73,9 @@ struct module; | |||
60 | * @names: if set, must be an array of strings to use as alternative | 73 | * @names: if set, must be an array of strings to use as alternative |
61 | * names for the GPIOs in this chip. Any entry in the array | 74 | * names for the GPIOs in this chip. Any entry in the array |
62 | * may be NULL if there is no alias for the GPIO, however the | 75 | * may be NULL if there is no alias for the GPIO, however the |
63 | * array must be @ngpio entries long. | 76 | * array must be @ngpio entries long. A name can include a single printk |
77 | * format specifier for an unsigned int. It is substituted by the actual | ||
78 | * number of the gpio. | ||
64 | * | 79 | * |
65 | * A gpio_chip can help platforms abstract various sources of GPIOs so | 80 | * A gpio_chip can help platforms abstract various sources of GPIOs so |
66 | * they can all be accessed through a common programing interface. | 81 | * they can all be accessed through a common programing interface. |
@@ -88,6 +103,9 @@ struct gpio_chip { | |||
88 | unsigned offset); | 103 | unsigned offset); |
89 | int (*direction_output)(struct gpio_chip *chip, | 104 | int (*direction_output)(struct gpio_chip *chip, |
90 | unsigned offset, int value); | 105 | unsigned offset, int value); |
106 | int (*set_debounce)(struct gpio_chip *chip, | ||
107 | unsigned offset, unsigned debounce); | ||
108 | |||
91 | void (*set)(struct gpio_chip *chip, | 109 | void (*set)(struct gpio_chip *chip, |
92 | unsigned offset, int value); | 110 | unsigned offset, int value); |
93 | 111 | ||
@@ -98,9 +116,20 @@ struct gpio_chip { | |||
98 | struct gpio_chip *chip); | 116 | struct gpio_chip *chip); |
99 | int base; | 117 | int base; |
100 | u16 ngpio; | 118 | u16 ngpio; |
101 | char **names; | 119 | const char *const *names; |
102 | unsigned can_sleep:1; | 120 | unsigned can_sleep:1; |
103 | unsigned exported:1; | 121 | unsigned exported:1; |
122 | |||
123 | #if defined(CONFIG_OF_GPIO) | ||
124 | /* | ||
125 | * If CONFIG_OF is enabled, then all GPIO controllers described in the | ||
126 | * device tree automatically may have an OF translation | ||
127 | */ | ||
128 | struct device_node *of_node; | ||
129 | int of_gpio_n_cells; | ||
130 | int (*of_xlate)(struct gpio_chip *gc, struct device_node *np, | ||
131 | const void *gpio_spec, u32 *flags); | ||
132 | #endif | ||
104 | }; | 133 | }; |
105 | 134 | ||
106 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, | 135 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, |
@@ -110,6 +139,9 @@ extern int __must_check gpiochip_reserve(int start, int ngpio); | |||
110 | /* add/remove chips */ | 139 | /* add/remove chips */ |
111 | extern int gpiochip_add(struct gpio_chip *chip); | 140 | extern int gpiochip_add(struct gpio_chip *chip); |
112 | extern int __must_check gpiochip_remove(struct gpio_chip *chip); | 141 | extern int __must_check gpiochip_remove(struct gpio_chip *chip); |
142 | extern struct gpio_chip *gpiochip_find(void *data, | ||
143 | int (*match)(struct gpio_chip *chip, | ||
144 | void *data)); | ||
113 | 145 | ||
114 | 146 | ||
115 | /* Always use the library code for GPIO management calls, | 147 | /* Always use the library code for GPIO management calls, |
@@ -121,6 +153,8 @@ extern void gpio_free(unsigned gpio); | |||
121 | extern int gpio_direction_input(unsigned gpio); | 153 | extern int gpio_direction_input(unsigned gpio); |
122 | extern int gpio_direction_output(unsigned gpio, int value); | 154 | extern int gpio_direction_output(unsigned gpio, int value); |
123 | 155 | ||
156 | extern int gpio_set_debounce(unsigned gpio, unsigned debounce); | ||
157 | |||
124 | extern int gpio_get_value_cansleep(unsigned gpio); | 158 | extern int gpio_get_value_cansleep(unsigned gpio); |
125 | extern void gpio_set_value_cansleep(unsigned gpio, int value); | 159 | extern void gpio_set_value_cansleep(unsigned gpio, int value); |
126 | 160 | ||
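
A hypothetical consumer of the new gpio_set_debounce() call shown above; the GPIO number, label and debounce interval are invented, and not every gpio_chip implements the new .set_debounce() hook, so the call is allowed to fail.

```c
#include <linux/kernel.h>
#include <linux/gpio.h>

static int example_setup_button(unsigned button_gpio)
{
	int err;

	err = gpio_request(button_gpio, "example-button");
	if (err)
		return err;

	err = gpio_direction_input(button_gpio);
	if (err) {
		gpio_free(button_gpio);
		return err;
	}

	/* Debounce period in microseconds; fall back silently when the
	 * controller has no hardware debounce support. */
	if (gpio_set_debounce(button_gpio, 5000) < 0)
		pr_info("example: no hardware debounce on gpio %u\n",
			button_gpio);

	return 0;
}
```
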
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index bcee6365dca0..118601fce92d 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -188,11 +188,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count) | |||
188 | #ifndef CONFIG_GENERIC_IOMAP | 188 | #ifndef CONFIG_GENERIC_IOMAP |
189 | #define ioread8(addr) readb(addr) | 189 | #define ioread8(addr) readb(addr) |
190 | #define ioread16(addr) readw(addr) | 190 | #define ioread16(addr) readw(addr) |
191 | #define ioread16be(addr) be16_to_cpu(ioread16(addr)) | ||
191 | #define ioread32(addr) readl(addr) | 192 | #define ioread32(addr) readl(addr) |
193 | #define ioread32be(addr) be32_to_cpu(ioread32(addr)) | ||
192 | 194 | ||
193 | #define iowrite8(v, addr) writeb((v), (addr)) | 195 | #define iowrite8(v, addr) writeb((v), (addr)) |
194 | #define iowrite16(v, addr) writew((v), (addr)) | 196 | #define iowrite16(v, addr) writew((v), (addr)) |
197 | #define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) | ||
195 | #define iowrite32(v, addr) writel((v), (addr)) | 198 | #define iowrite32(v, addr) writel((v), (addr)) |
199 | #define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr)) | ||
196 | 200 | ||
197 | #define ioread8_rep(p, dst, count) \ | 201 | #define ioread8_rep(p, dst, count) \ |
198 | insb((unsigned long) (p), (dst), (count)) | 202 | insb((unsigned long) (p), (dst), (count)) |
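
A hypothetical register-access helper using the big-endian MMIO accessors introduced above; the device and register offsets are made up, ioread32be()/iowrite32be() are the new macros.

```c
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG_CTRL	0x00	/* invented device registers */
#define EXAMPLE_REG_STATUS	0x04

static u32 example_read_status(void __iomem *base)
{
	/* Reads a 32-bit big-endian register and returns it in CPU order. */
	return ioread32be(base + EXAMPLE_REG_STATUS);
}

static void example_enable(void __iomem *base)
{
	iowrite32be(0x1, base + EXAMPLE_REG_CTRL);
}
```
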
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
index a799e20a769e..8554cb6a81b9 100644
--- a/include/asm-generic/ioctls.h
+++ b/include/asm-generic/ioctls.h
@@ -69,6 +69,7 @@ | |||
69 | #define TCSETX 0x5433 | 69 | #define TCSETX 0x5433 |
70 | #define TCSETXF 0x5434 | 70 | #define TCSETXF 0x5434 |
71 | #define TCSETXW 0x5435 | 71 | #define TCSETXW 0x5435 |
72 | #define TIOCSIG _IOW('T', 0x36, int) /* pty: generate signal */ | ||
72 | 73 | ||
73 | #define FIONCLEX 0x5450 | 74 | #define FIONCLEX 0x5450 |
74 | #define FIOCLEX 0x5451 | 75 | #define FIOCLEX 0x5451 |
@@ -87,12 +88,10 @@ | |||
87 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | 88 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ |
88 | 89 | ||
89 | /* | 90 | /* |
90 | * some architectures define FIOQSIZE as 0x545E, which is used for | 91 | * Some arches already define FIOQSIZE due to a historical |
91 | * TIOCGHAYESESP on others | 92 | * conflict with a Hayes modem-specific ioctl value. |
92 | */ | 93 | */ |
93 | #ifndef FIOQSIZE | 94 | #ifndef FIOQSIZE |
94 | # define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
95 | # define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
96 | # define FIOQSIZE 0x5460 | 95 | # define FIOQSIZE 0x5460 |
97 | #endif | 96 | #endif |
98 | 97 | ||
@@ -104,6 +103,7 @@ | |||
104 | #define TIOCPKT_START 8 | 103 | #define TIOCPKT_START 8 |
105 | #define TIOCPKT_NOSTOP 16 | 104 | #define TIOCPKT_NOSTOP 16 |
106 | #define TIOCPKT_DOSTOP 32 | 105 | #define TIOCPKT_DOSTOP 32 |
106 | #define TIOCPKT_IOCTL 64 | ||
107 | 107 | ||
108 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | 108 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ |
109 | 109 | ||
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index e5f234a08540..0232ccb76f2b 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -28,7 +28,11 @@ KMAP_D(15) KM_UML_USERCOPY, | |||
28 | KMAP_D(16) KM_IRQ_PTE, | 28 | KMAP_D(16) KM_IRQ_PTE, |
29 | KMAP_D(17) KM_NMI, | 29 | KMAP_D(17) KM_NMI, |
30 | KMAP_D(18) KM_NMI_PTE, | 30 | KMAP_D(18) KM_NMI_PTE, |
31 | KMAP_D(19) KM_TYPE_NR | 31 | KMAP_D(19) KM_KDB, |
32 | /* | ||
33 | * Remember to update debug_kmap_atomic() when adding new kmap types! | ||
34 | */ | ||
35 | KMAP_D(20) KM_TYPE_NR | ||
32 | }; | 36 | }; |
33 | 37 | ||
34 | #undef KMAP_D | 38 | #undef KMAP_D |
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@ | |||
1 | #ifndef _ASM_GENERIC_LOCAL64_H | ||
2 | #define _ASM_GENERIC_LOCAL64_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | #include <asm/types.h> | ||
6 | |||
7 | /* | ||
8 | * A signed long type for operations which are atomic for a single CPU. | ||
9 | * Usually used in combination with per-cpu variables. | ||
10 | * | ||
11 | * This is the default implementation, which uses atomic64_t. Which is | ||
12 | * rather pointless. The whole point behind local64_t is that some processors | ||
13 | * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs | ||
14 | * running on this CPU. local64_t allows exploitation of such capabilities. | ||
15 | */ | ||
16 | |||
17 | /* Implement in terms of atomics. */ | ||
18 | |||
19 | #if BITS_PER_LONG == 64 | ||
20 | |||
21 | #include <asm/local.h> | ||
22 | |||
23 | typedef struct { | ||
24 | local_t a; | ||
25 | } local64_t; | ||
26 | |||
27 | #define LOCAL64_INIT(i) { LOCAL_INIT(i) } | ||
28 | |||
29 | #define local64_read(l) local_read(&(l)->a) | ||
30 | #define local64_set(l,i) local_set((&(l)->a),(i)) | ||
31 | #define local64_inc(l) local_inc(&(l)->a) | ||
32 | #define local64_dec(l) local_dec(&(l)->a) | ||
33 | #define local64_add(i,l) local_add((i),(&(l)->a)) | ||
34 | #define local64_sub(i,l) local_sub((i),(&(l)->a)) | ||
35 | |||
36 | #define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) | ||
37 | #define local64_dec_and_test(l) local_dec_and_test(&(l)->a) | ||
38 | #define local64_inc_and_test(l) local_inc_and_test(&(l)->a) | ||
39 | #define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) | ||
40 | #define local64_add_return(i, l) local_add_return((i), (&(l)->a)) | ||
41 | #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) | ||
42 | #define local64_inc_return(l) local_inc_return(&(l)->a) | ||
43 | |||
44 | #define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) | ||
45 | #define local64_xchg(l, n) local_xchg((&(l)->a), (n)) | ||
46 | #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) | ||
47 | #define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) | ||
48 | |||
49 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
50 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
51 | #define __local64_inc(l) local64_set((l), local64_read(l) + 1) | ||
52 | #define __local64_dec(l) local64_set((l), local64_read(l) - 1) | ||
53 | #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) | ||
54 | #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) | ||
55 | |||
56 | #else /* BITS_PER_LONG != 64 */ | ||
57 | |||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | /* Don't use typedef: don't want them to be mixed with atomic_t's. */ | ||
61 | typedef struct { | ||
62 | atomic64_t a; | ||
63 | } local64_t; | ||
64 | |||
65 | #define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
66 | |||
67 | #define local64_read(l) atomic64_read(&(l)->a) | ||
68 | #define local64_set(l,i) atomic64_set((&(l)->a),(i)) | ||
69 | #define local64_inc(l) atomic64_inc(&(l)->a) | ||
70 | #define local64_dec(l) atomic64_dec(&(l)->a) | ||
71 | #define local64_add(i,l) atomic64_add((i),(&(l)->a)) | ||
72 | #define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) | ||
73 | |||
74 | #define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) | ||
75 | #define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) | ||
76 | #define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) | ||
77 | #define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) | ||
78 | #define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) | ||
79 | #define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) | ||
80 | #define local64_inc_return(l) atomic64_inc_return(&(l)->a) | ||
81 | |||
82 | #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) | ||
83 | #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) | ||
84 | #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) | ||
85 | #define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) | ||
86 | |||
87 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
88 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
89 | #define __local64_inc(l) local64_set((l), local64_read(l) + 1) | ||
90 | #define __local64_dec(l) local64_set((l), local64_read(l) - 1) | ||
91 | #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) | ||
92 | #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) | ||
93 | |||
94 | #endif /* BITS_PER_LONG != 64 */ | ||
95 | |||
96 | #endif /* _ASM_GENERIC_LOCAL64_H */ | ||
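
A minimal, hypothetical per-CPU event counter built on the local64_t type introduced above; the counter and function names are illustrative, the local64_* operations and the asm/local64.h wrapper are what 2.6.36 provides.

```c
#include <linux/percpu.h>
#include <asm/local64.h>

static DEFINE_PER_CPU(local64_t, example_events);

static void example_record_event(void)
{
	/* IRQ-atomic on architectures with native local ops, backed by
	 * atomic64_t on the rest; preemption is disabled around the access. */
	local64_inc(&get_cpu_var(example_events));
	put_cpu_var(example_events);
}

static u64 example_total_events(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(&per_cpu(example_events, cpu));
	return sum;
}
```
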
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7b..08923b684768 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -70,17 +70,22 @@ extern void setup_per_cpu_areas(void); | |||
70 | 70 | ||
71 | #else /* ! SMP */ | 71 | #else /* ! SMP */ |
72 | 72 | ||
73 | #define per_cpu(var, cpu) (*((void)(cpu), &(var))) | 73 | #define VERIFY_PERCPU_PTR(__p) ({ \ |
74 | #define __get_cpu_var(var) (var) | 74 | __verify_pcpu_ptr((__p)); \ |
75 | #define __raw_get_cpu_var(var) (var) | 75 | (typeof(*(__p)) __kernel __force *)(__p); \ |
76 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | 76 | }) |
77 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | 77 | |
78 | #define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) | ||
79 | #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
80 | #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
81 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
82 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | ||
78 | 83 | ||
79 | #endif /* SMP */ | 84 | #endif /* SMP */ |
80 | 85 | ||
81 | #ifndef PER_CPU_BASE_SECTION | 86 | #ifndef PER_CPU_BASE_SECTION |
82 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
83 | #define PER_CPU_BASE_SECTION ".data.percpu" | 88 | #define PER_CPU_BASE_SECTION ".data..percpu" |
84 | #else | 89 | #else |
85 | #define PER_CPU_BASE_SECTION ".data" | 90 | #define PER_CPU_BASE_SECTION ".data" |
86 | #endif | 91 | #endif |
@@ -92,15 +97,15 @@ extern void setup_per_cpu_areas(void); | |||
92 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | 97 | #define PER_CPU_SHARED_ALIGNED_SECTION "" |
93 | #define PER_CPU_ALIGNED_SECTION "" | 98 | #define PER_CPU_ALIGNED_SECTION "" |
94 | #else | 99 | #else |
95 | #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" | 100 | #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" |
96 | #define PER_CPU_ALIGNED_SECTION ".shared_aligned" | 101 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
97 | #endif | 102 | #endif |
98 | #define PER_CPU_FIRST_SECTION ".first" | 103 | #define PER_CPU_FIRST_SECTION "..first" |
99 | 104 | ||
100 | #else | 105 | #else |
101 | 106 | ||
102 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | 107 | #define PER_CPU_SHARED_ALIGNED_SECTION "" |
103 | #define PER_CPU_ALIGNED_SECTION ".shared_aligned" | 108 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
104 | #define PER_CPU_FIRST_SECTION "" | 109 | #define PER_CPU_FIRST_SECTION "" |
105 | 110 | ||
106 | #endif | 111 | #endif |
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7c..5de07355fad4 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist { | |||
11 | unsigned int offset; | 11 | unsigned int offset; |
12 | unsigned int length; | 12 | unsigned int length; |
13 | dma_addr_t dma_address; | 13 | dma_addr_t dma_address; |
14 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
14 | unsigned int dma_length; | 15 | unsigned int dma_length; |
16 | #endif | ||
15 | }; | 17 | }; |
16 | 18 | ||
17 | /* | 19 | /* |
@@ -22,22 +24,11 @@ struct scatterlist { | |||
22 | * is 0. | 24 | * is 0. |
23 | */ | 25 | */ |
24 | #define sg_dma_address(sg) ((sg)->dma_address) | 26 | #define sg_dma_address(sg) ((sg)->dma_address) |
25 | #ifndef sg_dma_len | 27 | |
26 | /* | 28 | #ifdef CONFIG_NEED_SG_DMA_LENGTH |
27 | * Normally, you have an iommu on 64 bit machines, but not on 32 bit | ||
28 | * machines. Architectures that are differnt should override this. | ||
29 | */ | ||
30 | #if __BITS_PER_LONG == 64 | ||
31 | #define sg_dma_len(sg) ((sg)->dma_length) | 29 | #define sg_dma_len(sg) ((sg)->dma_length) |
32 | #else | 30 | #else |
33 | #define sg_dma_len(sg) ((sg)->length) | 31 | #define sg_dma_len(sg) ((sg)->length) |
34 | #endif /* 64 bit */ | ||
35 | #endif /* sg_dma_len */ | ||
36 | |||
37 | #ifndef ISA_DMA_THRESHOLD | ||
38 | #define ISA_DMA_THRESHOLD (~0UL) | ||
39 | #endif | 32 | #endif |
40 | 33 | ||
41 | #define ARCH_HAS_SG_CHAIN | ||
42 | |||
43 | #endif /* __ASM_GENERIC_SCATTERLIST_H */ | 34 | #endif /* __ASM_GENERIC_SCATTERLIST_H */ |
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 3b4fb3e52f0d..0fd28e028de1 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -33,7 +33,8 @@ struct statfs { | |||
33 | __kernel_fsid_t f_fsid; | 33 | __kernel_fsid_t f_fsid; |
34 | __statfs_word f_namelen; | 34 | __statfs_word f_namelen; |
35 | __statfs_word f_frsize; | 35 | __statfs_word f_frsize; |
36 | __statfs_word f_spare[5]; | 36 | __statfs_word f_flags; |
37 | __statfs_word f_spare[4]; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | /* | 40 | /* |
@@ -55,7 +56,8 @@ struct statfs64 { | |||
55 | __kernel_fsid_t f_fsid; | 56 | __kernel_fsid_t f_fsid; |
56 | __statfs_word f_namelen; | 57 | __statfs_word f_namelen; |
57 | __statfs_word f_frsize; | 58 | __statfs_word f_frsize; |
58 | __statfs_word f_spare[5]; | 59 | __statfs_word f_flags; |
60 | __statfs_word f_spare[4]; | ||
59 | } ARCH_PACK_STATFS64; | 61 | } ARCH_PACK_STATFS64; |
60 | 62 | ||
61 | /* | 63 | /* |
@@ -77,7 +79,8 @@ struct compat_statfs64 { | |||
77 | __kernel_fsid_t f_fsid; | 79 | __kernel_fsid_t f_fsid; |
78 | __u32 f_namelen; | 80 | __u32 f_namelen; |
79 | __u32 f_frsize; | 81 | __u32 f_frsize; |
80 | __u32 f_spare[5]; | 82 | __u32 f_flags; |
83 | __u32 f_spare[4]; | ||
81 | } ARCH_PACK_COMPAT_STATFS64; | 84 | } ARCH_PACK_COMPAT_STATFS64; |
82 | 85 | ||
83 | #endif | 86 | #endif |
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index df84e3b04555..d89dec864d42 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs); | |||
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #ifndef sys_execve | 25 | #ifndef sys_execve |
26 | asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, | 26 | asmlinkage long sys_execve(const char __user *filename, |
27 | char __user * __user *envp, struct pt_regs *regs); | 27 | const char __user *const __user *argv, |
28 | const char __user *const __user *envp, | ||
29 | struct pt_regs *regs); | ||
28 | #endif | 30 | #endif |
29 | 31 | ||
30 | #ifndef sys_mmap2 | 32 | #ifndef sys_mmap2 |
diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h
index 1c9773d48cb0..232b4781aef3 100644
--- a/include/asm-generic/termbits.h
+++ b/include/asm-generic/termbits.h
@@ -178,6 +178,7 @@ struct ktermios { | |||
178 | #define FLUSHO 0010000 | 178 | #define FLUSHO 0010000 |
179 | #define PENDIN 0040000 | 179 | #define PENDIN 0040000 |
180 | #define IEXTEN 0100000 | 180 | #define IEXTEN 0100000 |
181 | #define EXTPROC 0200000 | ||
181 | 182 | ||
182 | /* tcflow() and TCXONC use these */ | 183 | /* tcflow() and TCXONC use these */ |
183 | #define TCOOFF 0 | 184 | #define TCOOFF 0 |
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d4..fc824e2828f3 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2002, IBM Corp. | 6 | * Copyright (C) 2002, IBM Corp. |
7 | * | 7 | * |
8 | * All rights reserved. | 8 | * All rights reserved. |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
@@ -34,6 +34,16 @@ | |||
34 | #ifndef cpu_to_node | 34 | #ifndef cpu_to_node |
35 | #define cpu_to_node(cpu) ((void)(cpu),0) | 35 | #define cpu_to_node(cpu) ((void)(cpu),0) |
36 | #endif | 36 | #endif |
37 | #ifndef set_numa_node | ||
38 | #define set_numa_node(node) | ||
39 | #endif | ||
40 | #ifndef set_cpu_numa_node | ||
41 | #define set_cpu_numa_node(cpu, node) | ||
42 | #endif | ||
43 | #ifndef cpu_to_mem | ||
44 | #define cpu_to_mem(cpu) ((void)(cpu),0) | ||
45 | #endif | ||
46 | |||
37 | #ifndef parent_node | 47 | #ifndef parent_node |
38 | #define parent_node(node) ((void)(node),0) | 48 | #define parent_node(node) ((void)(node),0) |
39 | #endif | 49 | #endif |
@@ -52,4 +62,15 @@ | |||
52 | 62 | ||
53 | #endif /* CONFIG_NUMA */ | 63 | #endif /* CONFIG_NUMA */ |
54 | 64 | ||
65 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) | ||
66 | |||
67 | #ifndef set_numa_mem | ||
68 | #define set_numa_mem(node) | ||
69 | #endif | ||
70 | #ifndef set_cpu_numa_mem | ||
71 | #define set_cpu_numa_mem(cpu, node) | ||
72 | #endif | ||
73 | |||
74 | #endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ | ||
75 | |||
55 | #endif /* _ASM_GENERIC_TOPOLOGY_H */ | 76 | #endif /* _ASM_GENERIC_TOPOLOGY_H */ |
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 6a0b30f78a62..b969770196c2 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -18,7 +18,7 @@ | |||
18 | #define __SYSCALL(x, y) | 18 | #define __SYSCALL(x, y) |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #if __BITS_PER_LONG == 32 | 21 | #if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT) |
22 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) | 22 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) |
23 | #else | 23 | #else |
24 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) | 24 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) |
@@ -241,8 +241,13 @@ __SYSCALL(__NR_sync, sys_sync) | |||
241 | __SYSCALL(__NR_fsync, sys_fsync) | 241 | __SYSCALL(__NR_fsync, sys_fsync) |
242 | #define __NR_fdatasync 83 | 242 | #define __NR_fdatasync 83 |
243 | __SYSCALL(__NR_fdatasync, sys_fdatasync) | 243 | __SYSCALL(__NR_fdatasync, sys_fdatasync) |
244 | #ifdef __ARCH_WANT_SYNC_FILE_RANGE2 | ||
245 | #define __NR_sync_file_range2 84 | ||
246 | __SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) | ||
247 | #else | ||
244 | #define __NR_sync_file_range 84 | 248 | #define __NR_sync_file_range 84 |
245 | __SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */ | 249 | __SYSCALL(__NR_sync_file_range, sys_sync_file_range) |
250 | #endif | ||
246 | 251 | ||
247 | /* fs/timerfd.c */ | 252 | /* fs/timerfd.c */ |
248 | #define __NR_timerfd_create 85 | 253 | #define __NR_timerfd_create 85 |
@@ -580,7 +585,7 @@ __SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ | |||
580 | __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) | 585 | __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) |
581 | /* mm/fadvise.c */ | 586 | /* mm/fadvise.c */ |
582 | #define __NR3264_fadvise64 223 | 587 | #define __NR3264_fadvise64 223 |
583 | __SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64) | 588 | __SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) |
584 | 589 | ||
585 | /* mm/, CONFIG_MMU only */ | 590 | /* mm/, CONFIG_MMU only */ |
586 | #ifndef __ARCH_NOMMU | 591 | #ifndef __ARCH_NOMMU |
@@ -627,8 +632,23 @@ __SYSCALL(__NR_accept4, sys_accept4) | |||
627 | #define __NR_recvmmsg 243 | 632 | #define __NR_recvmmsg 243 |
628 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) | 633 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) |
629 | 634 | ||
635 | /* | ||
636 | * Architectures may provide up to 16 syscalls of their own | ||
637 | * starting with this value. | ||
638 | */ | ||
639 | #define __NR_arch_specific_syscall 244 | ||
640 | |||
641 | #define __NR_wait4 260 | ||
642 | __SYSCALL(__NR_wait4, sys_wait4) | ||
643 | #define __NR_prlimit64 261 | ||
644 | __SYSCALL(__NR_prlimit64, sys_prlimit64) | ||
645 | #define __NR_fanotify_init 262 | ||
646 | __SYSCALL(__NR_fanotify_init, sys_fanotify_init) | ||
647 | #define __NR_fanotify_mark 263 | ||
648 | __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) | ||
649 | |||
630 | #undef __NR_syscalls | 650 | #undef __NR_syscalls |
631 | #define __NR_syscalls 244 | 651 | #define __NR_syscalls 264 |
632 | 652 | ||
633 | /* | 653 | /* |
634 | * All syscalls below here should go away really, | 654 | * All syscalls below here should go away really, |
@@ -694,7 +714,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd) | |||
694 | #define __NR_syscalls (__NR_signalfd+1) | 714 | #define __NR_syscalls (__NR_signalfd+1) |
695 | #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ | 715 | #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ |
696 | 716 | ||
697 | #if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T) | 717 | #if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \ |
718 | defined(__ARCH_WANT_SYSCALL_OFF_T) | ||
698 | #define __NR_sendfile 1046 | 719 | #define __NR_sendfile 1046 |
699 | __SYSCALL(__NR_sendfile, sys_sendfile) | 720 | __SYSCALL(__NR_sendfile, sys_sendfile) |
700 | #define __NR_ftruncate 1047 | 721 | #define __NR_ftruncate 1047 |
@@ -740,6 +761,7 @@ __SYSCALL(__NR_getpgrp, sys_getpgrp) | |||
740 | __SYSCALL(__NR_pause, sys_pause) | 761 | __SYSCALL(__NR_pause, sys_pause) |
741 | #define __NR_time 1062 | 762 | #define __NR_time 1062 |
742 | #define __ARCH_WANT_SYS_TIME | 763 | #define __ARCH_WANT_SYS_TIME |
764 | #define __ARCH_WANT_COMPAT_SYS_TIME | ||
743 | __SYSCALL(__NR_time, sys_time) | 765 | __SYSCALL(__NR_time, sys_time) |
744 | #define __NR_utime 1063 | 766 | #define __NR_utime 1063 |
745 | #define __ARCH_WANT_SYS_UTIME | 767 | #define __ARCH_WANT_SYS_UTIME |
@@ -763,8 +785,8 @@ __SYSCALL(__NR_epoll_wait, sys_epoll_wait) | |||
763 | __SYSCALL(__NR_ustat, sys_ustat) | 785 | __SYSCALL(__NR_ustat, sys_ustat) |
764 | #define __NR_vfork 1071 | 786 | #define __NR_vfork 1071 |
765 | __SYSCALL(__NR_vfork, sys_vfork) | 787 | __SYSCALL(__NR_vfork, sys_vfork) |
766 | #define __NR_wait4 1072 | 788 | #define __NR_oldwait4 1072 |
767 | __SYSCALL(__NR_wait4, sys_wait4) | 789 | __SYSCALL(__NR_oldwait4, sys_wait4) |
768 | #define __NR_recv 1073 | 790 | #define __NR_recv 1073 |
769 | __SYSCALL(__NR_recv, sys_recv) | 791 | __SYSCALL(__NR_recv, sys_recv) |
770 | #define __NR_send 1074 | 792 | #define __NR_send 1074 |
@@ -801,7 +823,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) | |||
801 | * Here we map the numbers so that both versions | 823 | * Here we map the numbers so that both versions |
802 | * use the same syscall table layout. | 824 | * use the same syscall table layout. |
803 | */ | 825 | */ |
804 | #if __BITS_PER_LONG == 64 | 826 | #if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT) |
805 | #define __NR_fcntl __NR3264_fcntl | 827 | #define __NR_fcntl __NR3264_fcntl |
806 | #define __NR_statfs __NR3264_statfs | 828 | #define __NR_statfs __NR3264_statfs |
807 | #define __NR_fstatfs __NR3264_fstatfs | 829 | #define __NR_fstatfs __NR3264_fstatfs |
@@ -848,6 +870,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) | |||
848 | #endif | 870 | #endif |
849 | #define __ARCH_WANT_SYS_RT_SIGACTION | 871 | #define __ARCH_WANT_SYS_RT_SIGACTION |
850 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | 872 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND |
873 | #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | ||
851 | 874 | ||
852 | /* | 875 | /* |
853 | * "Conditional" syscalls | 876 | * "Conditional" syscalls |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..8a92a170fb7d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -63,6 +63,12 @@ | |||
63 | /* Align . to a 8 byte boundary equals to maximum function alignment. */ | 63 | /* Align . to a 8 byte boundary equals to maximum function alignment. */ |
64 | #define ALIGN_FUNCTION() . = ALIGN(8) | 64 | #define ALIGN_FUNCTION() . = ALIGN(8) |
65 | 65 | ||
66 | /* | ||
67 | * Align to a 32 byte boundary equal to the | ||
68 | * alignment gcc 4.5 uses for a struct | ||
69 | */ | ||
70 | #define STRUCT_ALIGN() . = ALIGN(32) | ||
71 | |||
66 | /* The actual configuration determine if the init/exit sections | 72 | /* The actual configuration determine if the init/exit sections |
67 | * are handled as text/data or they can be discarded (which | 73 | * are handled as text/data or they can be discarded (which |
68 | * often happens at runtime) | 74 | * often happens at runtime) |
@@ -150,10 +156,6 @@ | |||
150 | CPU_KEEP(exit.data) \ | 156 | CPU_KEEP(exit.data) \ |
151 | MEM_KEEP(init.data) \ | 157 | MEM_KEEP(init.data) \ |
152 | MEM_KEEP(exit.data) \ | 158 | MEM_KEEP(exit.data) \ |
153 | . = ALIGN(8); \ | ||
154 | VMLINUX_SYMBOL(__start___markers) = .; \ | ||
155 | *(__markers) \ | ||
156 | VMLINUX_SYMBOL(__stop___markers) = .; \ | ||
157 | . = ALIGN(32); \ | 159 | . = ALIGN(32); \ |
158 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ | 160 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ |
159 | *(__tracepoints) \ | 161 | *(__tracepoints) \ |
@@ -166,7 +168,11 @@ | |||
166 | LIKELY_PROFILE() \ | 168 | LIKELY_PROFILE() \ |
167 | BRANCH_PROFILE() \ | 169 | BRANCH_PROFILE() \ |
168 | TRACE_PRINTKS() \ | 170 | TRACE_PRINTKS() \ |
171 | \ | ||
172 | STRUCT_ALIGN(); \ | ||
169 | FTRACE_EVENTS() \ | 173 | FTRACE_EVENTS() \ |
174 | \ | ||
175 | STRUCT_ALIGN(); \ | ||
170 | TRACE_SYSCALLS() | 176 | TRACE_SYSCALLS() |
171 | 177 | ||
172 | /* | 178 | /* |
@@ -175,25 +181,25 @@ | |||
175 | #define NOSAVE_DATA \ | 181 | #define NOSAVE_DATA \ |
176 | . = ALIGN(PAGE_SIZE); \ | 182 | . = ALIGN(PAGE_SIZE); \ |
177 | VMLINUX_SYMBOL(__nosave_begin) = .; \ | 183 | VMLINUX_SYMBOL(__nosave_begin) = .; \ |
178 | *(.data.nosave) \ | 184 | *(.data..nosave) \ |
179 | . = ALIGN(PAGE_SIZE); \ | 185 | . = ALIGN(PAGE_SIZE); \ |
180 | VMLINUX_SYMBOL(__nosave_end) = .; | 186 | VMLINUX_SYMBOL(__nosave_end) = .; |
181 | 187 | ||
182 | #define PAGE_ALIGNED_DATA(page_align) \ | 188 | #define PAGE_ALIGNED_DATA(page_align) \ |
183 | . = ALIGN(page_align); \ | 189 | . = ALIGN(page_align); \ |
184 | *(.data.page_aligned) | 190 | *(.data..page_aligned) |
185 | 191 | ||
186 | #define READ_MOSTLY_DATA(align) \ | 192 | #define READ_MOSTLY_DATA(align) \ |
187 | . = ALIGN(align); \ | 193 | . = ALIGN(align); \ |
188 | *(.data.read_mostly) | 194 | *(.data..read_mostly) |
189 | 195 | ||
190 | #define CACHELINE_ALIGNED_DATA(align) \ | 196 | #define CACHELINE_ALIGNED_DATA(align) \ |
191 | . = ALIGN(align); \ | 197 | . = ALIGN(align); \ |
192 | *(.data.cacheline_aligned) | 198 | *(.data..cacheline_aligned) |
193 | 199 | ||
194 | #define INIT_TASK_DATA(align) \ | 200 | #define INIT_TASK_DATA(align) \ |
195 | . = ALIGN(align); \ | 201 | . = ALIGN(align); \ |
196 | *(.data.init_task) | 202 | *(.data..init_task) |
197 | 203 | ||
198 | /* | 204 | /* |
199 | * Read only Data | 205 | * Read only Data |
@@ -247,10 +253,10 @@ | |||
247 | } \ | 253 | } \ |
248 | \ | 254 | \ |
249 | /* RapidIO route ops */ \ | 255 | /* RapidIO route ops */ \ |
250 | .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ | 256 | .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \ |
251 | VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ | 257 | VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \ |
252 | *(.rio_route_ops) \ | 258 | *(.rio_switch_ops) \ |
253 | VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ | 259 | VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \ |
254 | } \ | 260 | } \ |
255 | \ | 261 | \ |
256 | TRACEDATA \ | 262 | TRACEDATA \ |
@@ -435,7 +441,7 @@ | |||
435 | */ | 441 | */ |
436 | #define INIT_TASK_DATA_SECTION(align) \ | 442 | #define INIT_TASK_DATA_SECTION(align) \ |
437 | . = ALIGN(align); \ | 443 | . = ALIGN(align); \ |
438 | .data.init_task : { \ | 444 | .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ |
439 | INIT_TASK_DATA(align) \ | 445 | INIT_TASK_DATA(align) \ |
440 | } | 446 | } |
441 | 447 | ||
@@ -499,7 +505,7 @@ | |||
499 | #define BSS(bss_align) \ | 505 | #define BSS(bss_align) \ |
500 | . = ALIGN(bss_align); \ | 506 | . = ALIGN(bss_align); \ |
501 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ | 507 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ |
502 | *(.bss.page_aligned) \ | 508 | *(.bss..page_aligned) \ |
503 | *(.dynbss) \ | 509 | *(.dynbss) \ |
504 | *(.bss) \ | 510 | *(.bss) \ |
505 | *(COMMON) \ | 511 | *(COMMON) \ |
@@ -643,6 +649,7 @@ | |||
643 | EXIT_DATA \ | 649 | EXIT_DATA \ |
644 | EXIT_CALL \ | 650 | EXIT_CALL \ |
645 | *(.discard) \ | 651 | *(.discard) \ |
652 | *(.discard.*) \ | ||
646 | } | 653 | } |
647 | 654 | ||
648 | /** | 655 | /** |
@@ -666,16 +673,16 @@ | |||
666 | */ | 673 | */ |
667 | #define PERCPU_VADDR(vaddr, phdr) \ | 674 | #define PERCPU_VADDR(vaddr, phdr) \ |
668 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ | 675 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ |
669 | .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ | 676 | .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ |
670 | - LOAD_OFFSET) { \ | 677 | - LOAD_OFFSET) { \ |
671 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 678 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
672 | *(.data.percpu.first) \ | 679 | *(.data..percpu..first) \ |
673 | *(.data.percpu.page_aligned) \ | 680 | *(.data..percpu..page_aligned) \ |
674 | *(.data.percpu) \ | 681 | *(.data..percpu) \ |
675 | *(.data.percpu.shared_aligned) \ | 682 | *(.data..percpu..shared_aligned) \ |
676 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ | 683 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ |
677 | } phdr \ | 684 | } phdr \ |
678 | . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu); | 685 | . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); |
679 | 686 | ||
680 | /** | 687 | /** |
681 | * PERCPU - define output section for percpu area, simple version | 688 | * PERCPU - define output section for percpu area, simple version |
@@ -687,18 +694,18 @@ | |||
687 | * | 694 | * |
688 | * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except | 695 | * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except |
689 | * that __per_cpu_load is defined as a relative symbol against | 696 | * that __per_cpu_load is defined as a relative symbol against |
690 | * .data.percpu which is required for relocatable x86_32 | 697 | * .data..percpu which is required for relocatable x86_32 |
691 | * configuration. | 698 | * configuration. |
692 | */ | 699 | */ |
693 | #define PERCPU(align) \ | 700 | #define PERCPU(align) \ |
694 | . = ALIGN(align); \ | 701 | . = ALIGN(align); \ |
695 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ | 702 | .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ |
696 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ | 703 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ |
697 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 704 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
698 | *(.data.percpu.first) \ | 705 | *(.data..percpu..first) \ |
699 | *(.data.percpu.page_aligned) \ | 706 | *(.data..percpu..page_aligned) \ |
700 | *(.data.percpu) \ | 707 | *(.data..percpu) \ |
701 | *(.data.percpu.shared_aligned) \ | 708 | *(.data..percpu..shared_aligned) \ |
702 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ | 709 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ |
703 | } | 710 | } |
704 | 711 | ||
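
Finally, a small, hypothetical illustration of the per-CPU section renames spread across the percpu.h and vmlinux.lds.h hunks above; the variable is made up, the point is where it ends up in the image.

```c
#include <linux/percpu.h>

/* With the renames above this lands in ".data..percpu" (formerly
 * ".data.percpu"), which the updated PERCPU()/PERCPU_VADDR() linker-script
 * macros now collect. */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_hit(void)
{
	/* get_cpu_var() disables preemption; on !SMP builds the access now
	 * also goes through VERIFY_PERCPU_PTR(). */
	get_cpu_var(example_hits)++;
	put_cpu_var(example_hits);
}
```
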