Diffstat (limited to 'include/asm-generic')
52 files changed, 917 insertions, 391 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 9d40e879f99e..77ff547730af 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h | |||
@@ -27,9 +27,9 @@ | |||
27 | #define pud_page_vaddr(pud) pgd_page_vaddr(pud) | 27 | #define pud_page_vaddr(pud) pgd_page_vaddr(pud) |
28 | 28 | ||
29 | #undef pud_free_tlb | 29 | #undef pud_free_tlb |
30 | #define pud_free_tlb(tlb, x) do { } while (0) | 30 | #define pud_free_tlb(tlb, x, addr) do { } while (0) |
31 | #define pud_free(mm, x) do { } while (0) | 31 | #define pud_free(mm, x) do { } while (0) |
32 | #define __pud_free_tlb(tlb, x) do { } while (0) | 32 | #define __pud_free_tlb(tlb, x, addr) do { } while (0) |
33 | 33 | ||
34 | #undef pud_addr_end | 34 | #undef pud_addr_end |
35 | #define pud_addr_end(addr, end) (end) | 35 | #define pud_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index eb62334cda29..53f91b1ae53a 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild | |||
@@ -3,6 +3,8 @@ header-y += bitsperlong.h | |||
3 | header-y += errno-base.h | 3 | header-y += errno-base.h |
4 | header-y += errno.h | 4 | header-y += errno.h |
5 | header-y += fcntl.h | 5 | header-y += fcntl.h |
6 | header-y += int-l64.h | ||
7 | header-y += int-ll64.h | ||
6 | header-y += ioctl.h | 8 | header-y += ioctl.h |
7 | header-y += ioctls.h | 9 | header-y += ioctls.h |
8 | header-y += ipcbuf.h | 10 | header-y += ipcbuf.h |
@@ -12,10 +14,12 @@ header-y += msgbuf.h | |||
12 | header-y += param.h | 14 | header-y += param.h |
13 | header-y += poll.h | 15 | header-y += poll.h |
14 | header-y += posix_types.h | 16 | header-y += posix_types.h |
17 | header-y += resource.h | ||
15 | header-y += sembuf.h | 18 | header-y += sembuf.h |
16 | header-y += setup.h | 19 | header-y += setup.h |
17 | header-y += shmbuf.h | 20 | header-y += shmbuf.h |
18 | header-y += shmparam.h | 21 | header-y += shmparam.h |
22 | header-y += siginfo.h | ||
19 | header-y += signal-defs.h | 23 | header-y += signal-defs.h |
20 | header-y += signal.h | 24 | header-y += signal.h |
21 | header-y += socket.h | 25 | header-y += socket.h |
@@ -28,8 +32,3 @@ header-y += termios.h | |||
28 | header-y += types.h | 32 | header-y += types.h |
29 | header-y += ucontext.h | 33 | header-y += ucontext.h |
30 | header-y += unistd.h | 34 | header-y += unistd.h |
31 | |||
32 | unifdef-y += int-l64.h | ||
33 | unifdef-y += int-ll64.h | ||
34 | unifdef-y += resource.h | ||
35 | unifdef-y += siginfo.h | ||
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index 290910e4ede4..c5d2e5dd871b 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm | |||
@@ -1,39 +1,45 @@ | |||
1 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ | 1 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ |
2 | $(srctree)/include/asm-$(SRCARCH)/kvm.h),) | 2 | $(srctree)/include/asm-$(SRCARCH)/kvm.h),) |
3 | header-y += kvm.h | 3 | header-y += kvm.h |
4 | endif | 4 | endif |
5 | 5 | ||
6 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \ | ||
7 | $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),) | ||
8 | header-y += kvm_para.h | ||
9 | endif | ||
10 | |||
6 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ | 11 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ |
7 | $(srctree)/include/asm-$(SRCARCH)/a.out.h),) | 12 | $(srctree)/include/asm-$(SRCARCH)/a.out.h),) |
8 | unifdef-y += a.out.h | 13 | header-y += a.out.h |
9 | endif | 14 | endif |
10 | unifdef-y += auxvec.h | 15 | |
11 | unifdef-y += byteorder.h | 16 | header-y += auxvec.h |
12 | unifdef-y += bitsperlong.h | 17 | header-y += bitsperlong.h |
13 | unifdef-y += errno.h | 18 | header-y += byteorder.h |
14 | unifdef-y += fcntl.h | 19 | header-y += errno.h |
15 | unifdef-y += ioctl.h | 20 | header-y += fcntl.h |
16 | unifdef-y += ioctls.h | 21 | header-y += ioctl.h |
17 | unifdef-y += ipcbuf.h | 22 | header-y += ioctls.h |
18 | unifdef-y += mman.h | 23 | header-y += ipcbuf.h |
19 | unifdef-y += msgbuf.h | 24 | header-y += mman.h |
20 | unifdef-y += param.h | 25 | header-y += msgbuf.h |
21 | unifdef-y += poll.h | 26 | header-y += param.h |
22 | unifdef-y += posix_types.h | 27 | header-y += poll.h |
23 | unifdef-y += ptrace.h | 28 | header-y += posix_types.h |
24 | unifdef-y += resource.h | 29 | header-y += ptrace.h |
25 | unifdef-y += sembuf.h | 30 | header-y += resource.h |
26 | unifdef-y += setup.h | 31 | header-y += sembuf.h |
27 | unifdef-y += shmbuf.h | 32 | header-y += setup.h |
28 | unifdef-y += sigcontext.h | 33 | header-y += shmbuf.h |
29 | unifdef-y += siginfo.h | 34 | header-y += sigcontext.h |
30 | unifdef-y += signal.h | 35 | header-y += siginfo.h |
31 | unifdef-y += socket.h | 36 | header-y += signal.h |
32 | unifdef-y += sockios.h | 37 | header-y += socket.h |
33 | unifdef-y += stat.h | 38 | header-y += sockios.h |
34 | unifdef-y += statfs.h | 39 | header-y += stat.h |
35 | unifdef-y += swab.h | 40 | header-y += statfs.h |
36 | unifdef-y += termbits.h | 41 | header-y += swab.h |
37 | unifdef-y += termios.h | 42 | header-y += termbits.h |
38 | unifdef-y += types.h | 43 | header-y += termios.h |
39 | unifdef-y += unistd.h | 44 | header-y += types.h |
45 | header-y += unistd.h | ||
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index c99c64dc5f3d..e994197f84b7 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h | |||
@@ -30,21 +30,20 @@ | |||
30 | * atomic_read - read atomic variable | 30 | * atomic_read - read atomic variable |
31 | * @v: pointer of type atomic_t | 31 | * @v: pointer of type atomic_t |
32 | * | 32 | * |
33 | * Atomically reads the value of @v. Note that the guaranteed | 33 | * Atomically reads the value of @v. |
34 | * useful range of an atomic_t is only 24 bits. | ||
35 | */ | 34 | */ |
36 | #define atomic_read(v) ((v)->counter) | 35 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
37 | 36 | ||
38 | /** | 37 | /** |
39 | * atomic_set - set atomic variable | 38 | * atomic_set - set atomic variable |
40 | * @v: pointer of type atomic_t | 39 | * @v: pointer of type atomic_t |
41 | * @i: required value | 40 | * @i: required value |
42 | * | 41 | * |
43 | * Atomically sets the value of @v to @i. Note that the guaranteed | 42 | * Atomically sets the value of @v to @i. |
44 | * useful range of an atomic_t is only 24 bits. | ||
45 | */ | 43 | */ |
46 | #define atomic_set(v, i) (((v)->counter) = (i)) | 44 | #define atomic_set(v, i) (((v)->counter) = (i)) |
47 | 45 | ||
46 | #include <linux/irqflags.h> | ||
48 | #include <asm/system.h> | 47 | #include <asm/system.h> |
49 | 48 | ||
50 | /** | 49 | /** |
@@ -53,18 +52,17 @@ | |||
53 | * @v: pointer of type atomic_t | 52 | * @v: pointer of type atomic_t |
54 | * | 53 | * |
55 | * Atomically adds @i to @v and returns the result | 54 | * Atomically adds @i to @v and returns the result |
56 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
57 | */ | 55 | */ |
58 | static inline int atomic_add_return(int i, atomic_t *v) | 56 | static inline int atomic_add_return(int i, atomic_t *v) |
59 | { | 57 | { |
60 | unsigned long flags; | 58 | unsigned long flags; |
61 | int temp; | 59 | int temp; |
62 | 60 | ||
63 | local_irq_save(flags); | 61 | raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ |
64 | temp = v->counter; | 62 | temp = v->counter; |
65 | temp += i; | 63 | temp += i; |
66 | v->counter = temp; | 64 | v->counter = temp; |
67 | local_irq_restore(flags); | 65 | raw_local_irq_restore(flags); |
68 | 66 | ||
69 | return temp; | 67 | return temp; |
70 | } | 68 | } |
@@ -75,18 +73,17 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
75 | * @v: pointer of type atomic_t | 73 | * @v: pointer of type atomic_t |
76 | * | 74 | * |
77 | * Atomically subtracts @i from @v and returns the result | 75 | * Atomically subtracts @i from @v and returns the result |
78 | * Note that the guaranteed useful range of an atomic_t is only 24 bits. | ||
79 | */ | 76 | */ |
80 | static inline int atomic_sub_return(int i, atomic_t *v) | 77 | static inline int atomic_sub_return(int i, atomic_t *v) |
81 | { | 78 | { |
82 | unsigned long flags; | 79 | unsigned long flags; |
83 | int temp; | 80 | int temp; |
84 | 81 | ||
85 | local_irq_save(flags); | 82 | raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ |
86 | temp = v->counter; | 83 | temp = v->counter; |
87 | temp -= i; | 84 | temp -= i; |
88 | v->counter = temp; | 85 | v->counter = temp; |
89 | local_irq_restore(flags); | 86 | raw_local_irq_restore(flags); |
90 | 87 | ||
91 | return temp; | 88 | return temp; |
92 | } | 89 | } |
@@ -123,14 +120,23 @@ static inline void atomic_dec(atomic_t *v) | |||
123 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | 120 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) |
124 | #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) | 121 | #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) |
125 | 122 | ||
126 | #define atomic_add_unless(v, a, u) \ | 123 | #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) |
127 | ({ \ | 124 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
128 | int c, old; \ | 125 | |
129 | c = atomic_read(v); \ | 126 | #define cmpxchg_local(ptr, o, n) \ |
130 | while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | 127 | ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ |
131 | c = old; \ | 128 | (unsigned long)(n), sizeof(*(ptr)))) |
132 | c != (u); \ | 129 | |
133 | }) | 130 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) |
131 | |||
132 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
133 | { | ||
134 | int c, old; | ||
135 | c = atomic_read(v); | ||
136 | while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) | ||
137 | c = old; | ||
138 | return c != u; | ||
139 | } | ||
134 | 140 | ||
135 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 141 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
136 | 142 | ||
@@ -139,20 +145,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
139 | unsigned long flags; | 145 | unsigned long flags; |
140 | 146 | ||
141 | mask = ~mask; | 147 | mask = ~mask; |
142 | local_irq_save(flags); | 148 | raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ |
143 | *addr &= mask; | 149 | *addr &= mask; |
144 | local_irq_restore(flags); | 150 | raw_local_irq_restore(flags); |
145 | } | 151 | } |
146 | 152 | ||
147 | #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) | ||
148 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) | ||
149 | |||
150 | #define cmpxchg_local(ptr, o, n) \ | ||
151 | ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ | ||
152 | (unsigned long)(n), sizeof(*(ptr)))) | ||
153 | |||
154 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | ||
155 | |||
156 | /* Assume that atomic operations are already serializing */ | 153 | /* Assume that atomic operations are already serializing */ |
157 | #define smp_mb__before_atomic_dec() barrier() | 154 | #define smp_mb__before_atomic_dec() barrier() |
158 | #define smp_mb__after_atomic_dec() barrier() | 155 | #define smp_mb__after_atomic_dec() barrier() |
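The macro version of atomic_add_unless() is turned into a real static inline built on atomic_cmpxchg(), with atomic_inc_not_zero() remaining a thin wrapper around it. A minimal usage sketch, assuming a hypothetical refcounted object ('struct my_object' and its 'refs' field are not part of this patch):

    /* Hedged sketch: take a reference only while the count is still non-zero.
     * 'struct my_object' and its atomic_t 'refs' member are invented here. */
    static int my_object_get(struct my_object *obj)
    {
            /* non-zero on success, 0 if refs had already dropped to zero */
            return atomic_add_unless(&obj->refs, 1, 0);
    }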
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h index 50764550a60c..bcbab3e4a3be 100644 --- a/include/asm-generic/audit_change_attr.h +++ b/include/asm-generic/audit_change_attr.h | |||
@@ -20,3 +20,7 @@ __NR_chown32, | |||
20 | __NR_fchown32, | 20 | __NR_fchown32, |
21 | __NR_lchown32, | 21 | __NR_lchown32, |
22 | #endif | 22 | #endif |
23 | __NR_link, | ||
24 | #ifdef __NR_linkat | ||
25 | __NR_linkat, | ||
26 | #endif | ||
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 000000000000..6a211f40665c --- /dev/null +++ b/include/asm-generic/bitops/arch_hweight.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | static inline unsigned int __arch_hweight32(unsigned int w) | ||
7 | { | ||
8 | return __sw_hweight32(w); | ||
9 | } | ||
10 | |||
11 | static inline unsigned int __arch_hweight16(unsigned int w) | ||
12 | { | ||
13 | return __sw_hweight16(w); | ||
14 | } | ||
15 | |||
16 | static inline unsigned int __arch_hweight8(unsigned int w) | ||
17 | { | ||
18 | return __sw_hweight8(w); | ||
19 | } | ||
20 | |||
21 | static inline unsigned long __arch_hweight64(__u64 w) | ||
22 | { | ||
23 | return __sw_hweight64(w); | ||
24 | } | ||
25 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | ||
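The new __arch_hweightN() helpers simply forward to the shared software routines (__sw_hweightN() in lib/hweight.c), which an architecture can replace with popcount instructions. A rough sketch of what such a software population count typically looks like, assuming the usual SWAR approach rather than quoting the kernel's exact code:

    /* Hedged sketch of a classic SWAR population count, assumed to be roughly
     * what __sw_hweight32() does; not copied from this patch. */
    static unsigned int sw_hweight32_sketch(unsigned int w)
    {
            w -= (w >> 1) & 0x55555555;
            w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
            w  = (w + (w >> 4)) & 0x0f0f0f0f;
            return (w * 0x01010101) >> 24;
    }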
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index c8946465e63a..ecc44a8e2b44 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h | |||
@@ -15,19 +15,19 @@ | |||
15 | # define ATOMIC_HASH_SIZE 4 | 15 | # define ATOMIC_HASH_SIZE 4 |
16 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 16 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
17 | 17 | ||
18 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 18 | extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
19 | 19 | ||
20 | /* Can't use raw_spin_lock_irq because of #include problems, so | 20 | /* Can't use raw_spin_lock_irq because of #include problems, so |
21 | * this is the substitute */ | 21 | * this is the substitute */ |
22 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 22 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
23 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 23 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
24 | local_irq_save(f); \ | 24 | local_irq_save(f); \ |
25 | __raw_spin_lock(s); \ | 25 | arch_spin_lock(s); \ |
26 | } while(0) | 26 | } while(0) |
27 | 27 | ||
28 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 28 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
29 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 29 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
30 | __raw_spin_unlock(s); \ | 30 | arch_spin_unlock(s); \ |
31 | local_irq_restore(f); \ | 31 | local_irq_restore(f); \ |
32 | } while(0) | 32 | } while(0) |
33 | 33 | ||
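The hashed locks here only change type from the old raw_spinlock_t name to arch_spinlock_t; the locking pattern itself is unchanged. A hedged sketch of how the generic atomic bitops in this file use the hash (set_bit_sketch is an illustrative name, not the file's actual set_bit):

    /* Hedged sketch of the pattern the generic atomic bitops use: hash the
     * word address to one of the spinlocks, then do the read-modify-write. */
    static inline void set_bit_sketch(int nr, volatile unsigned long *addr)
    {
            unsigned long mask = 1UL << (nr % BITS_PER_LONG);
            unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
            unsigned long flags;

            _atomic_spin_lock_irqsave(p, flags);
            *p |= mask;
            _atomic_spin_unlock_irqrestore(p, flags);
    }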
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 000000000000..fa2a50b7ee66 --- /dev/null +++ b/include/asm-generic/bitops/const_hweight.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ | ||
3 | |||
4 | /* | ||
5 | * Compile time versions of __arch_hweightN() | ||
6 | */ | ||
7 | #define __const_hweight8(w) \ | ||
8 | ( (!!((w) & (1ULL << 0))) + \ | ||
9 | (!!((w) & (1ULL << 1))) + \ | ||
10 | (!!((w) & (1ULL << 2))) + \ | ||
11 | (!!((w) & (1ULL << 3))) + \ | ||
12 | (!!((w) & (1ULL << 4))) + \ | ||
13 | (!!((w) & (1ULL << 5))) + \ | ||
14 | (!!((w) & (1ULL << 6))) + \ | ||
15 | (!!((w) & (1ULL << 7))) ) | ||
16 | |||
17 | #define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) | ||
18 | #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) | ||
19 | #define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) | ||
20 | |||
21 | /* | ||
22 | * Generic interface. | ||
23 | */ | ||
24 | #define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) | ||
25 | #define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) | ||
26 | #define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) | ||
27 | #define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) | ||
28 | |||
29 | /* | ||
30 | * Interface for known constant arguments | ||
31 | */ | ||
32 | #define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) | ||
33 | #define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) | ||
34 | #define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) | ||
35 | #define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) | ||
36 | |||
37 | /* | ||
38 | * Type invariant interface to the compile time constant hweight functions. | ||
39 | */ | ||
40 | #define HWEIGHT(w) HWEIGHT64((u64)w) | ||
41 | |||
42 | #endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ | ||
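hweightN() now dispatches on __builtin_constant_p(): constant arguments are folded at compile time by __const_hweightN(), while variables fall through to __arch_hweightN(). A small hedged illustration (runtime_bits() is an invented function):

    /* Illustration only: the first call folds to the constant 3 at compile
     * time, the second ends up in __arch_hweight8() at run time. */
    static unsigned int runtime_bits(unsigned int mask)
    {
            unsigned int fixed = hweight8(0x29);    /* 0b101001 -> 3, folded */
            return fixed + hweight8(mask);          /* __arch_hweight8(mask) */
    }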
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1914e9742512..110fa700f853 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h | |||
@@ -1,15 +1,50 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ | 1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ |
2 | #define _ASM_GENERIC_BITOPS_FIND_H_ | 2 | #define _ASM_GENERIC_BITOPS_FIND_H_ |
3 | 3 | ||
4 | #ifndef CONFIG_GENERIC_FIND_NEXT_BIT | 4 | /** |
5 | * find_next_bit - find the next set bit in a memory region | ||
6 | * @addr: The address to base the search on | ||
7 | * @offset: The bitnumber to start searching at | ||
8 | * @size: The bitmap size in bits | ||
9 | */ | ||
5 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | 10 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long |
6 | size, unsigned long offset); | 11 | size, unsigned long offset); |
7 | 12 | ||
13 | /** | ||
14 | * find_next_zero_bit - find the next cleared bit in a memory region | ||
15 | * @addr: The address to base the search on | ||
16 | * @offset: The bitnumber to start searching at | ||
17 | * @size: The bitmap size in bits | ||
18 | */ | ||
8 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned | 19 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned |
9 | long size, unsigned long offset); | 20 | long size, unsigned long offset); |
10 | #endif | 21 | |
22 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | ||
23 | |||
24 | /** | ||
25 | * find_first_bit - find the first set bit in a memory region | ||
26 | * @addr: The address to start the search at | ||
27 | * @size: The maximum size to search | ||
28 | * | ||
29 | * Returns the bit number of the first set bit. | ||
30 | */ | ||
31 | extern unsigned long find_first_bit(const unsigned long *addr, | ||
32 | unsigned long size); | ||
33 | |||
34 | /** | ||
35 | * find_first_zero_bit - find the first cleared bit in a memory region | ||
36 | * @addr: The address to start the search at | ||
37 | * @size: The maximum size to search | ||
38 | * | ||
39 | * Returns the bit number of the first cleared bit. | ||
40 | */ | ||
41 | extern unsigned long find_first_zero_bit(const unsigned long *addr, | ||
42 | unsigned long size); | ||
43 | #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
11 | 44 | ||
12 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | 45 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) |
13 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | 46 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
14 | 47 | ||
48 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
49 | |||
15 | #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ | 50 | #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ |
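With the kernel-doc added above, the calling convention is explicit: both functions return the bit number, or the bitmap size when nothing is found. A hedged sketch of the usual iteration over all set bits (handle_set_bits is an invented name; this is essentially the loop that helpers like for_each_set_bit() wrap):

    /* Hedged sketch: visit every set bit in a bitmap of 'nbits' bits. */
    static void handle_set_bits(const unsigned long *bitmap, unsigned long nbits)
    {
            unsigned long bit;

            for (bit = find_first_bit(bitmap, nbits);
                 bit < nbits;
                 bit = find_next_bit(bitmap, nbits, bit + 1)) {
                    /* act on 'bit' here */
            }
    }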
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h index fbbc383771da..a94d6519c7ed 100644 --- a/include/asm-generic/bitops/hweight.h +++ b/include/asm-generic/bitops/hweight.h | |||
@@ -1,11 +1,7 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ | 1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ | 2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm-generic/bitops/arch_hweight.h> |
5 | 5 | #include <asm-generic/bitops/const_hweight.h> | |
6 | extern unsigned int hweight32(unsigned int w); | ||
7 | extern unsigned int hweight16(unsigned int w); | ||
8 | extern unsigned int hweight8(unsigned int w); | ||
9 | extern unsigned long hweight64(__u64 w); | ||
10 | 6 | ||
11 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | 7 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 4b6755984d24..c2c9ba032d46 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -25,7 +25,10 @@ struct bug_entry { | |||
25 | }; | 25 | }; |
26 | #endif /* __ASSEMBLY__ */ | 26 | #endif /* __ASSEMBLY__ */ |
27 | 27 | ||
28 | #define BUGFLAG_WARNING (1<<0) | 28 | #define BUGFLAG_WARNING (1 << 0) |
29 | #define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8)) | ||
30 | #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) | ||
31 | |||
29 | #endif /* CONFIG_GENERIC_BUG */ | 32 | #endif /* CONFIG_GENERIC_BUG */ |
30 | 33 | ||
31 | /* | 34 | /* |
@@ -56,17 +59,25 @@ struct bug_entry { | |||
56 | * appear at runtime. Use the versions with printk format strings | 59 | * appear at runtime. Use the versions with printk format strings |
57 | * to provide better diagnostics. | 60 | * to provide better diagnostics. |
58 | */ | 61 | */ |
59 | #ifndef __WARN | 62 | #ifndef __WARN_TAINT |
60 | #ifndef __ASSEMBLY__ | 63 | #ifndef __ASSEMBLY__ |
61 | extern void warn_slowpath_fmt(const char *file, const int line, | 64 | extern void warn_slowpath_fmt(const char *file, const int line, |
62 | const char *fmt, ...) __attribute__((format(printf, 3, 4))); | 65 | const char *fmt, ...) __attribute__((format(printf, 3, 4))); |
66 | extern void warn_slowpath_fmt_taint(const char *file, const int line, | ||
67 | unsigned taint, const char *fmt, ...) | ||
68 | __attribute__((format(printf, 4, 5))); | ||
63 | extern void warn_slowpath_null(const char *file, const int line); | 69 | extern void warn_slowpath_null(const char *file, const int line); |
64 | #define WANT_WARN_ON_SLOWPATH | 70 | #define WANT_WARN_ON_SLOWPATH |
65 | #endif | 71 | #endif |
66 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) | 72 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) |
67 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) | 73 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) |
74 | #define __WARN_printf_taint(taint, arg...) \ | ||
75 | warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) | ||
68 | #else | 76 | #else |
77 | #define __WARN() __WARN_TAINT(TAINT_WARN) | ||
69 | #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) | 78 | #define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) |
79 | #define __WARN_printf_taint(taint, arg...) \ | ||
80 | do { printk(arg); __WARN_TAINT(taint); } while (0) | ||
70 | #endif | 81 | #endif |
71 | 82 | ||
72 | #ifndef WARN_ON | 83 | #ifndef WARN_ON |
@@ -87,6 +98,13 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
87 | }) | 98 | }) |
88 | #endif | 99 | #endif |
89 | 100 | ||
101 | #define WARN_TAINT(condition, taint, format...) ({ \ | ||
102 | int __ret_warn_on = !!(condition); \ | ||
103 | if (unlikely(__ret_warn_on)) \ | ||
104 | __WARN_printf_taint(taint, format); \ | ||
105 | unlikely(__ret_warn_on); \ | ||
106 | }) | ||
107 | |||
90 | #else /* !CONFIG_BUG */ | 108 | #else /* !CONFIG_BUG */ |
91 | #ifndef HAVE_ARCH_BUG | 109 | #ifndef HAVE_ARCH_BUG |
92 | #define BUG() do {} while(0) | 110 | #define BUG() do {} while(0) |
@@ -110,25 +128,37 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
110 | }) | 128 | }) |
111 | #endif | 129 | #endif |
112 | 130 | ||
131 | #define WARN_TAINT(condition, taint, format...) WARN_ON(condition) | ||
132 | |||
113 | #endif | 133 | #endif |
114 | 134 | ||
115 | #define WARN_ON_ONCE(condition) ({ \ | 135 | #define WARN_ON_ONCE(condition) ({ \ |
116 | static int __warned; \ | 136 | static bool __warned; \ |
117 | int __ret_warn_once = !!(condition); \ | 137 | int __ret_warn_once = !!(condition); \ |
118 | \ | 138 | \ |
119 | if (unlikely(__ret_warn_once)) \ | 139 | if (unlikely(__ret_warn_once)) \ |
120 | if (WARN_ON(!__warned)) \ | 140 | if (WARN_ON(!__warned)) \ |
121 | __warned = 1; \ | 141 | __warned = true; \ |
122 | unlikely(__ret_warn_once); \ | 142 | unlikely(__ret_warn_once); \ |
123 | }) | 143 | }) |
124 | 144 | ||
125 | #define WARN_ONCE(condition, format...) ({ \ | 145 | #define WARN_ONCE(condition, format...) ({ \ |
126 | static int __warned; \ | 146 | static bool __warned; \ |
127 | int __ret_warn_once = !!(condition); \ | 147 | int __ret_warn_once = !!(condition); \ |
128 | \ | 148 | \ |
129 | if (unlikely(__ret_warn_once)) \ | 149 | if (unlikely(__ret_warn_once)) \ |
130 | if (WARN(!__warned, format)) \ | 150 | if (WARN(!__warned, format)) \ |
131 | __warned = 1; \ | 151 | __warned = true; \ |
152 | unlikely(__ret_warn_once); \ | ||
153 | }) | ||
154 | |||
155 | #define WARN_TAINT_ONCE(condition, taint, format...) ({ \ | ||
156 | static bool __warned; \ | ||
157 | int __ret_warn_once = !!(condition); \ | ||
158 | \ | ||
159 | if (unlikely(__ret_warn_once)) \ | ||
160 | if (WARN_TAINT(!__warned, taint, format)) \ | ||
161 | __warned = true; \ | ||
132 | unlikely(__ret_warn_once); \ | 162 | unlikely(__ret_warn_once); \ |
133 | }) | 163 | }) |
134 | 164 | ||
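WARN_TAINT() and WARN_TAINT_ONCE() behave like WARN()/WARN_ONCE() but also record a taint flag via BUGFLAG_TAINT(). A hedged usage sketch; the condition, variable names and message are invented for illustration:

    /* Hedged sketch: complain once and taint the kernel when firmware hands
     * back an out-of-range reply; fw_reply and FW_REPLY_MAX are made up. */
    WARN_TAINT_ONCE(fw_reply > FW_REPLY_MAX, TAINT_FIRMWARE_WORKAROUND,
                    "firmware returned bogus reply %u\n", fw_reply);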
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index ba4ec39a1131..57b5c3c82e86 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define flush_cache_dup_mm(mm) do { } while (0) | 13 | #define flush_cache_dup_mm(mm) do { } while (0) |
14 | #define flush_cache_range(vma, start, end) do { } while (0) | 14 | #define flush_cache_range(vma, start, end) do { } while (0) |
15 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 15 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
16 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
16 | #define flush_dcache_page(page) do { } while (0) | 17 | #define flush_dcache_page(page) do { } while (0) |
17 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 18 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
18 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 19 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
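ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE gives generic code a compile-time way to know that flush_dcache_page() is a no-op here; architectures that really flush define it to 1 in their own cacheflush.h. A hedged sketch of how a caller might test it (finish_write() is an invented helper):

    /* Hedged sketch: skip the flush entirely on architectures where it is
     * known to be a no-op. */
    static void finish_write(struct page *page)
    {
    #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
            flush_dcache_page(page);
    #endif
    }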
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index b2ba2fc8829a..2533fddd34a6 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __ASM_GENERIC_CMPXCHG_LOCAL_H | 2 | #define __ASM_GENERIC_CMPXCHG_LOCAL_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/irqflags.h> | ||
5 | 6 | ||
6 | extern unsigned long wrong_size_cmpxchg(volatile void *ptr); | 7 | extern unsigned long wrong_size_cmpxchg(volatile void *ptr); |
7 | 8 | ||
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 1c1fa422d18a..2bcc5c7c22a6 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h | |||
@@ -7,6 +7,7 @@ | |||
7 | typedef unsigned long cputime_t; | 7 | typedef unsigned long cputime_t; |
8 | 8 | ||
9 | #define cputime_zero (0UL) | 9 | #define cputime_zero (0UL) |
10 | #define cputime_one_jiffy jiffies_to_cputime(1) | ||
10 | #define cputime_max ((~0UL >> 1) - 1) | 11 | #define cputime_max ((~0UL >> 1) - 1) |
11 | #define cputime_add(__a, __b) ((__a) + (__b)) | 12 | #define cputime_add(__a, __b) ((__a) + (__b)) |
12 | #define cputime_sub(__a, __b) ((__a) - (__b)) | 13 | #define cputime_sub(__a, __b) ((__a) - (__b)) |
@@ -32,10 +33,10 @@ typedef u64 cputime64_t; | |||
32 | 33 | ||
33 | 34 | ||
34 | /* | 35 | /* |
35 | * Convert cputime to milliseconds and back. | 36 | * Convert cputime to microseconds and back. |
36 | */ | 37 | */ |
37 | #define cputime_to_msecs(__ct) jiffies_to_msecs(__ct) | 38 | #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct); |
38 | #define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs) | 39 | #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs); |
39 | 40 | ||
40 | /* | 41 | /* |
41 | * Convert cputime to seconds and back. | 42 | * Convert cputime to seconds and back. |
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h index c17c9600f220..d7c76bba640d 100644 --- a/include/asm-generic/device.h +++ b/include/asm-generic/device.h | |||
@@ -9,4 +9,7 @@ | |||
9 | struct dev_archdata { | 9 | struct dev_archdata { |
10 | }; | 10 | }; |
11 | 11 | ||
12 | struct pdev_archdata { | ||
13 | }; | ||
14 | |||
12 | #endif /* _ASM_GENERIC_DEVICE_H */ | 15 | #endif /* _ASM_GENERIC_DEVICE_H */ |
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index 82cd0cb1c3fe..ccf7b4f34a3c 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h | |||
@@ -72,9 +72,6 @@ dma_set_mask(struct device *dev, u64 mask); | |||
72 | extern int | 72 | extern int |
73 | dma_get_cache_alignment(void); | 73 | dma_get_cache_alignment(void); |
74 | 74 | ||
75 | extern int | ||
76 | dma_is_consistent(struct device *dev, dma_addr_t dma_handle); | ||
77 | |||
78 | extern void | 75 | extern void |
79 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 76 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
80 | enum dma_data_direction direction); | 77 | enum dma_data_direction direction); |
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 5406a601185c..0c80bb38773f 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h | |||
@@ -103,7 +103,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, | |||
103 | if (ops->sync_single_for_cpu) | 103 | if (ops->sync_single_for_cpu) |
104 | ops->sync_single_for_cpu(dev, addr, size, dir); | 104 | ops->sync_single_for_cpu(dev, addr, size, dir); |
105 | debug_dma_sync_single_for_cpu(dev, addr, size, dir); | 105 | debug_dma_sync_single_for_cpu(dev, addr, size, dir); |
106 | flush_write_buffers(); | ||
107 | } | 106 | } |
108 | 107 | ||
109 | static inline void dma_sync_single_for_device(struct device *dev, | 108 | static inline void dma_sync_single_for_device(struct device *dev, |
@@ -116,7 +115,6 @@ static inline void dma_sync_single_for_device(struct device *dev, | |||
116 | if (ops->sync_single_for_device) | 115 | if (ops->sync_single_for_device) |
117 | ops->sync_single_for_device(dev, addr, size, dir); | 116 | ops->sync_single_for_device(dev, addr, size, dir); |
118 | debug_dma_sync_single_for_device(dev, addr, size, dir); | 117 | debug_dma_sync_single_for_device(dev, addr, size, dir); |
119 | flush_write_buffers(); | ||
120 | } | 118 | } |
121 | 119 | ||
122 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 120 | static inline void dma_sync_single_range_for_cpu(struct device *dev, |
@@ -125,16 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, | |||
125 | size_t size, | 123 | size_t size, |
126 | enum dma_data_direction dir) | 124 | enum dma_data_direction dir) |
127 | { | 125 | { |
128 | struct dma_map_ops *ops = get_dma_ops(dev); | 126 | dma_sync_single_for_cpu(dev, addr + offset, size, dir); |
129 | |||
130 | BUG_ON(!valid_dma_direction(dir)); | ||
131 | if (ops->sync_single_range_for_cpu) { | ||
132 | ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
133 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | ||
134 | |||
135 | flush_write_buffers(); | ||
136 | } else | ||
137 | dma_sync_single_for_cpu(dev, addr, size, dir); | ||
138 | } | 127 | } |
139 | 128 | ||
140 | static inline void dma_sync_single_range_for_device(struct device *dev, | 129 | static inline void dma_sync_single_range_for_device(struct device *dev, |
@@ -143,16 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, | |||
143 | size_t size, | 132 | size_t size, |
144 | enum dma_data_direction dir) | 133 | enum dma_data_direction dir) |
145 | { | 134 | { |
146 | struct dma_map_ops *ops = get_dma_ops(dev); | 135 | dma_sync_single_for_device(dev, addr + offset, size, dir); |
147 | |||
148 | BUG_ON(!valid_dma_direction(dir)); | ||
149 | if (ops->sync_single_range_for_device) { | ||
150 | ops->sync_single_range_for_device(dev, addr, offset, size, dir); | ||
151 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | ||
152 | |||
153 | flush_write_buffers(); | ||
154 | } else | ||
155 | dma_sync_single_for_device(dev, addr, size, dir); | ||
156 | } | 136 | } |
157 | 137 | ||
158 | static inline void | 138 | static inline void |
@@ -165,7 +145,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
165 | if (ops->sync_sg_for_cpu) | 145 | if (ops->sync_sg_for_cpu) |
166 | ops->sync_sg_for_cpu(dev, sg, nelems, dir); | 146 | ops->sync_sg_for_cpu(dev, sg, nelems, dir); |
167 | debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); | 147 | debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); |
168 | flush_write_buffers(); | ||
169 | } | 148 | } |
170 | 149 | ||
171 | static inline void | 150 | static inline void |
@@ -179,7 +158,6 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
179 | ops->sync_sg_for_device(dev, sg, nelems, dir); | 158 | ops->sync_sg_for_device(dev, sg, nelems, dir); |
180 | debug_dma_sync_sg_for_device(dev, sg, nelems, dir); | 159 | debug_dma_sync_sg_for_device(dev, sg, nelems, dir); |
181 | 160 | ||
182 | flush_write_buffers(); | ||
183 | } | 161 | } |
184 | 162 | ||
185 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | 163 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) |
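The range variants now simply forward to the plain single-buffer sync with addr + offset, so drivers keep the same calling convention. A hedged driver fragment syncing only part of a streaming mapping ('dev', 'ring', 'desc_offset', 'rx_len' and process_rx() are invented names):

    /* Hedged driver sketch: sync just the received slice of a streaming DMA
     * mapping before the CPU touches it, then give it back to the device. */
    dma_sync_single_range_for_cpu(dev, ring->dma_handle, desc_offset,
                                  rx_len, DMA_FROM_DEVICE);
    process_rx(ring->buf + desc_offset, rx_len);
    dma_sync_single_range_for_device(dev, ring->dma_handle, desc_offset,
                                     rx_len, DMA_FROM_DEVICE);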
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index 4d3e48373e74..0fc16e3f0bfc 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h | |||
@@ -3,8 +3,18 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* open/fcntl - O_SYNC is only implemented on blocks devices and on files | 6 | /* |
7 | located on an ext2 file system */ | 7 | * FMODE_EXEC is 0x20 |
8 | * FMODE_NONOTIFY is 0x1000000 | ||
9 | * These cannot be used by userspace O_* until internal and external open | ||
10 | * flags are split. | ||
11 | * -Eric Paris | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * When introducing new O_* bits, please check its uniqueness in fcntl_init(). | ||
16 | */ | ||
17 | |||
8 | #define O_ACCMODE 00000003 | 18 | #define O_ACCMODE 00000003 |
9 | #define O_RDONLY 00000000 | 19 | #define O_RDONLY 00000000 |
10 | #define O_WRONLY 00000001 | 20 | #define O_WRONLY 00000001 |
@@ -27,8 +37,8 @@ | |||
27 | #ifndef O_NONBLOCK | 37 | #ifndef O_NONBLOCK |
28 | #define O_NONBLOCK 00004000 | 38 | #define O_NONBLOCK 00004000 |
29 | #endif | 39 | #endif |
30 | #ifndef O_SYNC | 40 | #ifndef O_DSYNC |
31 | #define O_SYNC 00010000 | 41 | #define O_DSYNC 00010000 /* used to be O_SYNC, see below */ |
32 | #endif | 42 | #endif |
33 | #ifndef FASYNC | 43 | #ifndef FASYNC |
34 | #define FASYNC 00020000 /* fcntl, for BSD compatibility */ | 44 | #define FASYNC 00020000 /* fcntl, for BSD compatibility */ |
@@ -51,6 +61,25 @@ | |||
51 | #ifndef O_CLOEXEC | 61 | #ifndef O_CLOEXEC |
52 | #define O_CLOEXEC 02000000 /* set close_on_exec */ | 62 | #define O_CLOEXEC 02000000 /* set close_on_exec */ |
53 | #endif | 63 | #endif |
64 | |||
65 | /* | ||
66 | * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using | ||
67 | * the O_SYNC flag. We continue to use the existing numerical value | ||
68 | * for O_DSYNC semantics now, but using the correct symbolic name for it. | ||
69 | * This new value is used to request true Posix O_SYNC semantics. It is | ||
70 | * defined in this strange way to make sure applications compiled against | ||
71 | * new headers get at least O_DSYNC semantics on older kernels. | ||
72 | * | ||
73 | * This has the nice side-effect that we can simply test for O_DSYNC | ||
74 | * wherever we do not care if O_DSYNC or O_SYNC is used. | ||
75 | * | ||
76 | * Note: __O_SYNC must never be used directly. | ||
77 | */ | ||
78 | #ifndef O_SYNC | ||
79 | #define __O_SYNC 04000000 | ||
80 | #define O_SYNC (__O_SYNC|O_DSYNC) | ||
81 | #endif | ||
82 | |||
54 | #ifndef O_NDELAY | 83 | #ifndef O_NDELAY |
55 | #define O_NDELAY O_NONBLOCK | 84 | #define O_NDELAY O_NONBLOCK |
56 | #endif | 85 | #endif |
@@ -74,6 +103,28 @@ | |||
74 | #define F_GETSIG 11 /* for sockets. */ | 103 | #define F_GETSIG 11 /* for sockets. */ |
75 | #endif | 104 | #endif |
76 | 105 | ||
106 | #ifndef CONFIG_64BIT | ||
107 | #ifndef F_GETLK64 | ||
108 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
109 | #define F_SETLK64 13 | ||
110 | #define F_SETLKW64 14 | ||
111 | #endif | ||
112 | #endif | ||
113 | |||
114 | #ifndef F_SETOWN_EX | ||
115 | #define F_SETOWN_EX 15 | ||
116 | #define F_GETOWN_EX 16 | ||
117 | #endif | ||
118 | |||
119 | #define F_OWNER_TID 0 | ||
120 | #define F_OWNER_PID 1 | ||
121 | #define F_OWNER_PGRP 2 | ||
122 | |||
123 | struct f_owner_ex { | ||
124 | int type; | ||
125 | __kernel_pid_t pid; | ||
126 | }; | ||
127 | |||
77 | /* for F_[GET|SET]FL */ | 128 | /* for F_[GET|SET]FL */ |
78 | #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ | 129 | #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ |
79 | 130 | ||
@@ -126,12 +177,6 @@ struct flock { | |||
126 | 177 | ||
127 | #ifndef CONFIG_64BIT | 178 | #ifndef CONFIG_64BIT |
128 | 179 | ||
129 | #ifndef F_GETLK64 | ||
130 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
131 | #define F_SETLK64 13 | ||
132 | #define F_SETLKW64 14 | ||
133 | #endif | ||
134 | |||
135 | #ifndef HAVE_ARCH_STRUCT_FLOCK64 | 180 | #ifndef HAVE_ARCH_STRUCT_FLOCK64 |
136 | #ifndef __ARCH_FLOCK64_PAD | 181 | #ifndef __ARCH_FLOCK64_PAD |
137 | #define __ARCH_FLOCK64_PAD | 182 | #define __ARCH_FLOCK64_PAD |
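Because O_SYNC is now defined as __O_SYNC|O_DSYNC, a program compiled against these headers always passes the historical 00010000 bit as well, so it gets at least O_DSYNC behaviour on kernels that predate the split. A userspace sketch, assuming nothing beyond standard open(2) (open_journal() is an invented function):

    /* Userspace illustration of the split: O_SYNC here expands to
     * __O_SYNC|O_DSYNC, so older kernels still see the old bit. */
    #include <fcntl.h>

    int open_journal(const char *path)
    {
            return open(path, O_WRONLY | O_CREAT | O_SYNC, 0600);
    }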
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index d6c379dc64fa..ff5c66080c8c 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _ASM_GENERIC_GPIO_H | 1 | #ifndef _ASM_GENERIC_GPIO_H |
2 | #define _ASM_GENERIC_GPIO_H | 2 | #define _ASM_GENERIC_GPIO_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | ||
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
6 | 7 | ||
@@ -15,20 +16,34 @@ | |||
15 | * While the GPIO programming interface defines valid GPIO numbers | 16 | * While the GPIO programming interface defines valid GPIO numbers |
16 | * to be in the range 0..MAX_INT, this library restricts them to the | 17 | * to be in the range 0..MAX_INT, this library restricts them to the |
17 | * smaller range 0..ARCH_NR_GPIOS-1. | 18 | * smaller range 0..ARCH_NR_GPIOS-1. |
19 | * | ||
20 | * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of | ||
21 | * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is | ||
22 | * actually an estimate of a board-specific value. | ||
18 | */ | 23 | */ |
19 | 24 | ||
20 | #ifndef ARCH_NR_GPIOS | 25 | #ifndef ARCH_NR_GPIOS |
21 | #define ARCH_NR_GPIOS 256 | 26 | #define ARCH_NR_GPIOS 256 |
22 | #endif | 27 | #endif |
23 | 28 | ||
29 | /* | ||
30 | * "valid" GPIO numbers are nonnegative and may be passed to | ||
31 | * setup routines like gpio_request(). only some valid numbers | ||
32 | * can successfully be requested and used. | ||
33 | * | ||
34 | * Invalid GPIO numbers are useful for indicating no-such-GPIO in | ||
35 | * platform data and other tables. | ||
36 | */ | ||
37 | |||
24 | static inline int gpio_is_valid(int number) | 38 | static inline int gpio_is_valid(int number) |
25 | { | 39 | { |
26 | /* only some non-negative numbers are valid */ | ||
27 | return ((unsigned)number) < ARCH_NR_GPIOS; | 40 | return ((unsigned)number) < ARCH_NR_GPIOS; |
28 | } | 41 | } |
29 | 42 | ||
43 | struct device; | ||
30 | struct seq_file; | 44 | struct seq_file; |
31 | struct module; | 45 | struct module; |
46 | struct device_node; | ||
32 | 47 | ||
33 | /** | 48 | /** |
34 | * struct gpio_chip - abstract a GPIO controller | 49 | * struct gpio_chip - abstract a GPIO controller |
@@ -58,7 +73,9 @@ struct module; | |||
58 | * @names: if set, must be an array of strings to use as alternative | 73 | * @names: if set, must be an array of strings to use as alternative |
59 | * names for the GPIOs in this chip. Any entry in the array | 74 | * names for the GPIOs in this chip. Any entry in the array |
60 | * may be NULL if there is no alias for the GPIO, however the | 75 | * may be NULL if there is no alias for the GPIO, however the |
61 | * array must be @ngpio entries long. | 76 | * array must be @ngpio entries long. A name can include a single printk |
77 | * format specifier for an unsigned int. It is substituted by the actual | ||
78 | * number of the gpio. | ||
62 | * | 79 | * |
63 | * A gpio_chip can help platforms abstract various sources of GPIOs so | 80 | * A gpio_chip can help platforms abstract various sources of GPIOs so |
64 | * they can all be accessed through a common programing interface. | 81 | * they can all be accessed through a common programing interface. |
@@ -86,6 +103,9 @@ struct gpio_chip { | |||
86 | unsigned offset); | 103 | unsigned offset); |
87 | int (*direction_output)(struct gpio_chip *chip, | 104 | int (*direction_output)(struct gpio_chip *chip, |
88 | unsigned offset, int value); | 105 | unsigned offset, int value); |
106 | int (*set_debounce)(struct gpio_chip *chip, | ||
107 | unsigned offset, unsigned debounce); | ||
108 | |||
89 | void (*set)(struct gpio_chip *chip, | 109 | void (*set)(struct gpio_chip *chip, |
90 | unsigned offset, int value); | 110 | unsigned offset, int value); |
91 | 111 | ||
@@ -96,9 +116,20 @@ struct gpio_chip { | |||
96 | struct gpio_chip *chip); | 116 | struct gpio_chip *chip); |
97 | int base; | 117 | int base; |
98 | u16 ngpio; | 118 | u16 ngpio; |
99 | char **names; | 119 | const char *const *names; |
100 | unsigned can_sleep:1; | 120 | unsigned can_sleep:1; |
101 | unsigned exported:1; | 121 | unsigned exported:1; |
122 | |||
123 | #if defined(CONFIG_OF_GPIO) | ||
124 | /* | ||
125 | * If CONFIG_OF is enabled, then all GPIO controllers described in the | ||
126 | * device tree automatically may have an OF translation | ||
127 | */ | ||
128 | struct device_node *of_node; | ||
129 | int of_gpio_n_cells; | ||
130 | int (*of_xlate)(struct gpio_chip *gc, struct device_node *np, | ||
131 | const void *gpio_spec, u32 *flags); | ||
132 | #endif | ||
102 | }; | 133 | }; |
103 | 134 | ||
104 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, | 135 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, |
@@ -108,6 +139,9 @@ extern int __must_check gpiochip_reserve(int start, int ngpio); | |||
108 | /* add/remove chips */ | 139 | /* add/remove chips */ |
109 | extern int gpiochip_add(struct gpio_chip *chip); | 140 | extern int gpiochip_add(struct gpio_chip *chip); |
110 | extern int __must_check gpiochip_remove(struct gpio_chip *chip); | 141 | extern int __must_check gpiochip_remove(struct gpio_chip *chip); |
142 | extern struct gpio_chip *gpiochip_find(void *data, | ||
143 | int (*match)(struct gpio_chip *chip, | ||
144 | void *data)); | ||
111 | 145 | ||
112 | 146 | ||
113 | /* Always use the library code for GPIO management calls, | 147 | /* Always use the library code for GPIO management calls, |
@@ -119,6 +153,8 @@ extern void gpio_free(unsigned gpio); | |||
119 | extern int gpio_direction_input(unsigned gpio); | 153 | extern int gpio_direction_input(unsigned gpio); |
120 | extern int gpio_direction_output(unsigned gpio, int value); | 154 | extern int gpio_direction_output(unsigned gpio, int value); |
121 | 155 | ||
156 | extern int gpio_set_debounce(unsigned gpio, unsigned debounce); | ||
157 | |||
122 | extern int gpio_get_value_cansleep(unsigned gpio); | 158 | extern int gpio_get_value_cansleep(unsigned gpio); |
123 | extern void gpio_set_value_cansleep(unsigned gpio, int value); | 159 | extern void gpio_set_value_cansleep(unsigned gpio, int value); |
124 | 160 | ||
@@ -134,6 +170,32 @@ extern int __gpio_cansleep(unsigned gpio); | |||
134 | 170 | ||
135 | extern int __gpio_to_irq(unsigned gpio); | 171 | extern int __gpio_to_irq(unsigned gpio); |
136 | 172 | ||
173 | #define GPIOF_DIR_OUT (0 << 0) | ||
174 | #define GPIOF_DIR_IN (1 << 0) | ||
175 | |||
176 | #define GPIOF_INIT_LOW (0 << 1) | ||
177 | #define GPIOF_INIT_HIGH (1 << 1) | ||
178 | |||
179 | #define GPIOF_IN (GPIOF_DIR_IN) | ||
180 | #define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) | ||
181 | #define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) | ||
182 | |||
183 | /** | ||
184 | * struct gpio - a structure describing a GPIO with configuration | ||
185 | * @gpio: the GPIO number | ||
186 | * @flags: GPIO configuration as specified by GPIOF_* | ||
187 | * @label: a literal description string of this GPIO | ||
188 | */ | ||
189 | struct gpio { | ||
190 | unsigned gpio; | ||
191 | unsigned long flags; | ||
192 | const char *label; | ||
193 | }; | ||
194 | |||
195 | extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); | ||
196 | extern int gpio_request_array(struct gpio *array, size_t num); | ||
197 | extern void gpio_free_array(struct gpio *array, size_t num); | ||
198 | |||
137 | #ifdef CONFIG_GPIO_SYSFS | 199 | #ifdef CONFIG_GPIO_SYSFS |
138 | 200 | ||
139 | /* | 201 | /* |
@@ -141,11 +203,14 @@ extern int __gpio_to_irq(unsigned gpio); | |||
141 | * but more typically is configured entirely from userspace. | 203 | * but more typically is configured entirely from userspace. |
142 | */ | 204 | */ |
143 | extern int gpio_export(unsigned gpio, bool direction_may_change); | 205 | extern int gpio_export(unsigned gpio, bool direction_may_change); |
206 | extern int gpio_export_link(struct device *dev, const char *name, | ||
207 | unsigned gpio); | ||
208 | extern int gpio_sysfs_set_active_low(unsigned gpio, int value); | ||
144 | extern void gpio_unexport(unsigned gpio); | 209 | extern void gpio_unexport(unsigned gpio); |
145 | 210 | ||
146 | #endif /* CONFIG_GPIO_SYSFS */ | 211 | #endif /* CONFIG_GPIO_SYSFS */ |
147 | 212 | ||
148 | #else /* !CONFIG_HAVE_GPIO_LIB */ | 213 | #else /* !CONFIG_GPIOLIB */ |
149 | 214 | ||
150 | static inline int gpio_is_valid(int number) | 215 | static inline int gpio_is_valid(int number) |
151 | { | 216 | { |
@@ -174,10 +239,12 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value) | |||
174 | gpio_set_value(gpio, value); | 239 | gpio_set_value(gpio, value); |
175 | } | 240 | } |
176 | 241 | ||
177 | #endif /* !CONFIG_HAVE_GPIO_LIB */ | 242 | #endif /* !CONFIG_GPIOLIB */ |
178 | 243 | ||
179 | #ifndef CONFIG_GPIO_SYSFS | 244 | #ifndef CONFIG_GPIO_SYSFS |
180 | 245 | ||
246 | struct device; | ||
247 | |||
181 | /* sysfs support is only available with gpiolib, where it's optional */ | 248 | /* sysfs support is only available with gpiolib, where it's optional */ |
182 | 249 | ||
183 | static inline int gpio_export(unsigned gpio, bool direction_may_change) | 250 | static inline int gpio_export(unsigned gpio, bool direction_may_change) |
@@ -185,6 +252,17 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change) | |||
185 | return -ENOSYS; | 252 | return -ENOSYS; |
186 | } | 253 | } |
187 | 254 | ||
255 | static inline int gpio_export_link(struct device *dev, const char *name, | ||
256 | unsigned gpio) | ||
257 | { | ||
258 | return -ENOSYS; | ||
259 | } | ||
260 | |||
261 | static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) | ||
262 | { | ||
263 | return -ENOSYS; | ||
264 | } | ||
265 | |||
188 | static inline void gpio_unexport(unsigned gpio) | 266 | static inline void gpio_unexport(unsigned gpio) |
189 | { | 267 | { |
190 | } | 268 | } |
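The new GPIOF_* flags, struct gpio and gpio_request_one()/gpio_request_array() collapse the request/set-direction/set-value sequence into a single call. A hedged board-code sketch; the GPIO numbers and labels are invented:

    /* Hedged board-code sketch; GPIO numbers and labels are made up. */
    static struct gpio board_gpios[] = {
            { 42, GPIOF_OUT_INIT_LOW, "status-led"  },
            { 43, GPIOF_IN,           "user-button" },
    };

    static int board_claim_gpios(void)
    {
            return gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));
    }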
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h index 3d5d2c906ab3..04d0a977cd43 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h | |||
@@ -3,26 +3,13 @@ | |||
3 | 3 | ||
4 | #include <linux/cache.h> | 4 | #include <linux/cache.h> |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/irq.h> | ||
7 | 6 | ||
8 | typedef struct { | 7 | typedef struct { |
9 | unsigned long __softirq_pending; | 8 | unsigned int __softirq_pending; |
10 | } ____cacheline_aligned irq_cpustat_t; | 9 | } ____cacheline_aligned irq_cpustat_t; |
11 | 10 | ||
12 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | 11 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ |
13 | 12 | #include <linux/irq.h> | |
14 | #ifndef HARDIRQ_BITS | ||
15 | #define HARDIRQ_BITS 8 | ||
16 | #endif | ||
17 | |||
18 | /* | ||
19 | * The hardirq mask has to be large enough to have | ||
20 | * space for potentially all IRQ sources in the system | ||
21 | * nesting on a single CPU: | ||
22 | */ | ||
23 | #if (1 << HARDIRQ_BITS) < NR_IRQS | ||
24 | # error HARDIRQ_BITS is too low! | ||
25 | #endif | ||
26 | 13 | ||
27 | #ifndef ack_bad_irq | 14 | #ifndef ack_bad_irq |
28 | static inline void ack_bad_irq(unsigned int irq) | 15 | static inline void ack_bad_irq(unsigned int irq) |
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bcee6365dca0..4644c9a7f724 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h | |||
@@ -19,7 +19,9 @@ | |||
19 | #include <asm-generic/iomap.h> | 19 | #include <asm-generic/iomap.h> |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifndef mmiowb | ||
22 | #define mmiowb() do {} while (0) | 23 | #define mmiowb() do {} while (0) |
24 | #endif | ||
23 | 25 | ||
24 | /*****************************************************************************/ | 26 | /*****************************************************************************/ |
25 | /* | 27 | /* |
@@ -28,39 +30,51 @@ | |||
28 | * differently. On the simple architectures, we just read/write the | 30 | * differently. On the simple architectures, we just read/write the |
29 | * memory location directly. | 31 | * memory location directly. |
30 | */ | 32 | */ |
33 | #ifndef __raw_readb | ||
31 | static inline u8 __raw_readb(const volatile void __iomem *addr) | 34 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
32 | { | 35 | { |
33 | return *(const volatile u8 __force *) addr; | 36 | return *(const volatile u8 __force *) addr; |
34 | } | 37 | } |
38 | #endif | ||
35 | 39 | ||
40 | #ifndef __raw_readw | ||
36 | static inline u16 __raw_readw(const volatile void __iomem *addr) | 41 | static inline u16 __raw_readw(const volatile void __iomem *addr) |
37 | { | 42 | { |
38 | return *(const volatile u16 __force *) addr; | 43 | return *(const volatile u16 __force *) addr; |
39 | } | 44 | } |
45 | #endif | ||
40 | 46 | ||
47 | #ifndef __raw_readl | ||
41 | static inline u32 __raw_readl(const volatile void __iomem *addr) | 48 | static inline u32 __raw_readl(const volatile void __iomem *addr) |
42 | { | 49 | { |
43 | return *(const volatile u32 __force *) addr; | 50 | return *(const volatile u32 __force *) addr; |
44 | } | 51 | } |
52 | #endif | ||
45 | 53 | ||
46 | #define readb __raw_readb | 54 | #define readb __raw_readb |
47 | #define readw(addr) __le16_to_cpu(__raw_readw(addr)) | 55 | #define readw(addr) __le16_to_cpu(__raw_readw(addr)) |
48 | #define readl(addr) __le32_to_cpu(__raw_readl(addr)) | 56 | #define readl(addr) __le32_to_cpu(__raw_readl(addr)) |
49 | 57 | ||
58 | #ifndef __raw_writeb | ||
50 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | 59 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) |
51 | { | 60 | { |
52 | *(volatile u8 __force *) addr = b; | 61 | *(volatile u8 __force *) addr = b; |
53 | } | 62 | } |
63 | #endif | ||
54 | 64 | ||
65 | #ifndef __raw_writew | ||
55 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) | 66 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) |
56 | { | 67 | { |
57 | *(volatile u16 __force *) addr = b; | 68 | *(volatile u16 __force *) addr = b; |
58 | } | 69 | } |
70 | #endif | ||
59 | 71 | ||
72 | #ifndef __raw_writel | ||
60 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) | 73 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) |
61 | { | 74 | { |
62 | *(volatile u32 __force *) addr = b; | 75 | *(volatile u32 __force *) addr = b; |
63 | } | 76 | } |
77 | #endif | ||
64 | 78 | ||
65 | #define writeb __raw_writeb | 79 | #define writeb __raw_writeb |
66 | #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) | 80 | #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) |
@@ -122,6 +136,7 @@ static inline void outl(u32 b, unsigned long addr) | |||
122 | #define outw_p(x, addr) outw((x), (addr)) | 136 | #define outw_p(x, addr) outw((x), (addr)) |
123 | #define outl_p(x, addr) outl((x), (addr)) | 137 | #define outl_p(x, addr) outl((x), (addr)) |
124 | 138 | ||
139 | #ifndef insb | ||
125 | static inline void insb(unsigned long addr, void *buffer, int count) | 140 | static inline void insb(unsigned long addr, void *buffer, int count) |
126 | { | 141 | { |
127 | if (count) { | 142 | if (count) { |
@@ -132,7 +147,9 @@ static inline void insb(unsigned long addr, void *buffer, int count) | |||
132 | } while (--count); | 147 | } while (--count); |
133 | } | 148 | } |
134 | } | 149 | } |
150 | #endif | ||
135 | 151 | ||
152 | #ifndef insw | ||
136 | static inline void insw(unsigned long addr, void *buffer, int count) | 153 | static inline void insw(unsigned long addr, void *buffer, int count) |
137 | { | 154 | { |
138 | if (count) { | 155 | if (count) { |
@@ -143,7 +160,9 @@ static inline void insw(unsigned long addr, void *buffer, int count) | |||
143 | } while (--count); | 160 | } while (--count); |
144 | } | 161 | } |
145 | } | 162 | } |
163 | #endif | ||
146 | 164 | ||
165 | #ifndef insl | ||
147 | static inline void insl(unsigned long addr, void *buffer, int count) | 166 | static inline void insl(unsigned long addr, void *buffer, int count) |
148 | { | 167 | { |
149 | if (count) { | 168 | if (count) { |
@@ -154,7 +173,9 @@ static inline void insl(unsigned long addr, void *buffer, int count) | |||
154 | } while (--count); | 173 | } while (--count); |
155 | } | 174 | } |
156 | } | 175 | } |
176 | #endif | ||
157 | 177 | ||
178 | #ifndef outsb | ||
158 | static inline void outsb(unsigned long addr, const void *buffer, int count) | 179 | static inline void outsb(unsigned long addr, const void *buffer, int count) |
159 | { | 180 | { |
160 | if (count) { | 181 | if (count) { |
@@ -164,7 +185,9 @@ static inline void outsb(unsigned long addr, const void *buffer, int count) | |||
164 | } while (--count); | 185 | } while (--count); |
165 | } | 186 | } |
166 | } | 187 | } |
188 | #endif | ||
167 | 189 | ||
190 | #ifndef outsw | ||
168 | static inline void outsw(unsigned long addr, const void *buffer, int count) | 191 | static inline void outsw(unsigned long addr, const void *buffer, int count) |
169 | { | 192 | { |
170 | if (count) { | 193 | if (count) { |
@@ -174,7 +197,9 @@ static inline void outsw(unsigned long addr, const void *buffer, int count) | |||
174 | } while (--count); | 197 | } while (--count); |
175 | } | 198 | } |
176 | } | 199 | } |
200 | #endif | ||
177 | 201 | ||
202 | #ifndef outsl | ||
178 | static inline void outsl(unsigned long addr, const void *buffer, int count) | 203 | static inline void outsl(unsigned long addr, const void *buffer, int count) |
179 | { | 204 | { |
180 | if (count) { | 205 | if (count) { |
@@ -184,15 +209,50 @@ static inline void outsl(unsigned long addr, const void *buffer, int count) | |||
184 | } while (--count); | 209 | } while (--count); |
185 | } | 210 | } |
186 | } | 211 | } |
212 | #endif | ||
213 | |||
214 | static inline void readsl(const void __iomem *addr, void *buf, int len) | ||
215 | { | ||
216 | insl((unsigned long)addr, buf, len); | ||
217 | } | ||
218 | |||
219 | static inline void readsw(const void __iomem *addr, void *buf, int len) | ||
220 | { | ||
221 | insw((unsigned long)addr, buf, len); | ||
222 | } | ||
223 | |||
224 | static inline void readsb(const void __iomem *addr, void *buf, int len) | ||
225 | { | ||
226 | insb((unsigned long)addr, buf, len); | ||
227 | } | ||
228 | |||
229 | static inline void writesl(const void __iomem *addr, const void *buf, int len) | ||
230 | { | ||
231 | outsl((unsigned long)addr, buf, len); | ||
232 | } | ||
233 | |||
234 | static inline void writesw(const void __iomem *addr, const void *buf, int len) | ||
235 | { | ||
236 | outsw((unsigned long)addr, buf, len); | ||
237 | } | ||
238 | |||
239 | static inline void writesb(const void __iomem *addr, const void *buf, int len) | ||
240 | { | ||
241 | outsb((unsigned long)addr, buf, len); | ||
242 | } | ||
187 | 243 | ||
188 | #ifndef CONFIG_GENERIC_IOMAP | 244 | #ifndef CONFIG_GENERIC_IOMAP |
189 | #define ioread8(addr) readb(addr) | 245 | #define ioread8(addr) readb(addr) |
190 | #define ioread16(addr) readw(addr) | 246 | #define ioread16(addr) readw(addr) |
247 | #define ioread16be(addr) be16_to_cpu(ioread16(addr)) | ||
191 | #define ioread32(addr) readl(addr) | 248 | #define ioread32(addr) readl(addr) |
249 | #define ioread32be(addr) be32_to_cpu(ioread32(addr)) | ||
192 | 250 | ||
193 | #define iowrite8(v, addr) writeb((v), (addr)) | 251 | #define iowrite8(v, addr) writeb((v), (addr)) |
194 | #define iowrite16(v, addr) writew((v), (addr)) | 252 | #define iowrite16(v, addr) writew((v), (addr)) |
253 | #define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) | ||
195 | #define iowrite32(v, addr) writel((v), (addr)) | 254 | #define iowrite32(v, addr) writel((v), (addr)) |
255 | #define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr)) | ||
196 | 256 | ||
197 | #define ioread8_rep(p, dst, count) \ | 257 | #define ioread8_rep(p, dst, count) \ |
198 | insb((unsigned long) (p), (dst), (count)) | 258 | insb((unsigned long) (p), (dst), (count)) |
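The readsb()/readsw()/readsl() and writesb()/writesw()/writesl() string accessors added above are thin wrappers around the port-style insX()/outsX() helpers, and ioread16be()/ioread32be() (plus their iowrite counterparts) add byte-swapping variants of the ioread*() family. A minimal sketch of how a driver might use them, assuming a hypothetical MMIO device with a big-endian status word at offset 0x0 and a 16-bit data FIFO at offset 0x10 (register names and offsets are illustrative only):

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEV_STATUS      0x00    /* big-endian status register */
    #define DEV_FIFO        0x10    /* 16-bit data FIFO */

    static void dev_drain_fifo(void __iomem *base, u16 *buf, int words)
    {
            /* ioread32be() byte-swaps on little-endian hosts */
            u32 status = ioread32be(base + DEV_STATUS);

            if (status & 1)
                    /* readsw() reads the same 16-bit register repeatedly */
                    readsw(base + DEV_FIFO, buf, words);
    }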
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h index a799e20a769e..3f3f2d189fb8 100644 --- a/include/asm-generic/ioctls.h +++ b/include/asm-generic/ioctls.h | |||
@@ -62,13 +62,17 @@ | |||
62 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) | 62 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) |
63 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) | 63 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) |
64 | #define TIOCGRS485 0x542E | 64 | #define TIOCGRS485 0x542E |
65 | #ifndef TIOCSRS485 | ||
65 | #define TIOCSRS485 0x542F | 66 | #define TIOCSRS485 0x542F |
67 | #endif | ||
66 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 68 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
67 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | 69 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ |
70 | #define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ | ||
68 | #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ | 71 | #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ |
69 | #define TCSETX 0x5433 | 72 | #define TCSETX 0x5433 |
70 | #define TCSETXF 0x5434 | 73 | #define TCSETXF 0x5434 |
71 | #define TCSETXW 0x5435 | 74 | #define TCSETXW 0x5435 |
75 | #define TIOCSIG _IOW('T', 0x36, int) /* pty: generate signal */ | ||
72 | 76 | ||
73 | #define FIONCLEX 0x5450 | 77 | #define FIONCLEX 0x5450 |
74 | #define FIOCLEX 0x5451 | 78 | #define FIOCLEX 0x5451 |
@@ -87,12 +91,10 @@ | |||
87 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | 91 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ |
88 | 92 | ||
89 | /* | 93 | /* |
90 | * some architectures define FIOQSIZE as 0x545E, which is used for | 94 | * Some arches already define FIOQSIZE due to a historical |
91 | * TIOCGHAYESESP on others | 95 | * conflict with a Hayes modem-specific ioctl value. |
92 | */ | 96 | */ |
93 | #ifndef FIOQSIZE | 97 | #ifndef FIOQSIZE |
94 | # define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
95 | # define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
96 | # define FIOQSIZE 0x5460 | 98 | # define FIOQSIZE 0x5460 |
97 | #endif | 99 | #endif |
98 | 100 | ||
@@ -104,6 +106,7 @@ | |||
104 | #define TIOCPKT_START 8 | 106 | #define TIOCPKT_START 8 |
105 | #define TIOCPKT_NOSTOP 16 | 107 | #define TIOCPKT_NOSTOP 16 |
106 | #define TIOCPKT_DOSTOP 32 | 108 | #define TIOCPKT_DOSTOP 32 |
109 | #define TIOCPKT_IOCTL 64 | ||
107 | 110 | ||
108 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | 111 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ |
109 | 112 | ||
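TIOCGDEV reports the device number backing a console or pty file descriptor, and TIOCSIG lets the master side of a pty inject a signal. A minimal userspace sketch for TIOCGDEV, assuming the ioctl constant has reached the libc headers (on older toolchains it may need to come from the kernel's exported <asm/ioctls.h>); the returned value is a kernel-encoded device number printed here only in raw form:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            unsigned int dev;
            int fd = open("/dev/console", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* reports the primary device node behind /dev/console */
            if (ioctl(fd, TIOCGDEV, &dev) == 0)
                    printf("console maps to device number 0x%x\n", dev);
            close(fd);
            return 0;
    }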
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h index 5ae1d07d4a12..6bf9355fa7eb 100644 --- a/include/asm-generic/irq_regs.h +++ b/include/asm-generic/irq_regs.h | |||
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs); | |||
22 | 22 | ||
23 | static inline struct pt_regs *get_irq_regs(void) | 23 | static inline struct pt_regs *get_irq_regs(void) |
24 | { | 24 | { |
25 | return __get_cpu_var(__irq_regs); | 25 | return __this_cpu_read(__irq_regs); |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | 28 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) |
29 | { | 29 | { |
30 | struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs); | 30 | struct pt_regs *old_regs; |
31 | 31 | ||
32 | old_regs = *pp_regs; | 32 | old_regs = __this_cpu_read(__irq_regs); |
33 | *pp_regs = new_regs; | 33 | __this_cpu_write(__irq_regs, new_regs); |
34 | return old_regs; | 34 | return old_regs; |
35 | } | 35 | } |
36 | 36 | ||
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h index 9aebf618275a..1f40d0024cf3 100644 --- a/include/asm-generic/irqflags.h +++ b/include/asm-generic/irqflags.h | |||
@@ -5,68 +5,62 @@ | |||
5 | * All architectures should implement at least the first two functions, | 5 | * All architectures should implement at least the first two functions, |
6 | * usually inline assembly will be the best way. | 6 | * usually inline assembly will be the best way. |
7 | */ | 7 | */ |
8 | #ifndef RAW_IRQ_DISABLED | 8 | #ifndef ARCH_IRQ_DISABLED |
9 | #define RAW_IRQ_DISABLED 0 | 9 | #define ARCH_IRQ_DISABLED 0 |
10 | #define RAW_IRQ_ENABLED 1 | 10 | #define ARCH_IRQ_ENABLED 1 |
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | /* read interrupt enabled status */ | 13 | /* read interrupt enabled status */ |
14 | #ifndef __raw_local_save_flags | 14 | #ifndef arch_local_save_flags |
15 | unsigned long __raw_local_save_flags(void); | 15 | unsigned long arch_local_save_flags(void); |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | /* set interrupt enabled status */ | 18 | /* set interrupt enabled status */ |
19 | #ifndef raw_local_irq_restore | 19 | #ifndef arch_local_irq_restore |
20 | void raw_local_irq_restore(unsigned long flags); | 20 | void arch_local_irq_restore(unsigned long flags); |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | /* get status and disable interrupts */ | 23 | /* get status and disable interrupts */ |
24 | #ifndef __raw_local_irq_save | 24 | #ifndef arch_local_irq_save |
25 | static inline unsigned long __raw_local_irq_save(void) | 25 | static inline unsigned long arch_local_irq_save(void) |
26 | { | 26 | { |
27 | unsigned long flags; | 27 | unsigned long flags; |
28 | flags = __raw_local_save_flags(); | 28 | flags = arch_local_save_flags(); |
29 | raw_local_irq_restore(RAW_IRQ_DISABLED); | 29 | arch_local_irq_restore(ARCH_IRQ_DISABLED); |
30 | return flags; | 30 | return flags; |
31 | } | 31 | } |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | /* test flags */ | 34 | /* test flags */ |
35 | #ifndef raw_irqs_disabled_flags | 35 | #ifndef arch_irqs_disabled_flags |
36 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 36 | static inline int arch_irqs_disabled_flags(unsigned long flags) |
37 | { | 37 | { |
38 | return flags == RAW_IRQ_DISABLED; | 38 | return flags == ARCH_IRQ_DISABLED; |
39 | } | 39 | } |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | /* unconditionally enable interrupts */ | 42 | /* unconditionally enable interrupts */ |
43 | #ifndef raw_local_irq_enable | 43 | #ifndef arch_local_irq_enable |
44 | static inline void raw_local_irq_enable(void) | 44 | static inline void arch_local_irq_enable(void) |
45 | { | 45 | { |
46 | raw_local_irq_restore(RAW_IRQ_ENABLED); | 46 | arch_local_irq_restore(ARCH_IRQ_ENABLED); |
47 | } | 47 | } |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* unconditionally disable interrupts */ | 50 | /* unconditionally disable interrupts */ |
51 | #ifndef raw_local_irq_disable | 51 | #ifndef arch_local_irq_disable |
52 | static inline void raw_local_irq_disable(void) | 52 | static inline void arch_local_irq_disable(void) |
53 | { | 53 | { |
54 | raw_local_irq_restore(RAW_IRQ_DISABLED); | 54 | arch_local_irq_restore(ARCH_IRQ_DISABLED); |
55 | } | 55 | } |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | /* test hardware interrupt enable bit */ | 58 | /* test hardware interrupt enable bit */ |
59 | #ifndef raw_irqs_disabled | 59 | #ifndef arch_irqs_disabled |
60 | static inline int raw_irqs_disabled(void) | 60 | static inline int arch_irqs_disabled(void) |
61 | { | 61 | { |
62 | return raw_irqs_disabled_flags(__raw_local_save_flags()); | 62 | return arch_irqs_disabled_flags(arch_local_save_flags()); |
63 | } | 63 | } |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #define raw_local_save_flags(flags) \ | ||
67 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
68 | |||
69 | #define raw_local_irq_save(flags) \ | ||
70 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
71 | |||
72 | #endif /* __ASM_GENERIC_IRQFLAGS_H */ | 66 | #endif /* __ASM_GENERIC_IRQFLAGS_H */ |
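Under the arch_* naming an architecture only has to supply arch_local_save_flags() and arch_local_irq_restore(); the generic header derives the save/enable/disable/test helpers, and linux/irqflags.h layers the raw_*/local_irq_* macros and lockdep hooks on top. A minimal sketch of what an arch's <asm/irqflags.h> might look like, using a hypothetical PSR control-register accessor (psr_read()/psr_write() are illustrative, not a real API):

    #define ARCH_IRQ_DISABLED       0
    #define ARCH_IRQ_ENABLED        1

    /* hypothetical accessors for a status register whose bit 0 is IE */
    extern unsigned long psr_read(void);
    extern void psr_write(unsigned long val);

    static inline unsigned long arch_local_save_flags(void)
    {
            return (psr_read() & 1) ? ARCH_IRQ_ENABLED : ARCH_IRQ_DISABLED;
    }
    #define arch_local_save_flags arch_local_save_flags

    static inline void arch_local_irq_restore(unsigned long flags)
    {
            psr_write((psr_read() & ~1UL) | (flags == ARCH_IRQ_ENABLED));
    }
    #define arch_local_irq_restore arch_local_irq_restore

    /* everything else falls through to the asm-generic defaults */
    #include <asm-generic/irqflags.h>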
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h index 11e57b6a85fc..d1814497bcdb 100644 --- a/include/asm-generic/kdebug.h +++ b/include/asm-generic/kdebug.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | enum die_val { | 4 | enum die_val { |
5 | DIE_UNUSED, | 5 | DIE_UNUSED, |
6 | DIE_OOPS=1 | 6 | DIE_OOPS = 1, |
7 | }; | 7 | }; |
8 | 8 | ||
9 | #endif /* _ASM_GENERIC_KDEBUG_H */ | 9 | #endif /* _ASM_GENERIC_KDEBUG_H */ |
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h index eddbce0f9fb9..0232ccb76f2b 100644 --- a/include/asm-generic/kmap_types.h +++ b/include/asm-generic/kmap_types.h | |||
@@ -2,34 +2,39 @@ | |||
2 | #define _ASM_GENERIC_KMAP_TYPES_H | 2 | #define _ASM_GENERIC_KMAP_TYPES_H |
3 | 3 | ||
4 | #ifdef __WITH_KM_FENCE | 4 | #ifdef __WITH_KM_FENCE |
5 | # define D(n) __KM_FENCE_##n , | 5 | # define KMAP_D(n) __KM_FENCE_##n , |
6 | #else | 6 | #else |
7 | # define D(n) | 7 | # define KMAP_D(n) |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | enum km_type { | 10 | enum km_type { |
11 | D(0) KM_BOUNCE_READ, | 11 | KMAP_D(0) KM_BOUNCE_READ, |
12 | D(1) KM_SKB_SUNRPC_DATA, | 12 | KMAP_D(1) KM_SKB_SUNRPC_DATA, |
13 | D(2) KM_SKB_DATA_SOFTIRQ, | 13 | KMAP_D(2) KM_SKB_DATA_SOFTIRQ, |
14 | D(3) KM_USER0, | 14 | KMAP_D(3) KM_USER0, |
15 | D(4) KM_USER1, | 15 | KMAP_D(4) KM_USER1, |
16 | D(5) KM_BIO_SRC_IRQ, | 16 | KMAP_D(5) KM_BIO_SRC_IRQ, |
17 | D(6) KM_BIO_DST_IRQ, | 17 | KMAP_D(6) KM_BIO_DST_IRQ, |
18 | D(7) KM_PTE0, | 18 | KMAP_D(7) KM_PTE0, |
19 | D(8) KM_PTE1, | 19 | KMAP_D(8) KM_PTE1, |
20 | D(9) KM_IRQ0, | 20 | KMAP_D(9) KM_IRQ0, |
21 | D(10) KM_IRQ1, | 21 | KMAP_D(10) KM_IRQ1, |
22 | D(11) KM_SOFTIRQ0, | 22 | KMAP_D(11) KM_SOFTIRQ0, |
23 | D(12) KM_SOFTIRQ1, | 23 | KMAP_D(12) KM_SOFTIRQ1, |
24 | D(13) KM_SYNC_ICACHE, | 24 | KMAP_D(13) KM_SYNC_ICACHE, |
25 | D(14) KM_SYNC_DCACHE, | 25 | KMAP_D(14) KM_SYNC_DCACHE, |
26 | D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ | 26 | /* UML specific, for copy_*_user - used in do_op_one_page */ |
27 | D(16) KM_IRQ_PTE, | 27 | KMAP_D(15) KM_UML_USERCOPY, |
28 | D(17) KM_NMI, | 28 | KMAP_D(16) KM_IRQ_PTE, |
29 | D(18) KM_NMI_PTE, | 29 | KMAP_D(17) KM_NMI, |
30 | D(19) KM_TYPE_NR | 30 | KMAP_D(18) KM_NMI_PTE, |
31 | KMAP_D(19) KM_KDB, | ||
32 | /* | ||
33 | * Remember to update debug_kmap_atomic() when adding new kmap types! | ||
34 | */ | ||
35 | KMAP_D(20) KM_TYPE_NR | ||
31 | }; | 36 | }; |
32 | 37 | ||
33 | #undef D | 38 | #undef KMAP_D |
34 | 39 | ||
35 | #endif | 40 | #endif |
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h index fc218444e315..c8a5d68541d7 100644 --- a/include/asm-generic/local.h +++ b/include/asm-generic/local.h | |||
@@ -52,23 +52,4 @@ typedef struct | |||
52 | #define __local_add(i,l) local_set((l), local_read(l) + (i)) | 52 | #define __local_add(i,l) local_set((l), local_read(l) + (i)) |
53 | #define __local_sub(i,l) local_set((l), local_read(l) - (i)) | 53 | #define __local_sub(i,l) local_set((l), local_read(l) - (i)) |
54 | 54 | ||
55 | /* Use these for per-cpu local_t variables: on some archs they are | ||
56 | * much more efficient than these naive implementations. Note they take | ||
57 | * a variable (eg. mystruct.foo), not an address. | ||
58 | */ | ||
59 | #define cpu_local_read(l) local_read(&__get_cpu_var(l)) | ||
60 | #define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i)) | ||
61 | #define cpu_local_inc(l) local_inc(&__get_cpu_var(l)) | ||
62 | #define cpu_local_dec(l) local_dec(&__get_cpu_var(l)) | ||
63 | #define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l)) | ||
64 | #define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l)) | ||
65 | |||
66 | /* Non-atomic increments, ie. preemption disabled and won't be touched | ||
67 | * in interrupt, etc. Some archs can optimize this case well. | ||
68 | */ | ||
69 | #define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l)) | ||
70 | #define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l)) | ||
71 | #define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l)) | ||
72 | #define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l)) | ||
73 | |||
74 | #endif /* _ASM_GENERIC_LOCAL_H */ | 55 | #endif /* _ASM_GENERIC_LOCAL_H */ |
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h new file mode 100644 index 000000000000..02ac760c1a8b --- /dev/null +++ b/include/asm-generic/local64.h | |||
@@ -0,0 +1,96 @@ | |||
1 | #ifndef _ASM_GENERIC_LOCAL64_H | ||
2 | #define _ASM_GENERIC_LOCAL64_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | #include <asm/types.h> | ||
6 | |||
7 | /* | ||
8 | * A signed long type for operations which are atomic for a single CPU. | ||
9 | * Usually used in combination with per-cpu variables. | ||
10 | * | ||
11 | * This is the default implementation, which uses atomic64_t. Which is | ||
12 | * rather pointless. The whole point behind local64_t is that some processors | ||
13 | * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs | ||
14 | * running on this CPU. local64_t allows exploitation of such capabilities. | ||
15 | */ | ||
16 | |||
17 | /* Implement in terms of atomics. */ | ||
18 | |||
19 | #if BITS_PER_LONG == 64 | ||
20 | |||
21 | #include <asm/local.h> | ||
22 | |||
23 | typedef struct { | ||
24 | local_t a; | ||
25 | } local64_t; | ||
26 | |||
27 | #define LOCAL64_INIT(i) { LOCAL_INIT(i) } | ||
28 | |||
29 | #define local64_read(l) local_read(&(l)->a) | ||
30 | #define local64_set(l,i) local_set((&(l)->a),(i)) | ||
31 | #define local64_inc(l) local_inc(&(l)->a) | ||
32 | #define local64_dec(l) local_dec(&(l)->a) | ||
33 | #define local64_add(i,l) local_add((i),(&(l)->a)) | ||
34 | #define local64_sub(i,l) local_sub((i),(&(l)->a)) | ||
35 | |||
36 | #define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) | ||
37 | #define local64_dec_and_test(l) local_dec_and_test(&(l)->a) | ||
38 | #define local64_inc_and_test(l) local_inc_and_test(&(l)->a) | ||
39 | #define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) | ||
40 | #define local64_add_return(i, l) local_add_return((i), (&(l)->a)) | ||
41 | #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) | ||
42 | #define local64_inc_return(l) local_inc_return(&(l)->a) | ||
43 | |||
44 | #define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) | ||
45 | #define local64_xchg(l, n) local_xchg((&(l)->a), (n)) | ||
46 | #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) | ||
47 | #define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) | ||
48 | |||
49 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
50 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
51 | #define __local64_inc(l) local64_set((l), local64_read(l) + 1) | ||
52 | #define __local64_dec(l) local64_set((l), local64_read(l) - 1) | ||
53 | #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) | ||
54 | #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) | ||
55 | |||
56 | #else /* BITS_PER_LONG != 64 */ | ||
57 | |||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | /* Don't use typedef: don't want them to be mixed with atomic_t's. */ | ||
61 | typedef struct { | ||
62 | atomic64_t a; | ||
63 | } local64_t; | ||
64 | |||
65 | #define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
66 | |||
67 | #define local64_read(l) atomic64_read(&(l)->a) | ||
68 | #define local64_set(l,i) atomic64_set((&(l)->a),(i)) | ||
69 | #define local64_inc(l) atomic64_inc(&(l)->a) | ||
70 | #define local64_dec(l) atomic64_dec(&(l)->a) | ||
71 | #define local64_add(i,l) atomic64_add((i),(&(l)->a)) | ||
72 | #define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) | ||
73 | |||
74 | #define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) | ||
75 | #define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) | ||
76 | #define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) | ||
77 | #define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) | ||
78 | #define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) | ||
79 | #define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) | ||
80 | #define local64_inc_return(l) atomic64_inc_return(&(l)->a) | ||
81 | |||
82 | #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) | ||
83 | #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) | ||
84 | #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) | ||
85 | #define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) | ||
86 | |||
87 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
88 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
89 | #define __local64_inc(l) local64_set((l), local64_read(l) + 1) | ||
90 | #define __local64_dec(l) local64_set((l), local64_read(l) - 1) | ||
91 | #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) | ||
92 | #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) | ||
93 | |||
94 | #endif /* BITS_PER_LONG != 64 */ | ||
95 | |||
96 | #endif /* _ASM_GENERIC_LOCAL64_H */ | ||
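local64_t is meant for counters that are only updated by the owning CPU, typically embedded in per-cpu data, where IRQ-safe adds are enough and a LOCK-prefixed atomic would be wasted. A minimal sketch, assuming a hypothetical per-cpu byte counter and callers that run with preemption disabled (e.g. from IRQ context):

    #include <linux/percpu.h>
    #include <asm/local64.h>

    /* hypothetical per-cpu statistics block */
    struct dev_stats {
            local64_t bytes;
    };

    static DEFINE_PER_CPU(struct dev_stats, dev_stats);

    static void account_bytes(unsigned long n)
    {
            /* atomic wrt interrupts on this CPU; caller is assumed to have
             * preemption disabled so the per-cpu pointer stays valid */
            local64_add(n, &this_cpu_ptr(&dev_stats)->bytes);
    }

    static u64 total_bytes(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += local64_read(&per_cpu(dev_stats, cpu).bytes);
            return sum;
    }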
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 4c8d0afae711..fb2d63f13f4c 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #elif defined(CONFIG_SPARSEMEM_VMEMMAP) | 48 | #elif defined(CONFIG_SPARSEMEM_VMEMMAP) |
49 | 49 | ||
50 | /* memmap is virtually contigious. */ | 50 | /* memmap is virtually contiguous. */ |
51 | #define __pfn_to_page(pfn) (vmemmap + (pfn)) | 51 | #define __pfn_to_page(pfn) (vmemmap + (pfn)) |
52 | #define __page_to_pfn(page) (unsigned long)((page) - vmemmap) | 52 | #define __page_to_pfn(page) (unsigned long)((page) - vmemmap) |
53 | 53 | ||
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 3b69ad34189a..3da9e2742fa0 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h | |||
@@ -19,6 +19,11 @@ | |||
19 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ | 19 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ |
20 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ | 20 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ |
21 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ | 21 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ |
22 | #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED | ||
23 | # define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ | ||
24 | #else | ||
25 | # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ | ||
26 | #endif | ||
22 | 27 | ||
23 | #define MS_ASYNC 1 /* sync memory asynchronously */ | 28 | #define MS_ASYNC 1 /* sync memory asynchronously */ |
24 | #define MS_INVALIDATE 2 /* invalidate the caches */ | 29 | #define MS_INVALIDATE 2 /* invalidate the caches */ |
@@ -34,6 +39,11 @@ | |||
34 | #define MADV_REMOVE 9 /* remove these pages & resources */ | 39 | #define MADV_REMOVE 9 /* remove these pages & resources */ |
35 | #define MADV_DONTFORK 10 /* don't inherit across fork */ | 40 | #define MADV_DONTFORK 10 /* don't inherit across fork */ |
36 | #define MADV_DOFORK 11 /* do inherit across fork */ | 41 | #define MADV_DOFORK 11 /* do inherit across fork */ |
42 | #define MADV_HWPOISON 100 /* poison a page for testing */ | ||
43 | #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ | ||
44 | |||
45 | #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ | ||
46 | #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ | ||
37 | 47 | ||
38 | /* compatibility flags */ | 48 | /* compatibility flags */ |
39 | #define MAP_FILE 0 | 49 | #define MAP_FILE 0 |
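MADV_MERGEABLE and MADV_UNMERGEABLE let a process opt anonymous regions in and out of KSM page merging, while MADV_HWPOISON and MADV_SOFT_OFFLINE are testing hooks for the memory-failure code. A minimal userspace sketch for the KSM advice, assuming a kernel built with CONFIG_KSM and libc headers that export the new constant:

    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 64 << 20;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;
            /* ask KSM to scan this range for identical pages to merge;
             * fails with EINVAL on kernels without CONFIG_KSM */
            if (madvise(buf, len, MADV_MERGEABLE))
                    return 1;
            /* ... fill buf with largely duplicate data ... */
            return 0;
    }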
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h index 7cab4de2bca6..32c8bd6a196d 100644 --- a/include/asm-generic/mman.h +++ b/include/asm-generic/mman.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | 11 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ |
12 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | 12 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ |
13 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ | 13 | #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ |
14 | #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ | ||
14 | 15 | ||
15 | #define MCL_CURRENT 1 /* lock all current mappings */ | 16 | #define MCL_CURRENT 1 /* lock all current mappings */ |
16 | #define MCL_FUTURE 2 /* lock all future mappings */ | 17 | #define MCL_FUTURE 2 /* lock all future mappings */ |
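MAP_HUGETLB requests that an anonymous mapping be backed by pages from the hugetlb pool. A short userspace sketch, assuming huge pages have been reserved (e.g. via /proc/sys/vm/nr_hugepages) and that the libc headers export the flag; the mapping length must be a multiple of the huge page size:

    #include <stdio.h>
    #include <sys/mman.h>

    #define LENGTH  (256UL << 20)  /* multiple of the huge page size */

    int main(void)
    {
            void *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)");
                    return 1;
            }
            munmap(p, LENGTH);
            return 0;
    }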
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h index 37b3706226e7..1437b7da09b2 100644 --- a/include/asm-generic/pci-dma-compat.h +++ b/include/asm-generic/pci-dma-compat.h | |||
@@ -6,9 +6,6 @@ | |||
6 | 6 | ||
7 | #include <linux/dma-mapping.h> | 7 | #include <linux/dma-mapping.h> |
8 | 8 | ||
9 | /* note pci_set_dma_mask isn't here, since it's a public function | ||
10 | * exported from drivers/pci, use dma_supported instead */ | ||
11 | |||
12 | static inline int | 9 | static inline int |
13 | pci_dma_supported(struct pci_dev *hwdev, u64 mask) | 10 | pci_dma_supported(struct pci_dev *hwdev, u64 mask) |
14 | { | 11 | { |
@@ -104,4 +101,16 @@ pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) | |||
104 | return dma_mapping_error(&pdev->dev, dma_addr); | 101 | return dma_mapping_error(&pdev->dev, dma_addr); |
105 | } | 102 | } |
106 | 103 | ||
104 | #ifdef CONFIG_PCI | ||
105 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
106 | { | ||
107 | return dma_set_mask(&dev->dev, mask); | ||
108 | } | ||
109 | |||
110 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | ||
111 | { | ||
112 | return dma_set_coherent_mask(&dev->dev, mask); | ||
113 | } | ||
114 | #endif | ||
115 | |||
107 | #endif | 116 | #endif |
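With the exported PCI variants gone, the compat header now provides pci_set_dma_mask() and pci_set_consistent_dma_mask() as inline wrappers around dma_set_mask() and dma_set_coherent_mask(). A minimal sketch of the usual probe-time pattern, with a hypothetical driver name:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err = pci_enable_device(pdev);

            if (err)
                    return err;

            /* prefer 64-bit DMA, fall back to 32-bit */
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                    if (!err)
                            err = pci_set_consistent_dma_mask(pdev,
                                                              DMA_BIT_MASK(32));
                    if (err) {
                            dev_err(&pdev->dev, "no usable DMA mask\n");
                            return err;
                    }
            }
            return 0;
    }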
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index b4326b5466eb..26373cff4546 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h | |||
@@ -30,7 +30,18 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | |||
30 | res->end = region->end; | 30 | res->end = region->end; |
31 | } | 31 | } |
32 | 32 | ||
33 | #define pcibios_scan_all_fns(a, b) 0 | 33 | static inline struct resource * |
34 | pcibios_select_root(struct pci_dev *pdev, struct resource *res) | ||
35 | { | ||
36 | struct resource *root = NULL; | ||
37 | |||
38 | if (res->flags & IORESOURCE_IO) | ||
39 | root = &ioport_resource; | ||
40 | if (res->flags & IORESOURCE_MEM) | ||
41 | root = &iomem_resource; | ||
42 | |||
43 | return root; | ||
44 | } | ||
34 | 45 | ||
35 | #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ | 46 | #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ |
36 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | 47 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) |
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index d7d50d7ee51e..d17784ea37ff 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -41,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
41 | * Only S390 provides its own means of moving the pointer. | 41 | * Only S390 provides its own means of moving the pointer. |
42 | */ | 42 | */ |
43 | #ifndef SHIFT_PERCPU_PTR | 43 | #ifndef SHIFT_PERCPU_PTR |
44 | #define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset)) | 44 | /* Weird cast keeps both GCC and sparse happy. */ |
45 | #define SHIFT_PERCPU_PTR(__p, __offset) ({ \ | ||
46 | __verify_pcpu_ptr((__p)); \ | ||
47 | RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \ | ||
48 | }) | ||
45 | #endif | 49 | #endif |
46 | 50 | ||
47 | /* | 51 | /* |
@@ -50,12 +54,19 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
50 | * offset. | 54 | * offset. |
51 | */ | 55 | */ |
52 | #define per_cpu(var, cpu) \ | 56 | #define per_cpu(var, cpu) \ |
53 | (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu))) | 57 | (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) |
54 | #define __get_cpu_var(var) \ | ||
55 | (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset)) | ||
56 | #define __raw_get_cpu_var(var) \ | ||
57 | (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) | ||
58 | 58 | ||
59 | #ifndef __this_cpu_ptr | ||
60 | #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) | ||
61 | #endif | ||
62 | #ifdef CONFIG_DEBUG_PREEMPT | ||
63 | #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) | ||
64 | #else | ||
65 | #define this_cpu_ptr(ptr) __this_cpu_ptr(ptr) | ||
66 | #endif | ||
67 | |||
68 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | ||
69 | #define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var))) | ||
59 | 70 | ||
60 | #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA | 71 | #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA |
61 | extern void setup_per_cpu_areas(void); | 72 | extern void setup_per_cpu_areas(void); |
@@ -63,15 +74,22 @@ extern void setup_per_cpu_areas(void); | |||
63 | 74 | ||
64 | #else /* ! SMP */ | 75 | #else /* ! SMP */ |
65 | 76 | ||
66 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) | 77 | #define VERIFY_PERCPU_PTR(__p) ({ \ |
67 | #define __get_cpu_var(var) per_cpu_var(var) | 78 | __verify_pcpu_ptr((__p)); \ |
68 | #define __raw_get_cpu_var(var) per_cpu_var(var) | 79 | (typeof(*(__p)) __kernel __force *)(__p); \ |
80 | }) | ||
81 | |||
82 | #define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) | ||
83 | #define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
84 | #define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | ||
85 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
86 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | ||
69 | 87 | ||
70 | #endif /* SMP */ | 88 | #endif /* SMP */ |
71 | 89 | ||
72 | #ifndef PER_CPU_BASE_SECTION | 90 | #ifndef PER_CPU_BASE_SECTION |
73 | #ifdef CONFIG_SMP | 91 | #ifdef CONFIG_SMP |
74 | #define PER_CPU_BASE_SECTION ".data.percpu" | 92 | #define PER_CPU_BASE_SECTION ".data..percpu" |
75 | #else | 93 | #else |
76 | #define PER_CPU_BASE_SECTION ".data" | 94 | #define PER_CPU_BASE_SECTION ".data" |
77 | #endif | 95 | #endif |
@@ -81,14 +99,17 @@ extern void setup_per_cpu_areas(void); | |||
81 | 99 | ||
82 | #ifdef MODULE | 100 | #ifdef MODULE |
83 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | 101 | #define PER_CPU_SHARED_ALIGNED_SECTION "" |
102 | #define PER_CPU_ALIGNED_SECTION "" | ||
84 | #else | 103 | #else |
85 | #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" | 104 | #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" |
105 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" | ||
86 | #endif | 106 | #endif |
87 | #define PER_CPU_FIRST_SECTION ".first" | 107 | #define PER_CPU_FIRST_SECTION "..first" |
88 | 108 | ||
89 | #else | 109 | #else |
90 | 110 | ||
91 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | 111 | #define PER_CPU_SHARED_ALIGNED_SECTION "" |
112 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" | ||
92 | #define PER_CPU_FIRST_SECTION "" | 113 | #define PER_CPU_FIRST_SECTION "" |
93 | 114 | ||
94 | #endif | 115 | #endif |
@@ -97,4 +118,8 @@ extern void setup_per_cpu_areas(void); | |||
97 | #define PER_CPU_ATTRIBUTES | 118 | #define PER_CPU_ATTRIBUTES |
98 | #endif | 119 | #endif |
99 | 120 | ||
121 | #ifndef PER_CPU_DEF_ATTRIBUTES | ||
122 | #define PER_CPU_DEF_ATTRIBUTES | ||
123 | #endif | ||
124 | |||
100 | #endif /* _ASM_GENERIC_PERCPU_H_ */ | 125 | #endif /* _ASM_GENERIC_PERCPU_H_ */ |
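The header now builds __get_cpu_var()/__raw_get_cpu_var() on top of the new this_cpu_ptr()/__this_cpu_ptr() pointer forms, with the sparse address-space check folded into SHIFT_PERCPU_PTR(). A small sketch showing both styles side by side, assuming an ordinary per-cpu variable and callers with preemption disabled:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, softirq_hits);

    /* pointer form: handy when the per-cpu slot is passed around */
    static void note_hit_ptr(void)
    {
            unsigned long *p = this_cpu_ptr(&softirq_hits);

            (*p)++;
    }

    /* lvalue form, now defined as (*this_cpu_ptr(&(var))) */
    static void note_hit_var(void)
    {
            __get_cpu_var(softirq_hits)++;
    }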
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index a7cdc48e8b78..725612b793ce 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h | |||
@@ -59,7 +59,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) | |||
59 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 59 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
60 | { | 60 | { |
61 | } | 61 | } |
62 | #define __pmd_free_tlb(tlb, x) do { } while (0) | 62 | #define __pmd_free_tlb(tlb, x, a) do { } while (0) |
63 | 63 | ||
64 | #undef pmd_addr_end | 64 | #undef pmd_addr_end |
65 | #define pmd_addr_end(addr, end) (end) | 65 | #define pmd_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index 87cf449a6df3..810431d8351b 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h | |||
@@ -52,7 +52,7 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) | |||
52 | */ | 52 | */ |
53 | #define pud_alloc_one(mm, address) NULL | 53 | #define pud_alloc_one(mm, address) NULL |
54 | #define pud_free(mm, x) do { } while (0) | 54 | #define pud_free(mm, x) do { } while (0) |
55 | #define __pud_free_tlb(tlb, x) do { } while (0) | 55 | #define __pud_free_tlb(tlb, x, a) do { } while (0) |
56 | 56 | ||
57 | #undef pud_addr_end | 57 | #undef pud_addr_end |
58 | #define pud_addr_end(addr, end) (end) | 58 | #define pud_addr_end(addr, end) (end) |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index e410f602cab1..6f3c6ae4fe03 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -108,7 +108,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres | |||
108 | #endif | 108 | #endif |
109 | 109 | ||
110 | #ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY | 110 | #ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY |
111 | #define page_clear_dirty(page) do { } while (0) | 111 | #define page_clear_dirty(page, mapped) do { } while (0) |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY | 114 | #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY |
@@ -129,6 +129,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres | |||
129 | #define move_pte(pte, prot, old_addr, new_addr) (pte) | 129 | #define move_pte(pte, prot, old_addr, new_addr) (pte) |
130 | #endif | 130 | #endif |
131 | 131 | ||
132 | #ifndef flush_tlb_fix_spurious_fault | ||
133 | #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) | ||
134 | #endif | ||
135 | |||
136 | #ifndef pgprot_noncached | ||
137 | #define pgprot_noncached(prot) (prot) | ||
138 | #endif | ||
139 | |||
132 | #ifndef pgprot_writecombine | 140 | #ifndef pgprot_writecombine |
133 | #define pgprot_writecombine pgprot_noncached | 141 | #define pgprot_writecombine pgprot_noncached |
134 | #endif | 142 | #endif |
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h index 8b9454496a7c..5de07355fad4 100644 --- a/include/asm-generic/scatterlist.h +++ b/include/asm-generic/scatterlist.h | |||
@@ -11,7 +11,9 @@ struct scatterlist { | |||
11 | unsigned int offset; | 11 | unsigned int offset; |
12 | unsigned int length; | 12 | unsigned int length; |
13 | dma_addr_t dma_address; | 13 | dma_addr_t dma_address; |
14 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
14 | unsigned int dma_length; | 15 | unsigned int dma_length; |
16 | #endif | ||
15 | }; | 17 | }; |
16 | 18 | ||
17 | /* | 19 | /* |
@@ -22,22 +24,11 @@ struct scatterlist { | |||
22 | * is 0. | 24 | * is 0. |
23 | */ | 25 | */ |
24 | #define sg_dma_address(sg) ((sg)->dma_address) | 26 | #define sg_dma_address(sg) ((sg)->dma_address) |
25 | #ifndef sg_dma_len | 27 | |
26 | /* | 28 | #ifdef CONFIG_NEED_SG_DMA_LENGTH |
27 | * Normally, you have an iommu on 64 bit machines, but not on 32 bit | ||
28 | * machines. Architectures that are differnt should override this. | ||
29 | */ | ||
30 | #if __BITS_PER_LONG == 64 | ||
31 | #define sg_dma_len(sg) ((sg)->dma_length) | 29 | #define sg_dma_len(sg) ((sg)->dma_length) |
32 | #else | 30 | #else |
33 | #define sg_dma_len(sg) ((sg)->length) | 31 | #define sg_dma_len(sg) ((sg)->length) |
34 | #endif /* 64 bit */ | ||
35 | #endif /* sg_dma_len */ | ||
36 | |||
37 | #ifndef ISA_DMA_THRESHOLD | ||
38 | #define ISA_DMA_THRESHOLD (~0UL) | ||
39 | #endif | 32 | #endif |
40 | 33 | ||
41 | #define ARCH_HAS_SG_CHAIN | ||
42 | |||
43 | #endif /* __ASM_GENERIC_SCATTERLIST_H */ | 34 | #endif /* __ASM_GENERIC_SCATTERLIST_H */ |
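The separate dma_length field, and sg_dma_len() reading it, is now compiled in only when the architecture selects CONFIG_NEED_SG_DMA_LENGTH, i.e. when an IOMMU may merge entries so the DMA length differs from the CPU length. Drivers keep going through the accessors either way; a minimal sketch, with foo_write_desc() standing in for hypothetical hardware descriptor setup:

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>

    /* hypothetical hook that programs one hardware descriptor */
    static void foo_write_desc(struct device *dev, dma_addr_t addr,
                               unsigned int len);

    static void foo_program_sg(struct device *dev, struct scatterlist *sgl,
                               int nents)
    {
            struct scatterlist *sg;
            int i, mapped;

            mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
            for_each_sg(sgl, sg, mapped, i) {
                    /* always use the accessors, never the raw fields */
                    dma_addr_t addr = sg_dma_address(sg);
                    unsigned int len = sg_dma_len(sg);

                    foo_write_desc(dev, addr, len);
            }
    }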
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d083561337f2..b3bfabc258f3 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h | |||
@@ -23,4 +23,20 @@ extern char __ctors_start[], __ctors_end[]; | |||
23 | #define dereference_function_descriptor(p) (p) | 23 | #define dereference_function_descriptor(p) (p) |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* random extra sections (if any). Override | ||
27 | * in asm/sections.h */ | ||
28 | #ifndef arch_is_kernel_text | ||
29 | static inline int arch_is_kernel_text(unsigned long addr) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | #endif | ||
34 | |||
35 | #ifndef arch_is_kernel_data | ||
36 | static inline int arch_is_kernel_data(unsigned long addr) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | #endif | ||
41 | |||
26 | #endif /* _ASM_GENERIC_SECTIONS_H_ */ | 42 | #endif /* _ASM_GENERIC_SECTIONS_H_ */ |
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index c840719a8c59..942d30b5aab1 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h | |||
@@ -82,6 +82,7 @@ typedef struct siginfo { | |||
82 | #ifdef __ARCH_SI_TRAPNO | 82 | #ifdef __ARCH_SI_TRAPNO |
83 | int _trapno; /* TRAP # which caused the signal */ | 83 | int _trapno; /* TRAP # which caused the signal */ |
84 | #endif | 84 | #endif |
85 | short _addr_lsb; /* LSB of the reported address */ | ||
85 | } _sigfault; | 86 | } _sigfault; |
86 | 87 | ||
87 | /* SIGPOLL */ | 88 | /* SIGPOLL */ |
@@ -112,6 +113,7 @@ typedef struct siginfo { | |||
112 | #ifdef __ARCH_SI_TRAPNO | 113 | #ifdef __ARCH_SI_TRAPNO |
113 | #define si_trapno _sifields._sigfault._trapno | 114 | #define si_trapno _sifields._sigfault._trapno |
114 | #endif | 115 | #endif |
116 | #define si_addr_lsb _sifields._sigfault._addr_lsb | ||
115 | #define si_band _sifields._sigpoll._band | 117 | #define si_band _sifields._sigpoll._band |
116 | #define si_fd _sifields._sigpoll._fd | 118 | #define si_fd _sifields._sigpoll._fd |
117 | 119 | ||
@@ -192,7 +194,11 @@ typedef struct siginfo { | |||
192 | #define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */ | 194 | #define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */ |
193 | #define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */ | 195 | #define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */ |
194 | #define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */ | 196 | #define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */ |
195 | #define NSIGBUS 3 | 197 | /* hardware memory error consumed on a machine check: action required */ |
198 | #define BUS_MCEERR_AR (__SI_FAULT|4) | ||
199 | /* hardware memory error detected in process but not consumed: action optional*/ | ||
200 | #define BUS_MCEERR_AO (__SI_FAULT|5) | ||
201 | #define NSIGBUS 5 | ||
196 | 202 | ||
197 | /* | 203 | /* |
198 | * SIGTRAP si_codes | 204 | * SIGTRAP si_codes |
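The new BUS_MCEERR_AR/BUS_MCEERR_AO codes and the si_addr_lsb field let the hwpoison code tell a process which page was lost and how urgently it must react; si_addr_lsb is the log2 of the affected granule (typically the page shift). A userspace sketch of an "action optional" handler, assuming the libc siginfo_t already carries the new field and exports the constants (the fprintf() is not async-signal-safe and is for illustration only):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void sigbus_handler(int sig, siginfo_t *si, void *uc)
    {
            if (si->si_code == BUS_MCEERR_AO) {
                    /* poisoned but not yet consumed: drop cached copies of
                     * the range at si_addr and carry on */
                    fprintf(stderr, "lost %lu bytes at %p\n",
                            1UL << si->si_addr_lsb, si->si_addr);
                    return;
            }
            /* BUS_MCEERR_AR and the classic codes: cannot continue */
            abort();
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = sigbus_handler,
                                    .sa_flags = SA_SIGINFO };

            sigaction(SIGBUS, &sa, NULL);
            /* ... */
            return 0;
    }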
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h index 5d79e409241c..9a6115e7cf63 100644 --- a/include/asm-generic/socket.h +++ b/include/asm-generic/socket.h | |||
@@ -60,4 +60,8 @@ | |||
60 | #define SO_TIMESTAMPING 37 | 60 | #define SO_TIMESTAMPING 37 |
61 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 61 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
62 | 62 | ||
63 | #define SO_PROTOCOL 38 | ||
64 | #define SO_DOMAIN 39 | ||
65 | |||
66 | #define SO_RXQ_OVFL 40 | ||
63 | #endif /* __ASM_GENERIC_SOCKET_H */ | 67 | #endif /* __ASM_GENERIC_SOCKET_H */ |
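SO_PROTOCOL and SO_DOMAIN let getsockopt() report how a socket was created, which is useful for code that receives descriptors over SCM_RIGHTS, and SO_RXQ_OVFL enables a per-packet dropped-datagram counter. A small userspace sketch for the first two, assuming the constants have reached the libc headers:

    #include <stdio.h>
    #include <sys/socket.h>

    static void describe_socket(int fd)
    {
            int domain, protocol;
            socklen_t len;

            len = sizeof(domain);
            if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len))
                    return;
            len = sizeof(protocol);
            if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len))
                    return;
            printf("fd %d: domain %d, protocol %d\n", fd, domain, protocol);
    }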
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h index 47e64170305d..bd8cad21998e 100644 --- a/include/asm-generic/stat.h +++ b/include/asm-generic/stat.h | |||
@@ -33,18 +33,18 @@ struct stat { | |||
33 | int st_blksize; /* Optimal block size for I/O. */ | 33 | int st_blksize; /* Optimal block size for I/O. */ |
34 | int __pad2; | 34 | int __pad2; |
35 | long st_blocks; /* Number 512-byte blocks allocated. */ | 35 | long st_blocks; /* Number 512-byte blocks allocated. */ |
36 | int st_atime; /* Time of last access. */ | 36 | long st_atime; /* Time of last access. */ |
37 | unsigned int st_atime_nsec; | 37 | unsigned long st_atime_nsec; |
38 | int st_mtime; /* Time of last modification. */ | 38 | long st_mtime; /* Time of last modification. */ |
39 | unsigned int st_mtime_nsec; | 39 | unsigned long st_mtime_nsec; |
40 | int st_ctime; /* Time of last status change. */ | 40 | long st_ctime; /* Time of last status change. */ |
41 | unsigned int st_ctime_nsec; | 41 | unsigned long st_ctime_nsec; |
42 | unsigned int __unused4; | 42 | unsigned int __unused4; |
43 | unsigned int __unused5; | 43 | unsigned int __unused5; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | #if __BITS_PER_LONG != 64 | ||
47 | /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ | 46 | /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ |
47 | #if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64) | ||
48 | struct stat64 { | 48 | struct stat64 { |
49 | unsigned long long st_dev; /* Device. */ | 49 | unsigned long long st_dev; /* Device. */ |
50 | unsigned long long st_ino; /* File serial number. */ | 50 | unsigned long long st_ino; /* File serial number. */ |
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h index 3b4fb3e52f0d..0fd28e028de1 100644 --- a/include/asm-generic/statfs.h +++ b/include/asm-generic/statfs.h | |||
@@ -33,7 +33,8 @@ struct statfs { | |||
33 | __kernel_fsid_t f_fsid; | 33 | __kernel_fsid_t f_fsid; |
34 | __statfs_word f_namelen; | 34 | __statfs_word f_namelen; |
35 | __statfs_word f_frsize; | 35 | __statfs_word f_frsize; |
36 | __statfs_word f_spare[5]; | 36 | __statfs_word f_flags; |
37 | __statfs_word f_spare[4]; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | /* | 40 | /* |
@@ -55,7 +56,8 @@ struct statfs64 { | |||
55 | __kernel_fsid_t f_fsid; | 56 | __kernel_fsid_t f_fsid; |
56 | __statfs_word f_namelen; | 57 | __statfs_word f_namelen; |
57 | __statfs_word f_frsize; | 58 | __statfs_word f_frsize; |
58 | __statfs_word f_spare[5]; | 59 | __statfs_word f_flags; |
60 | __statfs_word f_spare[4]; | ||
59 | } ARCH_PACK_STATFS64; | 61 | } ARCH_PACK_STATFS64; |
60 | 62 | ||
61 | /* | 63 | /* |
@@ -77,7 +79,8 @@ struct compat_statfs64 { | |||
77 | __kernel_fsid_t f_fsid; | 79 | __kernel_fsid_t f_fsid; |
78 | __u32 f_namelen; | 80 | __u32 f_namelen; |
79 | __u32 f_frsize; | 81 | __u32 f_frsize; |
80 | __u32 f_spare[5]; | 82 | __u32 f_flags; |
83 | __u32 f_spare[4]; | ||
81 | } ARCH_PACK_COMPAT_STATFS64; | 84 | } ARCH_PACK_COMPAT_STATFS64; |
82 | 85 | ||
83 | #endif | 86 | #endif |
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index ea8087b55ffc..5c122ae6bfa6 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Access to user system call parameters and results | 2 | * Access to user system call parameters and results |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This copyrighted material is made available to anyone wishing to use, | 6 | * This copyrighted material is made available to anyone wishing to use, |
7 | * modify, copy, or redistribute it subject to the terms and conditions | 7 | * modify, copy, or redistribute it subject to the terms and conditions |
@@ -32,9 +32,13 @@ struct pt_regs; | |||
32 | * If @task is not executing a system call, i.e. it's blocked | 32 | * If @task is not executing a system call, i.e. it's blocked |
33 | * inside the kernel for a fault or signal, returns -1. | 33 | * inside the kernel for a fault or signal, returns -1. |
34 | * | 34 | * |
35 | * Note this returns int even on 64-bit machines. Only 32 bits of | ||
36 | * system call number can be meaningful. If the actual arch value | ||
37 | * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. | ||
38 | * | ||
35 | * It's only valid to call this when @task is known to be blocked. | 39 | * It's only valid to call this when @task is known to be blocked. |
36 | */ | 40 | */ |
37 | long syscall_get_nr(struct task_struct *task, struct pt_regs *regs); | 41 | int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); |
38 | 42 | ||
39 | /** | 43 | /** |
40 | * syscall_rollback - roll back registers after an aborted system call | 44 | * syscall_rollback - roll back registers after an aborted system call |
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index df84e3b04555..d89dec864d42 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h | |||
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs); | |||
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #ifndef sys_execve | 25 | #ifndef sys_execve |
26 | asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, | 26 | asmlinkage long sys_execve(const char __user *filename, |
27 | char __user * __user *envp, struct pt_regs *regs); | 27 | const char __user *const __user *argv, |
28 | const char __user *const __user *envp, | ||
29 | struct pt_regs *regs); | ||
28 | #endif | 30 | #endif |
29 | 31 | ||
30 | #ifndef sys_mmap2 | 32 | #ifndef sys_mmap2 |
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h index efa403b5e121..4b0b9cbbfae5 100644 --- a/include/asm-generic/system.h +++ b/include/asm-generic/system.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/irqflags.h> | 21 | #include <linux/irqflags.h> |
22 | 22 | ||
23 | #include <asm/cmpxchg-local.h> | 23 | #include <asm/cmpxchg-local.h> |
24 | #include <asm/cmpxchg.h> | ||
24 | 25 | ||
25 | struct task_struct; | 26 | struct task_struct; |
26 | 27 | ||
@@ -136,25 +137,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | |||
136 | #define xchg(ptr, x) \ | 137 | #define xchg(ptr, x) \ |
137 | ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) | 138 | ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) |
138 | 139 | ||
139 | static inline unsigned long __cmpxchg(volatile unsigned long *m, | ||
140 | unsigned long old, unsigned long new) | ||
141 | { | ||
142 | unsigned long retval; | ||
143 | unsigned long flags; | ||
144 | |||
145 | local_irq_save(flags); | ||
146 | retval = *m; | ||
147 | if (retval == old) | ||
148 | *m = new; | ||
149 | local_irq_restore(flags); | ||
150 | return retval; | ||
151 | } | ||
152 | |||
153 | #define cmpxchg(ptr, o, n) \ | ||
154 | ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ | ||
155 | (unsigned long)(o), \ | ||
156 | (unsigned long)(n))) | ||
157 | |||
158 | #endif /* !__ASSEMBLY__ */ | 140 | #endif /* !__ASSEMBLY__ */ |
159 | 141 | ||
160 | #endif /* __KERNEL__ */ | 142 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h index 1c9773d48cb0..232b4781aef3 100644 --- a/include/asm-generic/termbits.h +++ b/include/asm-generic/termbits.h | |||
@@ -178,6 +178,7 @@ struct ktermios { | |||
178 | #define FLUSHO 0010000 | 178 | #define FLUSHO 0010000 |
179 | #define PENDIN 0040000 | 179 | #define PENDIN 0040000 |
180 | #define IEXTEN 0100000 | 180 | #define IEXTEN 0100000 |
181 | #define EXTPROC 0200000 | ||
181 | 182 | ||
182 | /* tcflow() and TCXONC use these */ | 183 | /* tcflow() and TCXONC use these */ |
183 | #define TCOOFF 0 | 184 | #define TCOOFF 0 |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index f490e43a90b9..e43f9766259f 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -123,24 +123,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
123 | __tlb_remove_tlb_entry(tlb, ptep, address); \ | 123 | __tlb_remove_tlb_entry(tlb, ptep, address); \ |
124 | } while (0) | 124 | } while (0) |
125 | 125 | ||
126 | #define pte_free_tlb(tlb, ptep) \ | 126 | #define pte_free_tlb(tlb, ptep, address) \ |
127 | do { \ | 127 | do { \ |
128 | tlb->need_flush = 1; \ | 128 | tlb->need_flush = 1; \ |
129 | __pte_free_tlb(tlb, ptep); \ | 129 | __pte_free_tlb(tlb, ptep, address); \ |
130 | } while (0) | 130 | } while (0) |
131 | 131 | ||
132 | #ifndef __ARCH_HAS_4LEVEL_HACK | 132 | #ifndef __ARCH_HAS_4LEVEL_HACK |
133 | #define pud_free_tlb(tlb, pudp) \ | 133 | #define pud_free_tlb(tlb, pudp, address) \ |
134 | do { \ | 134 | do { \ |
135 | tlb->need_flush = 1; \ | 135 | tlb->need_flush = 1; \ |
136 | __pud_free_tlb(tlb, pudp); \ | 136 | __pud_free_tlb(tlb, pudp, address); \ |
137 | } while (0) | 137 | } while (0) |
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | #define pmd_free_tlb(tlb, pmdp) \ | 140 | #define pmd_free_tlb(tlb, pmdp, address) \ |
141 | do { \ | 141 | do { \ |
142 | tlb->need_flush = 1; \ | 142 | tlb->need_flush = 1; \ |
143 | __pmd_free_tlb(tlb, pmdp); \ | 143 | __pmd_free_tlb(tlb, pmdp, address); \ |
144 | } while (0) | 144 | } while (0) |
145 | 145 | ||
146 | #define tlb_migrate_finish(mm) do {} while (0) | 146 | #define tlb_migrate_finish(mm) do {} while (0) |
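pte_free_tlb(), pmd_free_tlb() and pud_free_tlb() now carry the virtual address of the range being torn down, so architectures that track or flush TLB state per range can use it; the others simply ignore the extra argument. A minimal sketch of the corresponding arch hook under the new signature (a hypothetical <asm/pgalloc.h> fragment, not any particular architecture's code):

    /* the third argument is new and unused here; pgtable_page_dtor()
     * comes from linux/mm.h, tlb_remove_page() from asm-generic/tlb.h */
    #define __pte_free_tlb(tlb, pte, address)               \
    do {                                                    \
            pgtable_page_dtor(pte);                         \
            tlb_remove_page((tlb), (pte));                  \
    } while (0)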
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 88bada2ebc4b..fc824e2828f3 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2002, IBM Corp. | 6 | * Copyright (C) 2002, IBM Corp. |
7 | * | 7 | * |
8 | * All rights reserved. | 8 | * All rights reserved. |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
@@ -34,12 +34,19 @@ | |||
34 | #ifndef cpu_to_node | 34 | #ifndef cpu_to_node |
35 | #define cpu_to_node(cpu) ((void)(cpu),0) | 35 | #define cpu_to_node(cpu) ((void)(cpu),0) |
36 | #endif | 36 | #endif |
37 | #ifndef set_numa_node | ||
38 | #define set_numa_node(node) | ||
39 | #endif | ||
40 | #ifndef set_cpu_numa_node | ||
41 | #define set_cpu_numa_node(cpu, node) | ||
42 | #endif | ||
43 | #ifndef cpu_to_mem | ||
44 | #define cpu_to_mem(cpu) ((void)(cpu),0) | ||
45 | #endif | ||
46 | |||
37 | #ifndef parent_node | 47 | #ifndef parent_node |
38 | #define parent_node(node) ((void)(node),0) | 48 | #define parent_node(node) ((void)(node),0) |
39 | #endif | 49 | #endif |
40 | #ifndef node_to_cpumask | ||
41 | #define node_to_cpumask(node) ((void)node, cpu_online_map) | ||
42 | #endif | ||
43 | #ifndef cpumask_of_node | 50 | #ifndef cpumask_of_node |
44 | #define cpumask_of_node(node) ((void)node, cpu_online_mask) | 51 | #define cpumask_of_node(node) ((void)node, cpu_online_mask) |
45 | #endif | 52 | #endif |
@@ -55,18 +62,15 @@ | |||
55 | 62 | ||
56 | #endif /* CONFIG_NUMA */ | 63 | #endif /* CONFIG_NUMA */ |
57 | 64 | ||
58 | /* | 65 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) |
59 | * returns pointer to cpumask for specified node | ||
60 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
61 | */ | ||
62 | #ifndef node_to_cpumask_ptr | ||
63 | |||
64 | #define node_to_cpumask_ptr(v, node) \ | ||
65 | cpumask_t _##v = node_to_cpumask(node); \ | ||
66 | const cpumask_t *v = &_##v | ||
67 | 66 | ||
68 | #define node_to_cpumask_ptr_next(v, node) \ | 67 | #ifndef set_numa_mem |
69 | _##v = node_to_cpumask(node) | 68 | #define set_numa_mem(node) |
70 | #endif | 69 | #endif |
70 | #ifndef set_cpu_numa_mem | ||
71 | #define set_cpu_numa_mem(cpu, node) | ||
72 | #endif | ||
73 | |||
74 | #endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ | ||
71 | 75 | ||
72 | #endif /* _ASM_GENERIC_TOPOLOGY_H */ | 76 | #endif /* _ASM_GENERIC_TOPOLOGY_H */ |
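With node_to_cpumask() and the node_to_cpumask_ptr() helpers removed, callers use cpumask_of_node(), which returns a const pointer instead of copying a cpumask_t onto the stack. A short sketch of the converted pattern, iterating the CPUs of a node:

    #include <linux/topology.h>
    #include <linux/cpumask.h>

    static int count_online_cpus_on_node(int node)
    {
            const struct cpumask *mask = cpumask_of_node(node);
            int cpu, n = 0;

            for_each_cpu(cpu, mask)
                    if (cpu_online(cpu))
                            n++;
            return n;
    }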
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 6d8cab22e294..b218b8513d04 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h | |||
@@ -163,7 +163,7 @@ static inline __must_check long __copy_to_user(void __user *to, | |||
163 | #define put_user(x, ptr) \ | 163 | #define put_user(x, ptr) \ |
164 | ({ \ | 164 | ({ \ |
165 | might_sleep(); \ | 165 | might_sleep(); \ |
166 | __access_ok(ptr, sizeof (*ptr)) ? \ | 166 | access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \ |
167 | __put_user(x, ptr) : \ | 167 | __put_user(x, ptr) : \ |
168 | -EFAULT; \ | 168 | -EFAULT; \ |
169 | }) | 169 | }) |
@@ -219,7 +219,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); | |||
219 | #define get_user(x, ptr) \ | 219 | #define get_user(x, ptr) \ |
220 | ({ \ | 220 | ({ \ |
221 | might_sleep(); \ | 221 | might_sleep(); \ |
222 | __access_ok(ptr, sizeof (*ptr)) ? \ | 222 | access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ |
223 | __get_user(x, ptr) : \ | 223 | __get_user(x, ptr) : \ |
224 | -EFAULT; \ | 224 | -EFAULT; \ |
225 | }) | 225 | }) |
@@ -244,7 +244,7 @@ static inline long copy_from_user(void *to, | |||
244 | const void __user * from, unsigned long n) | 244 | const void __user * from, unsigned long n) |
245 | { | 245 | { |
246 | might_sleep(); | 246 | might_sleep(); |
247 | if (__access_ok(from, n)) | 247 | if (access_ok(VERIFY_READ, from, n)) |
248 | return __copy_from_user(to, from, n); | 248 | return __copy_from_user(to, from, n); |
249 | else | 249 | else |
250 | return n; | 250 | return n; |
@@ -254,7 +254,7 @@ static inline long copy_to_user(void __user *to, | |||
254 | const void *from, unsigned long n) | 254 | const void *from, unsigned long n) |
255 | { | 255 | { |
256 | might_sleep(); | 256 | might_sleep(); |
257 | if (__access_ok(to, n)) | 257 | if (access_ok(VERIFY_WRITE, to, n)) |
258 | return __copy_to_user(to, from, n); | 258 | return __copy_to_user(to, from, n); |
259 | else | 259 | else |
260 | return n; | 260 | return n; |
@@ -278,7 +278,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count) | |||
278 | static inline long | 278 | static inline long |
279 | strncpy_from_user(char *dst, const char __user *src, long count) | 279 | strncpy_from_user(char *dst, const char __user *src, long count) |
280 | { | 280 | { |
281 | if (!__access_ok(src, 1)) | 281 | if (!access_ok(VERIFY_READ, src, 1)) |
282 | return -EFAULT; | 282 | return -EFAULT; |
283 | return __strncpy_from_user(dst, src, count); | 283 | return __strncpy_from_user(dst, src, count); |
284 | } | 284 | } |
@@ -291,6 +291,8 @@ strncpy_from_user(char *dst, const char __user *src, long count) | |||
291 | #ifndef strnlen_user | 291 | #ifndef strnlen_user |
292 | static inline long strnlen_user(const char __user *src, long n) | 292 | static inline long strnlen_user(const char __user *src, long n) |
293 | { | 293 | { |
294 | if (!access_ok(VERIFY_READ, src, 1)) | ||
295 | return 0; | ||
294 | return strlen((void * __force)src) + 1; | 296 | return strlen((void * __force)src) + 1; |
295 | } | 297 | } |
296 | #endif | 298 | #endif |
@@ -316,7 +318,7 @@ static inline __must_check unsigned long | |||
316 | clear_user(void __user *to, unsigned long n) | 318 | clear_user(void __user *to, unsigned long n) |
317 | { | 319 | { |
318 | might_sleep(); | 320 | might_sleep(); |
319 | if (!__access_ok(to, n)) | 321 | if (!access_ok(VERIFY_WRITE, to, n)) |
320 | return n; | 322 | return n; |
321 | 323 | ||
322 | return __clear_user(to, n); | 324 | return __clear_user(to, n); |
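The generic uaccess helpers now go through access_ok(VERIFY_READ/VERIFY_WRITE, ...) rather than the bare __access_ok(), matching the arch-specific implementations, and strnlen_user() finally validates its pointer before touching it. Callers are unaffected; the usual pattern is still the one below, sketched with a hypothetical ioctl argument structure:

    #include <linux/uaccess.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* hypothetical ioctl argument block */
    struct foo_args {
            u32 flags;
            u64 addr;
    };

    static long foo_get_args(struct foo_args *dst, const void __user *uptr)
    {
            /* copy_from_user() performs the access_ok() check internally */
            if (copy_from_user(dst, uptr, sizeof(*dst)))
                    return -EFAULT;
            return dst->flags ? 0 : -EINVAL;
    }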
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 5b34b6233d6d..b969770196c2 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #define __SYSCALL(x, y) | 18 | #define __SYSCALL(x, y) |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #if __BITS_PER_LONG == 32 | 21 | #if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT) |
22 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) | 22 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) |
23 | #else | 23 | #else |
24 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) | 24 | #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) |
@@ -241,8 +241,13 @@ __SYSCALL(__NR_sync, sys_sync) | |||
241 | __SYSCALL(__NR_fsync, sys_fsync) | 241 | __SYSCALL(__NR_fsync, sys_fsync) |
242 | #define __NR_fdatasync 83 | 242 | #define __NR_fdatasync 83 |
243 | __SYSCALL(__NR_fdatasync, sys_fdatasync) | 243 | __SYSCALL(__NR_fdatasync, sys_fdatasync) |
244 | #ifdef __ARCH_WANT_SYNC_FILE_RANGE2 | ||
245 | #define __NR_sync_file_range2 84 | ||
246 | __SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) | ||
247 | #else | ||
244 | #define __NR_sync_file_range 84 | 248 | #define __NR_sync_file_range 84 |
245 | __SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */ | 249 | __SYSCALL(__NR_sync_file_range, sys_sync_file_range) |
250 | #endif | ||
246 | 251 | ||
247 | /* fs/timerfd.c */ | 252 | /* fs/timerfd.c */ |
248 | #define __NR_timerfd_create 85 | 253 | #define __NR_timerfd_create 85 |
@@ -580,7 +585,7 @@ __SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ | |||
580 | __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) | 585 | __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) |
581 | /* mm/fadvise.c */ | 586 | /* mm/fadvise.c */ |
582 | #define __NR3264_fadvise64 223 | 587 | #define __NR3264_fadvise64 223 |
583 | __SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64) | 588 | __SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) |
584 | 589 | ||
585 | /* mm/, CONFIG_MMU only */ | 590 | /* mm/, CONFIG_MMU only */ |
586 | #ifndef __ARCH_NOMMU | 591 | #ifndef __ARCH_NOMMU |
@@ -618,15 +623,39 @@ __SYSCALL(__NR_migrate_pages, sys_migrate_pages) | |||
618 | __SYSCALL(__NR_move_pages, sys_move_pages) | 623 | __SYSCALL(__NR_move_pages, sys_move_pages) |
619 | #endif | 624 | #endif |
620 | 625 | ||
626 | #define __NR_rt_tgsigqueueinfo 240 | ||
627 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | ||
628 | #define __NR_perf_event_open 241 | ||
629 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | ||
630 | #define __NR_accept4 242 | ||
631 | __SYSCALL(__NR_accept4, sys_accept4) | ||
632 | #define __NR_recvmmsg 243 | ||
633 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) | ||
634 | |||
635 | /* | ||
636 | * Architectures may provide up to 16 syscalls of their own | ||
637 | * starting with this value. | ||
638 | */ | ||
639 | #define __NR_arch_specific_syscall 244 | ||
640 | |||
641 | #define __NR_wait4 260 | ||
642 | __SYSCALL(__NR_wait4, sys_wait4) | ||
643 | #define __NR_prlimit64 261 | ||
644 | __SYSCALL(__NR_prlimit64, sys_prlimit64) | ||
645 | #define __NR_fanotify_init 262 | ||
646 | __SYSCALL(__NR_fanotify_init, sys_fanotify_init) | ||
647 | #define __NR_fanotify_mark 263 | ||
648 | __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) | ||
649 | |||
621 | #undef __NR_syscalls | 650 | #undef __NR_syscalls |
622 | #define __NR_syscalls 240 | 651 | #define __NR_syscalls 264 |
623 | 652 | ||
624 | /* | 653 | /* |
625 | * All syscalls below here should go away really, | 654 | * All syscalls below here should go away really, |
626 | * these are provided for both review and as a porting | 655 | * these are provided for both review and as a porting |
627 | * help for the C library version. | 656 | * help for the C library version. |
628 | * | 657 | * |
629 | * Last chance: are any of these important enought to | 658 | * Last chance: are any of these important enough to |
630 | * enable by default? | 659 | * enable by default? |
631 | */ | 660 | */ |
632 | #ifdef __ARCH_WANT_SYSCALL_NO_AT | 661 | #ifdef __ARCH_WANT_SYSCALL_NO_AT |
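Numbers 244 through 259 are now reserved for architecture-private calls via __NR_arch_specific_syscall, with the generic table resuming at 260 (wait4) and the new __NR_syscalls ending at 264. A sketch of how an architecture would claim one of the reserved slots; the cacheflush call and its handler are purely hypothetical:

/* Hypothetical arch-private syscall in the reserved 244..259 window. */
#define __NR_arch_cacheflush    (__NR_arch_specific_syscall + 0)
__SYSCALL(__NR_arch_cacheflush, sys_arch_cacheflush)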
@@ -685,7 +714,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd) | |||
685 | #define __NR_syscalls (__NR_signalfd+1) | 714 | #define __NR_syscalls (__NR_signalfd+1) |
686 | #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ | 715 | #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ |
687 | 716 | ||
688 | #if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T) | 717 | #if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \ |
718 | defined(__ARCH_WANT_SYSCALL_OFF_T) | ||
689 | #define __NR_sendfile 1046 | 719 | #define __NR_sendfile 1046 |
690 | __SYSCALL(__NR_sendfile, sys_sendfile) | 720 | __SYSCALL(__NR_sendfile, sys_sendfile) |
691 | #define __NR_ftruncate 1047 | 721 | #define __NR_ftruncate 1047 |
@@ -731,6 +761,7 @@ __SYSCALL(__NR_getpgrp, sys_getpgrp) | |||
731 | __SYSCALL(__NR_pause, sys_pause) | 761 | __SYSCALL(__NR_pause, sys_pause) |
732 | #define __NR_time 1062 | 762 | #define __NR_time 1062 |
733 | #define __ARCH_WANT_SYS_TIME | 763 | #define __ARCH_WANT_SYS_TIME |
764 | #define __ARCH_WANT_COMPAT_SYS_TIME | ||
734 | __SYSCALL(__NR_time, sys_time) | 765 | __SYSCALL(__NR_time, sys_time) |
735 | #define __NR_utime 1063 | 766 | #define __NR_utime 1063 |
736 | #define __ARCH_WANT_SYS_UTIME | 767 | #define __ARCH_WANT_SYS_UTIME |
@@ -754,8 +785,8 @@ __SYSCALL(__NR_epoll_wait, sys_epoll_wait) | |||
754 | __SYSCALL(__NR_ustat, sys_ustat) | 785 | __SYSCALL(__NR_ustat, sys_ustat) |
755 | #define __NR_vfork 1071 | 786 | #define __NR_vfork 1071 |
756 | __SYSCALL(__NR_vfork, sys_vfork) | 787 | __SYSCALL(__NR_vfork, sys_vfork) |
757 | #define __NR_wait4 1072 | 788 | #define __NR_oldwait4 1072 |
758 | __SYSCALL(__NR_wait4, sys_wait4) | 789 | __SYSCALL(__NR_oldwait4, sys_wait4) |
759 | #define __NR_recv 1073 | 790 | #define __NR_recv 1073 |
760 | __SYSCALL(__NR_recv, sys_recv) | 791 | __SYSCALL(__NR_recv, sys_recv) |
761 | #define __NR_send 1074 | 792 | #define __NR_send 1074 |
@@ -792,12 +823,12 @@ __SYSCALL(__NR_fork, sys_ni_syscall) | |||
792 | * Here we map the numbers so that both versions | 823 | * Here we map the numbers so that both versions |
793 | * use the same syscall table layout. | 824 | * use the same syscall table layout. |
794 | */ | 825 | */ |
795 | #if __BITS_PER_LONG == 64 | 826 | #if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT) |
796 | #define __NR_fcntl __NR3264_fcntl | 827 | #define __NR_fcntl __NR3264_fcntl |
797 | #define __NR_statfs __NR3264_statfs | 828 | #define __NR_statfs __NR3264_statfs |
798 | #define __NR_fstatfs __NR3264_fstatfs | 829 | #define __NR_fstatfs __NR3264_fstatfs |
799 | #define __NR_truncate __NR3264_truncate | 830 | #define __NR_truncate __NR3264_truncate |
800 | #define __NR_ftruncate __NR3264_truncate | 831 | #define __NR_ftruncate __NR3264_ftruncate |
801 | #define __NR_lseek __NR3264_lseek | 832 | #define __NR_lseek __NR3264_lseek |
802 | #define __NR_sendfile __NR3264_sendfile | 833 | #define __NR_sendfile __NR3264_sendfile |
803 | #define __NR_newfstatat __NR3264_fstatat | 834 | #define __NR_newfstatat __NR3264_fstatat |
@@ -813,7 +844,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) | |||
813 | #define __NR_statfs64 __NR3264_statfs | 844 | #define __NR_statfs64 __NR3264_statfs |
814 | #define __NR_fstatfs64 __NR3264_fstatfs | 845 | #define __NR_fstatfs64 __NR3264_fstatfs |
815 | #define __NR_truncate64 __NR3264_truncate | 846 | #define __NR_truncate64 __NR3264_truncate |
816 | #define __NR_ftruncate64 __NR3264_truncate | 847 | #define __NR_ftruncate64 __NR3264_ftruncate |
817 | #define __NR_llseek __NR3264_lseek | 848 | #define __NR_llseek __NR3264_lseek |
818 | #define __NR_sendfile64 __NR3264_sendfile | 849 | #define __NR_sendfile64 __NR3264_sendfile |
819 | #define __NR_fstatat64 __NR3264_fstatat | 850 | #define __NR_fstatat64 __NR3264_fstatat |
@@ -839,6 +870,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) | |||
839 | #endif | 870 | #endif |
840 | #define __ARCH_WANT_SYS_RT_SIGACTION | 871 | #define __ARCH_WANT_SYS_RT_SIGACTION |
841 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | 872 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND |
873 | #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | ||
842 | 874 | ||
843 | /* | 875 | /* |
844 | * "Conditional" syscalls | 876 | * "Conditional" syscalls |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 92b73b6140ff..05cbad03c5ab 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -30,18 +30,13 @@ | |||
30 | * EXCEPTION_TABLE(...) | 30 | * EXCEPTION_TABLE(...) |
31 | * NOTES | 31 | * NOTES |
32 | * | 32 | * |
33 | * __bss_start = .; | 33 | * BSS_SECTION(0, 0, 0) |
34 | * BSS_SECTION(0, 0) | ||
35 | * __bss_stop = .; | ||
36 | * _end = .; | 34 | * _end = .; |
37 | * | 35 | * |
38 | * /DISCARD/ : { | ||
39 | * EXIT_TEXT | ||
40 | * EXIT_DATA | ||
41 | * EXIT_CALL | ||
42 | * } | ||
43 | * STABS_DEBUG | 36 | * STABS_DEBUG |
44 | * DWARF_DEBUG | 37 | * DWARF_DEBUG |
38 | * | ||
39 | * DISCARDS // must be the last | ||
45 | * } | 40 | * } |
46 | * | 41 | * |
47 | * [__init_begin, __init_end] is the init section that may be freed after init | 42 | * [__init_begin, __init_end] is the init section that may be freed after init |
@@ -57,13 +52,24 @@ | |||
57 | #define LOAD_OFFSET 0 | 52 | #define LOAD_OFFSET 0 |
58 | #endif | 53 | #endif |
59 | 54 | ||
60 | #ifndef VMLINUX_SYMBOL | 55 | #ifndef SYMBOL_PREFIX |
61 | #define VMLINUX_SYMBOL(_sym_) _sym_ | 56 | #define VMLINUX_SYMBOL(sym) sym |
57 | #else | ||
58 | #define PASTE2(x,y) x##y | ||
59 | #define PASTE(x,y) PASTE2(x,y) | ||
60 | #define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym) | ||
62 | #endif | 61 | #endif |
63 | 62 | ||
64 | /* Align . to a 8 byte boundary equals to maximum function alignment. */ | 63 | /* Align . to a 8 byte boundary equals to maximum function alignment. */ |
65 | #define ALIGN_FUNCTION() . = ALIGN(8) | 64 | #define ALIGN_FUNCTION() . = ALIGN(8) |
66 | 65 | ||
66 | /* | ||
67 | * Align to a 32 byte boundary equal to the | ||
68 | * alignment gcc 4.5 uses for a struct | ||
69 | */ | ||
70 | #define STRUCT_ALIGNMENT 32 | ||
71 | #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) | ||
72 | |||
67 | /* The actual configuration determine if the init/exit sections | 73 | /* The actual configuration determine if the init/exit sections |
68 | * are handled as text/data or they can be discarded (which | 74 | * are handled as text/data or they can be discarded (which |
69 | * often happens at runtime) | 75 | * often happens at runtime) |
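VMLINUX_SYMBOL() now pastes an optional SYMBOL_PREFIX onto every linker-visible symbol, and STRUCT_ALIGN() pins structure-holding sections to the 32-byte alignment gcc 4.5 gives structs. The two-step PASTE() is what lets SYMBOL_PREFIX itself be macro-expanded before concatenation; for a toolchain that prepends an underscore to C symbols the effect is roughly this (the -D value is an assumption, not part of the hunk):

/* Assume the linker-script preprocessing passes -DSYMBOL_PREFIX=_ : */
#define SYMBOL_PREFIX _
/* VMLINUX_SYMBOL(__initramfs_start) -> PASTE(_, __initramfs_start)
 *                                   -> ___initramfs_start            */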
@@ -93,7 +99,8 @@ | |||
93 | #endif | 99 | #endif |
94 | 100 | ||
95 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 101 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
96 | #define MCOUNT_REC() VMLINUX_SYMBOL(__start_mcount_loc) = .; \ | 102 | #define MCOUNT_REC() . = ALIGN(8); \ |
103 | VMLINUX_SYMBOL(__start_mcount_loc) = .; \ | ||
97 | *(__mcount_loc) \ | 104 | *(__mcount_loc) \ |
98 | VMLINUX_SYMBOL(__stop_mcount_loc) = .; | 105 | VMLINUX_SYMBOL(__stop_mcount_loc) = .; |
99 | #else | 106 | #else |
@@ -140,20 +147,24 @@ | |||
140 | #define TRACE_SYSCALLS() | 147 | #define TRACE_SYSCALLS() |
141 | #endif | 148 | #endif |
142 | 149 | ||
150 | |||
151 | #define KERNEL_DTB() \ | ||
152 | STRUCT_ALIGN(); \ | ||
153 | VMLINUX_SYMBOL(__dtb_start) = .; \ | ||
154 | *(.dtb.init.rodata) \ | ||
155 | VMLINUX_SYMBOL(__dtb_end) = .; | ||
156 | |||
143 | /* .data section */ | 157 | /* .data section */ |
144 | #define DATA_DATA \ | 158 | #define DATA_DATA \ |
145 | *(.data) \ | 159 | *(.data) \ |
146 | *(.ref.data) \ | 160 | *(.ref.data) \ |
161 | *(.data..shared_aligned) /* percpu related */ \ | ||
147 | DEV_KEEP(init.data) \ | 162 | DEV_KEEP(init.data) \ |
148 | DEV_KEEP(exit.data) \ | 163 | DEV_KEEP(exit.data) \ |
149 | CPU_KEEP(init.data) \ | 164 | CPU_KEEP(init.data) \ |
150 | CPU_KEEP(exit.data) \ | 165 | CPU_KEEP(exit.data) \ |
151 | MEM_KEEP(init.data) \ | 166 | MEM_KEEP(init.data) \ |
152 | MEM_KEEP(exit.data) \ | 167 | MEM_KEEP(exit.data) \ |
153 | . = ALIGN(8); \ | ||
154 | VMLINUX_SYMBOL(__start___markers) = .; \ | ||
155 | *(__markers) \ | ||
156 | VMLINUX_SYMBOL(__stop___markers) = .; \ | ||
157 | . = ALIGN(32); \ | 168 | . = ALIGN(32); \ |
158 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ | 169 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ |
159 | *(__tracepoints) \ | 170 | *(__tracepoints) \ |
@@ -166,7 +177,11 @@ | |||
166 | LIKELY_PROFILE() \ | 177 | LIKELY_PROFILE() \ |
167 | BRANCH_PROFILE() \ | 178 | BRANCH_PROFILE() \ |
168 | TRACE_PRINTKS() \ | 179 | TRACE_PRINTKS() \ |
180 | \ | ||
181 | STRUCT_ALIGN(); \ | ||
169 | FTRACE_EVENTS() \ | 182 | FTRACE_EVENTS() \ |
183 | \ | ||
184 | STRUCT_ALIGN(); \ | ||
170 | TRACE_SYSCALLS() | 185 | TRACE_SYSCALLS() |
171 | 186 | ||
172 | /* | 187 | /* |
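DATA_DATA drops the old __markers bookkeeping, keeps tracepoints, and STRUCT_ALIGN()s the ftrace-event and syscall-trace tables, while the new KERNEL_DTB() gathers device-tree blobs built into the image (.dtb.init.rodata) between __dtb_start and __dtb_end. A hedged sketch of how early arch code can look at that region; the two symbols come from this hunk, the helper name is hypothetical:

extern char __dtb_start[], __dtb_end[];  /* bound .dtb.init.rodata */

static void __init maybe_use_builtin_dtb(void)
{
        if (__dtb_end > __dtb_start)
                unflatten_builtin_dtb(__dtb_start);  /* hypothetical helper */
}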
@@ -175,25 +190,25 @@ | |||
175 | #define NOSAVE_DATA \ | 190 | #define NOSAVE_DATA \ |
176 | . = ALIGN(PAGE_SIZE); \ | 191 | . = ALIGN(PAGE_SIZE); \ |
177 | VMLINUX_SYMBOL(__nosave_begin) = .; \ | 192 | VMLINUX_SYMBOL(__nosave_begin) = .; \ |
178 | *(.data.nosave) \ | 193 | *(.data..nosave) \ |
179 | . = ALIGN(PAGE_SIZE); \ | 194 | . = ALIGN(PAGE_SIZE); \ |
180 | VMLINUX_SYMBOL(__nosave_end) = .; | 195 | VMLINUX_SYMBOL(__nosave_end) = .; |
181 | 196 | ||
182 | #define PAGE_ALIGNED_DATA(page_align) \ | 197 | #define PAGE_ALIGNED_DATA(page_align) \ |
183 | . = ALIGN(page_align); \ | 198 | . = ALIGN(page_align); \ |
184 | *(.data.page_aligned) | 199 | *(.data..page_aligned) |
185 | 200 | ||
186 | #define READ_MOSTLY_DATA(align) \ | 201 | #define READ_MOSTLY_DATA(align) \ |
187 | . = ALIGN(align); \ | 202 | . = ALIGN(align); \ |
188 | *(.data.read_mostly) | 203 | *(.data..read_mostly) |
189 | 204 | ||
190 | #define CACHELINE_ALIGNED_DATA(align) \ | 205 | #define CACHELINE_ALIGNED_DATA(align) \ |
191 | . = ALIGN(align); \ | 206 | . = ALIGN(align); \ |
192 | *(.data.cacheline_aligned) | 207 | *(.data..cacheline_aligned) |
193 | 208 | ||
194 | #define INIT_TASK(align) \ | 209 | #define INIT_TASK_DATA(align) \ |
195 | . = ALIGN(align); \ | 210 | . = ALIGN(align); \ |
196 | *(.data.init_task) | 211 | *(.data..init_task) |
197 | 212 | ||
198 | /* | 213 | /* |
199 | * Read only Data | 214 | * Read only Data |
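The input sections gain a '..' separator (.data..nosave, .data..page_aligned, .data..read_mostly, .data..cacheline_aligned, .data..init_task) so kernel-named sections cannot be confused with compiler-generated .data.* output from -fdata-sections, and the INIT_TASK macro becomes INIT_TASK_DATA. C code reaches these sections through attribute wrappers; the definition below matches how architectures typically provide __read_mostly after this rename, but it is reproduced from memory and the variable is illustrative:

/* Lands in .data..read_mostly, which READ_MOSTLY_DATA() collects. */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

static int example_tuning_knob __read_mostly;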
@@ -214,6 +229,8 @@ | |||
214 | \ | 229 | \ |
215 | BUG_TABLE \ | 230 | BUG_TABLE \ |
216 | \ | 231 | \ |
232 | JUMP_TABLE \ | ||
233 | \ | ||
217 | /* PCI quirks */ \ | 234 | /* PCI quirks */ \ |
218 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ | 235 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ |
219 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ | 236 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ |
@@ -247,10 +264,10 @@ | |||
247 | } \ | 264 | } \ |
248 | \ | 265 | \ |
249 | /* RapidIO route ops */ \ | 266 | /* RapidIO route ops */ \ |
250 | .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ | 267 | .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \ |
251 | VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ | 268 | VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \ |
252 | *(.rio_route_ops) \ | 269 | *(.rio_switch_ops) \ |
253 | VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ | 270 | VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \ |
254 | } \ | 271 | } \ |
255 | \ | 272 | \ |
256 | TRACEDATA \ | 273 | TRACEDATA \ |
@@ -333,7 +350,6 @@ | |||
333 | /* __*init sections */ \ | 350 | /* __*init sections */ \ |
334 | __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ | 351 | __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ |
335 | *(.ref.rodata) \ | 352 | *(.ref.rodata) \ |
336 | MCOUNT_REC() \ | ||
337 | DEV_KEEP(init.rodata) \ | 353 | DEV_KEEP(init.rodata) \ |
338 | DEV_KEEP(exit.rodata) \ | 354 | DEV_KEEP(exit.rodata) \ |
339 | CPU_KEEP(init.rodata) \ | 355 | CPU_KEEP(init.rodata) \ |
@@ -434,14 +450,15 @@ | |||
434 | /* | 450 | /* |
435 | * Init task | 451 | * Init task |
436 | */ | 452 | */ |
437 | #define INIT_TASK_DATA(align) \ | 453 | #define INIT_TASK_DATA_SECTION(align) \ |
438 | . = ALIGN(align); \ | 454 | . = ALIGN(align); \ |
439 | .data.init_task : { \ | 455 | .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ |
440 | INIT_TASK \ | 456 | INIT_TASK_DATA(align) \ |
441 | } | 457 | } |
442 | 458 | ||
443 | #ifdef CONFIG_CONSTRUCTORS | 459 | #ifdef CONFIG_CONSTRUCTORS |
444 | #define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \ | 460 | #define KERNEL_CTORS() . = ALIGN(8); \ |
461 | VMLINUX_SYMBOL(__ctors_start) = .; \ | ||
445 | *(.ctors) \ | 462 | *(.ctors) \ |
446 | VMLINUX_SYMBOL(__ctors_end) = .; | 463 | VMLINUX_SYMBOL(__ctors_end) = .; |
447 | #else | 464 | #else |
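INIT_TASK_DATA_SECTION() now emits the renamed .data..init_task output section through INIT_TASK_DATA(), and KERNEL_CTORS() aligns the constructor table to 8 bytes before bracketing it with __ctors_start/__ctors_end. The consumer side is boot code that walks that table; the sketch below mirrors the kernel's do_ctors() logic from memory, so treat the typedef and loop shape as assumptions:

typedef void (*ctor_fn_t)(void);
extern ctor_fn_t __ctors_start[], __ctors_end[];   /* from KERNEL_CTORS() */

static void __init run_ctors(void)
{
        ctor_fn_t *fn;

        for (fn = __ctors_start; fn < __ctors_end; fn++)
                (*fn)();
}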
@@ -456,9 +473,11 @@ | |||
456 | MEM_DISCARD(init.data) \ | 473 | MEM_DISCARD(init.data) \ |
457 | KERNEL_CTORS() \ | 474 | KERNEL_CTORS() \ |
458 | *(.init.rodata) \ | 475 | *(.init.rodata) \ |
476 | MCOUNT_REC() \ | ||
459 | DEV_DISCARD(init.rodata) \ | 477 | DEV_DISCARD(init.rodata) \ |
460 | CPU_DISCARD(init.rodata) \ | 478 | CPU_DISCARD(init.rodata) \ |
461 | MEM_DISCARD(init.rodata) | 479 | MEM_DISCARD(init.rodata) \ |
480 | KERNEL_DTB() | ||
462 | 481 | ||
463 | #define INIT_TEXT \ | 482 | #define INIT_TEXT \ |
464 | *(.init.text) \ | 483 | *(.init.text) \ |
@@ -488,7 +507,8 @@ | |||
488 | * bss (Block Started by Symbol) - uninitialized data | 507 | * bss (Block Started by Symbol) - uninitialized data |
489 | * zeroed during startup | 508 | * zeroed during startup |
490 | */ | 509 | */ |
491 | #define SBSS \ | 510 | #define SBSS(sbss_align) \ |
511 | . = ALIGN(sbss_align); \ | ||
492 | .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ | 512 | .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ |
493 | *(.sbss) \ | 513 | *(.sbss) \ |
494 | *(.scommon) \ | 514 | *(.scommon) \ |
@@ -497,12 +517,10 @@ | |||
497 | #define BSS(bss_align) \ | 517 | #define BSS(bss_align) \ |
498 | . = ALIGN(bss_align); \ | 518 | . = ALIGN(bss_align); \ |
499 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ | 519 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ |
500 | VMLINUX_SYMBOL(__bss_start) = .; \ | 520 | *(.bss..page_aligned) \ |
501 | *(.bss.page_aligned) \ | ||
502 | *(.dynbss) \ | 521 | *(.dynbss) \ |
503 | *(.bss) \ | 522 | *(.bss) \ |
504 | *(COMMON) \ | 523 | *(COMMON) \ |
505 | VMLINUX_SYMBOL(__bss_stop) = .; \ | ||
506 | } | 524 | } |
507 | 525 | ||
508 | /* | 526 | /* |
@@ -557,6 +575,14 @@ | |||
557 | #define BUG_TABLE | 575 | #define BUG_TABLE |
558 | #endif | 576 | #endif |
559 | 577 | ||
578 | #define JUMP_TABLE \ | ||
579 | . = ALIGN(8); \ | ||
580 | __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ | ||
581 | VMLINUX_SYMBOL(__start___jump_table) = .; \ | ||
582 | *(__jump_table) \ | ||
583 | VMLINUX_SYMBOL(__stop___jump_table) = .; \ | ||
584 | } | ||
585 | |||
560 | #ifdef CONFIG_PM_TRACE | 586 | #ifdef CONFIG_PM_TRACE |
561 | #define TRACEDATA \ | 587 | #define TRACEDATA \ |
562 | . = ALIGN(4); \ | 588 | . = ALIGN(4); \ |
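JUMP_TABLE collects the entries the jump-label machinery emits into __jump_table, 8-byte aligned and bracketed by __start___jump_table/__stop___jump_table, and RO_DATA now pulls it in next to BUG_TABLE. A sketch of the iteration the jump-label core performs over the region; the entry layout shown is the conventional one for this feature and is an assumption here, as is the per-entry helper:

/* Assumed layout: patched instruction address, jump target, key. */
struct jump_entry {
        unsigned long code;
        unsigned long target;
        unsigned long key;
};

extern struct jump_entry __start___jump_table[], __stop___jump_table[];

static void __init walk_jump_table(void)
{
        struct jump_entry *e;

        for (e = __start___jump_table; e < __stop___jump_table; e++)
                register_jump_entry(e);   /* hypothetical per-entry hook */
}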
@@ -620,14 +646,33 @@ | |||
620 | 646 | ||
621 | #ifdef CONFIG_BLK_DEV_INITRD | 647 | #ifdef CONFIG_BLK_DEV_INITRD |
622 | #define INIT_RAM_FS \ | 648 | #define INIT_RAM_FS \ |
623 | . = ALIGN(PAGE_SIZE); \ | 649 | . = ALIGN(4); \ |
624 | VMLINUX_SYMBOL(__initramfs_start) = .; \ | 650 | VMLINUX_SYMBOL(__initramfs_start) = .; \ |
625 | *(.init.ramfs) \ | 651 | *(.init.ramfs) \ |
626 | VMLINUX_SYMBOL(__initramfs_end) = .; | 652 | . = ALIGN(8); \ |
653 | *(.init.ramfs.info) | ||
627 | #else | 654 | #else |
628 | #define INIT_RAM_FS | 655 | #define INIT_RAM_FS |
629 | #endif | 656 | #endif |
630 | 657 | ||
658 | /* | ||
659 | * Default discarded sections. | ||
660 | * | ||
661 | * Some archs want to discard exit text/data at runtime rather than | ||
662 | * link time due to cross-section references such as alt instructions, | ||
663 | * bug table, eh_frame, etc. DISCARDS must be the last of output | ||
664 | * section definitions so that such archs put those in earlier section | ||
665 | * definitions. | ||
666 | */ | ||
667 | #define DISCARDS \ | ||
668 | /DISCARD/ : { \ | ||
669 | EXIT_TEXT \ | ||
670 | EXIT_DATA \ | ||
671 | EXIT_CALL \ | ||
672 | *(.discard) \ | ||
673 | *(.discard.*) \ | ||
674 | } | ||
675 | |||
631 | /** | 676 | /** |
632 | * PERCPU_VADDR - define output section for percpu area | 677 | * PERCPU_VADDR - define output section for percpu area |
633 | * @vaddr: explicit base address (optional) | 678 | * @vaddr: explicit base address (optional) |
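INIT_RAM_FS relaxes the initramfs alignment to 4 bytes and adds the 8-byte-aligned .init.ramfs.info record, and the new DISCARDS macro centralizes the /DISCARD/ output section that used to be open-coded (it must stay last so architectures that keep exit code can place those sections earlier). What ends up in EXIT_TEXT/EXIT_DATA is ordinary __exit code, as in this illustrative driver fragment:

#include <linux/init.h>
#include <linux/module.h>

/* __exit puts this in .exit.text; on a built-in configuration whose
 * architecture discards exit code at link time, DISCARDS drops it. */
static void __exit example_driver_exit(void)
{
        /* illustrative teardown */
}
module_exit(example_driver_exit);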
@@ -649,16 +694,18 @@ | |||
649 | */ | 694 | */ |
650 | #define PERCPU_VADDR(vaddr, phdr) \ | 695 | #define PERCPU_VADDR(vaddr, phdr) \ |
651 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ | 696 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ |
652 | .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ | 697 | .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ |
653 | - LOAD_OFFSET) { \ | 698 | - LOAD_OFFSET) { \ |
654 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 699 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
655 | *(.data.percpu.first) \ | 700 | *(.data..percpu..first) \ |
656 | *(.data.percpu.page_aligned) \ | 701 | . = ALIGN(PAGE_SIZE); \ |
657 | *(.data.percpu) \ | 702 | *(.data..percpu..page_aligned) \ |
658 | *(.data.percpu.shared_aligned) \ | 703 | *(.data..percpu..readmostly) \ |
704 | *(.data..percpu) \ | ||
705 | *(.data..percpu..shared_aligned) \ | ||
659 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ | 706 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ |
660 | } phdr \ | 707 | } phdr \ |
661 | . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu); | 708 | . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); |
662 | 709 | ||
663 | /** | 710 | /** |
664 | * PERCPU - define output section for percpu area, simple version | 711 | * PERCPU - define output section for percpu area, simple version |
@@ -670,18 +717,20 @@ | |||
670 | * | 717 | * |
671 | * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except | 718 | * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except |
672 | * that __per_cpu_load is defined as a relative symbol against | 719 | * that __per_cpu_load is defined as a relative symbol against |
673 | * .data.percpu which is required for relocatable x86_32 | 720 | * .data..percpu which is required for relocatable x86_32 |
674 | * configuration. | 721 | * configuration. |
675 | */ | 722 | */ |
676 | #define PERCPU(align) \ | 723 | #define PERCPU(align) \ |
677 | . = ALIGN(align); \ | 724 | . = ALIGN(align); \ |
678 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ | 725 | .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ |
679 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ | 726 | VMLINUX_SYMBOL(__per_cpu_load) = .; \ |
680 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 727 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
681 | *(.data.percpu.first) \ | 728 | *(.data..percpu..first) \ |
682 | *(.data.percpu.page_aligned) \ | 729 | . = ALIGN(PAGE_SIZE); \ |
683 | *(.data.percpu) \ | 730 | *(.data..percpu..page_aligned) \ |
684 | *(.data.percpu.shared_aligned) \ | 731 | *(.data..percpu..readmostly) \ |
732 | *(.data..percpu) \ | ||
733 | *(.data..percpu..shared_aligned) \ | ||
685 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ | 734 | VMLINUX_SYMBOL(__per_cpu_end) = .; \ |
686 | } | 735 | } |
687 | 736 | ||
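Both PERCPU_VADDR() and PERCPU() move to the '..' section names (.data..percpu..first, ..page_aligned, the new ..readmostly, and so on) and page-align the page_aligned input explicitly. Users are unaffected: per-CPU variables still reach these sections through the DEFINE_PER_CPU family, as in this sketch (the counter and its accessor are illustrative):

#include <linux/percpu.h>

/* Placed in .data..percpu (aligned variants of the macro select the
 * ..shared_aligned / ..page_aligned input sections instead). */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void note_hit(void)
{
        __this_cpu_inc(example_hits);   /* bumps the calling CPU's copy */
}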
@@ -704,16 +753,16 @@ | |||
704 | * matches the requirment of PAGE_ALIGNED_DATA. | 753 | * matches the requirment of PAGE_ALIGNED_DATA. |
705 | * | 754 | * |
706 | * use 0 as page_align if page_aligned data is not used */ | 755 | * use 0 as page_align if page_aligned data is not used */ |
707 | #define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \ | 756 | #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ |
708 | . = ALIGN(PAGE_SIZE); \ | 757 | . = ALIGN(PAGE_SIZE); \ |
709 | .data : AT(ADDR(.data) - LOAD_OFFSET) { \ | 758 | .data : AT(ADDR(.data) - LOAD_OFFSET) { \ |
710 | INIT_TASK(inittask) \ | 759 | INIT_TASK_DATA(inittask) \ |
760 | NOSAVE_DATA \ | ||
761 | PAGE_ALIGNED_DATA(pagealigned) \ | ||
711 | CACHELINE_ALIGNED_DATA(cacheline) \ | 762 | CACHELINE_ALIGNED_DATA(cacheline) \ |
712 | READ_MOSTLY_DATA(cacheline) \ | 763 | READ_MOSTLY_DATA(cacheline) \ |
713 | DATA_DATA \ | 764 | DATA_DATA \ |
714 | CONSTRUCTORS \ | 765 | CONSTRUCTORS \ |
715 | NOSAVE_DATA(nosave) \ | ||
716 | PAGE_ALIGNED_DATA(pagealigned) \ | ||
717 | } | 766 | } |
718 | 767 | ||
719 | #define INIT_TEXT_SECTION(inittext_align) \ | 768 | #define INIT_TEXT_SECTION(inittext_align) \ |
@@ -734,8 +783,10 @@ | |||
734 | INIT_RAM_FS \ | 783 | INIT_RAM_FS \ |
735 | } | 784 | } |
736 | 785 | ||
737 | #define BSS_SECTION(sbss_align, bss_align) \ | 786 | #define BSS_SECTION(sbss_align, bss_align, stop_align) \ |
738 | SBSS \ | 787 | . = ALIGN(sbss_align); \ |
788 | VMLINUX_SYMBOL(__bss_start) = .; \ | ||
789 | SBSS(sbss_align) \ | ||
739 | BSS(bss_align) \ | 790 | BSS(bss_align) \ |
740 | . = ALIGN(4); | 791 | . = ALIGN(stop_align); \ |
741 | 792 | VMLINUX_SYMBOL(__bss_stop) = .; | |
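BSS_SECTION() now takes a third stop_align argument and defines __bss_start/__bss_stop itself around the combined .sbss/.bss output, replacing the hand-placed markers removed from BSS() above. The usual consumer is very early boot code that zeroes the region before C code relies on it; a sketch, with the function name illustrative:

#include <linux/string.h>

extern char __bss_start[], __bss_stop[];   /* from BSS_SECTION() */

static void __init clear_bss(void)
{
        memset(__bss_start, 0, __bss_stop - __bss_start);
}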