path: root/include/asm-generic
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild | 22
-rw-r--r--  include/asm-generic/Kbuild.asm | 1
-rw-r--r--  include/asm-generic/atomic-long.h | 258
-rw-r--r--  include/asm-generic/atomic.h | 321
-rw-r--r--  include/asm-generic/atomic64.h | 42
-rw-r--r--  include/asm-generic/auxvec.h | 8
-rw-r--r--  include/asm-generic/bitops.h | 24
-rw-r--r--  include/asm-generic/bitops/atomic.h | 1
-rw-r--r--  include/asm-generic/bitsperlong.h | 32
-rw-r--r--  include/asm-generic/bugs.h | 10
-rw-r--r--  include/asm-generic/cache.h | 12
-rw-r--r--  include/asm-generic/cacheflush.h | 30
-rw-r--r--  include/asm-generic/checksum.h | 79
-rw-r--r--  include/asm-generic/current.h | 9
-rw-r--r--  include/asm-generic/delay.h | 9
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 190
-rw-r--r--  include/asm-generic/dma.h | 15
-rw-r--r--  include/asm-generic/errno.h | 2
-rw-r--r--  include/asm-generic/fb.h | 12
-rw-r--r--  include/asm-generic/getorder.h | 24
-rw-r--r--  include/asm-generic/hardirq.h | 21
-rw-r--r--  include/asm-generic/hw_irq.h | 9
-rw-r--r--  include/asm-generic/int-l64.h | 2
-rw-r--r--  include/asm-generic/int-ll64.h | 2
-rw-r--r--  include/asm-generic/io.h | 300
-rw-r--r--  include/asm-generic/ioctls.h | 110
-rw-r--r--  include/asm-generic/ipcbuf.h | 34
-rw-r--r--  include/asm-generic/irq.h | 18
-rw-r--r--  include/asm-generic/irqflags.h | 72
-rw-r--r--  include/asm-generic/kmap_types.h | 35
-rw-r--r--  include/asm-generic/linkage.h | 8
-rw-r--r--  include/asm-generic/mman-common.h | 41
-rw-r--r--  include/asm-generic/mman.h | 51
-rw-r--r--  include/asm-generic/mmu.h | 15
-rw-r--r--  include/asm-generic/mmu_context.h | 45
-rw-r--r--  include/asm-generic/module.h | 22
-rw-r--r--  include/asm-generic/msgbuf.h | 47
-rw-r--r--  include/asm-generic/mutex.h | 9
-rw-r--r--  include/asm-generic/page.h | 109
-rw-r--r--  include/asm-generic/param.h | 24
-rw-r--r--  include/asm-generic/parport.h | 23
-rw-r--r--  include/asm-generic/pci.h | 21
-rw-r--r--  include/asm-generic/pgalloc.h | 12
-rw-r--r--  include/asm-generic/pgtable.h | 25
-rw-r--r--  include/asm-generic/posix_types.h | 165
-rw-r--r--  include/asm-generic/rtc.h | 2
-rw-r--r--  include/asm-generic/scatterlist.h | 43
-rw-r--r--  include/asm-generic/sections.h | 3
-rw-r--r--  include/asm-generic/segment.h | 9
-rw-r--r--  include/asm-generic/sembuf.h | 38
-rw-r--r--  include/asm-generic/serial.h | 13
-rw-r--r--  include/asm-generic/setup.h | 6
-rw-r--r--  include/asm-generic/shmbuf.h | 59
-rw-r--r--  include/asm-generic/shmparam.h | 6
-rw-r--r--  include/asm-generic/signal-defs.h | 28
-rw-r--r--  include/asm-generic/signal.h | 137
-rw-r--r--  include/asm-generic/socket.h | 63
-rw-r--r--  include/asm-generic/sockios.h | 13
-rw-r--r--  include/asm-generic/spinlock.h | 11
-rw-r--r--  include/asm-generic/stat.h | 72
-rw-r--r--  include/asm-generic/string.h | 10
-rw-r--r--  include/asm-generic/swab.h | 18
-rw-r--r--  include/asm-generic/syscalls.h | 60
-rw-r--r--  include/asm-generic/system.h | 161
-rw-r--r--  include/asm-generic/termbits.h | 198
-rw-r--r--  include/asm-generic/termios-base.h | 77
-rw-r--r--  include/asm-generic/termios.h | 105
-rw-r--r--  include/asm-generic/timex.h | 22
-rw-r--r--  include/asm-generic/tlbflush.h | 18
-rw-r--r--  include/asm-generic/types.h | 42
-rw-r--r--  include/asm-generic/uaccess-unaligned.h | 26
-rw-r--r--  include/asm-generic/uaccess.h | 335
-rw-r--r--  include/asm-generic/ucontext.h | 12
-rw-r--r--  include/asm-generic/unaligned.h | 30
-rw-r--r--  include/asm-generic/unistd.h | 859
-rw-r--r--  include/asm-generic/user.h | 8
-rw-r--r--  include/asm-generic/vga.h | 24
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 252
78 files changed, 4731 insertions(+), 350 deletions(-)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 4c9932a2503f..eb62334cda29 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,11 +1,33 @@
+header-y += auxvec.h
+header-y += bitsperlong.h
 header-y += errno-base.h
 header-y += errno.h
 header-y += fcntl.h
 header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += mman-common.h
 header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
 header-y += poll.h
+header-y += posix_types.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += shmparam.h
+header-y += signal-defs.h
 header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
 header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += ucontext.h
+header-y += unistd.h
 
 unifdef-y += int-l64.h
 unifdef-y += int-ll64.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 70d185534b9d..290910e4ede4 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -9,6 +9,7 @@ unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
 unifdef-y += byteorder.h
+unifdef-y += bitsperlong.h
 unifdef-y += errno.h
 unifdef-y += fcntl.h
 unifdef-y += ioctl.h
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
new file mode 100644
index 000000000000..b7babf0206b8
--- /dev/null
+++ b/include/asm-generic/atomic-long.h
@@ -0,0 +1,258 @@
1#ifndef _ASM_GENERIC_ATOMIC_LONG_H
2#define _ASM_GENERIC_ATOMIC_LONG_H
3/*
4 * Copyright (C) 2005 Silicon Graphics, Inc.
5 * Christoph Lameter
6 *
7 * Allows to provide arch independent atomic definitions without the need to
8 * edit all arch specific atomic.h files.
9 */
10
11#include <asm/types.h>
12
13/*
14 * Suppport for atomic_long_t
15 *
16 * Casts for parameters are avoided for existing atomic functions in order to
17 * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
18 * macros of a platform may have.
19 */
20
21#if BITS_PER_LONG == 64
22
23typedef atomic64_t atomic_long_t;
24
25#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
26
27static inline long atomic_long_read(atomic_long_t *l)
28{
29 atomic64_t *v = (atomic64_t *)l;
30
31 return (long)atomic64_read(v);
32}
33
34static inline void atomic_long_set(atomic_long_t *l, long i)
35{
36 atomic64_t *v = (atomic64_t *)l;
37
38 atomic64_set(v, i);
39}
40
41static inline void atomic_long_inc(atomic_long_t *l)
42{
43 atomic64_t *v = (atomic64_t *)l;
44
45 atomic64_inc(v);
46}
47
48static inline void atomic_long_dec(atomic_long_t *l)
49{
50 atomic64_t *v = (atomic64_t *)l;
51
52 atomic64_dec(v);
53}
54
55static inline void atomic_long_add(long i, atomic_long_t *l)
56{
57 atomic64_t *v = (atomic64_t *)l;
58
59 atomic64_add(i, v);
60}
61
62static inline void atomic_long_sub(long i, atomic_long_t *l)
63{
64 atomic64_t *v = (atomic64_t *)l;
65
66 atomic64_sub(i, v);
67}
68
69static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70{
71 atomic64_t *v = (atomic64_t *)l;
72
73 return atomic64_sub_and_test(i, v);
74}
75
76static inline int atomic_long_dec_and_test(atomic_long_t *l)
77{
78 atomic64_t *v = (atomic64_t *)l;
79
80 return atomic64_dec_and_test(v);
81}
82
83static inline int atomic_long_inc_and_test(atomic_long_t *l)
84{
85 atomic64_t *v = (atomic64_t *)l;
86
87 return atomic64_inc_and_test(v);
88}
89
90static inline int atomic_long_add_negative(long i, atomic_long_t *l)
91{
92 atomic64_t *v = (atomic64_t *)l;
93
94 return atomic64_add_negative(i, v);
95}
96
97static inline long atomic_long_add_return(long i, atomic_long_t *l)
98{
99 atomic64_t *v = (atomic64_t *)l;
100
101 return (long)atomic64_add_return(i, v);
102}
103
104static inline long atomic_long_sub_return(long i, atomic_long_t *l)
105{
106 atomic64_t *v = (atomic64_t *)l;
107
108 return (long)atomic64_sub_return(i, v);
109}
110
111static inline long atomic_long_inc_return(atomic_long_t *l)
112{
113 atomic64_t *v = (atomic64_t *)l;
114
115 return (long)atomic64_inc_return(v);
116}
117
118static inline long atomic_long_dec_return(atomic_long_t *l)
119{
120 atomic64_t *v = (atomic64_t *)l;
121
122 return (long)atomic64_dec_return(v);
123}
124
125static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
126{
127 atomic64_t *v = (atomic64_t *)l;
128
129 return (long)atomic64_add_unless(v, a, u);
130}
131
132#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
133
134#define atomic_long_cmpxchg(l, old, new) \
135 (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
136#define atomic_long_xchg(v, new) \
137 (atomic64_xchg((atomic64_t *)(v), (new)))
138
139#else /* BITS_PER_LONG == 64 */
140
141typedef atomic_t atomic_long_t;
142
143#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
144static inline long atomic_long_read(atomic_long_t *l)
145{
146 atomic_t *v = (atomic_t *)l;
147
148 return (long)atomic_read(v);
149}
150
151static inline void atomic_long_set(atomic_long_t *l, long i)
152{
153 atomic_t *v = (atomic_t *)l;
154
155 atomic_set(v, i);
156}
157
158static inline void atomic_long_inc(atomic_long_t *l)
159{
160 atomic_t *v = (atomic_t *)l;
161
162 atomic_inc(v);
163}
164
165static inline void atomic_long_dec(atomic_long_t *l)
166{
167 atomic_t *v = (atomic_t *)l;
168
169 atomic_dec(v);
170}
171
172static inline void atomic_long_add(long i, atomic_long_t *l)
173{
174 atomic_t *v = (atomic_t *)l;
175
176 atomic_add(i, v);
177}
178
179static inline void atomic_long_sub(long i, atomic_long_t *l)
180{
181 atomic_t *v = (atomic_t *)l;
182
183 atomic_sub(i, v);
184}
185
186static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
187{
188 atomic_t *v = (atomic_t *)l;
189
190 return atomic_sub_and_test(i, v);
191}
192
193static inline int atomic_long_dec_and_test(atomic_long_t *l)
194{
195 atomic_t *v = (atomic_t *)l;
196
197 return atomic_dec_and_test(v);
198}
199
200static inline int atomic_long_inc_and_test(atomic_long_t *l)
201{
202 atomic_t *v = (atomic_t *)l;
203
204 return atomic_inc_and_test(v);
205}
206
207static inline int atomic_long_add_negative(long i, atomic_long_t *l)
208{
209 atomic_t *v = (atomic_t *)l;
210
211 return atomic_add_negative(i, v);
212}
213
214static inline long atomic_long_add_return(long i, atomic_long_t *l)
215{
216 atomic_t *v = (atomic_t *)l;
217
218 return (long)atomic_add_return(i, v);
219}
220
221static inline long atomic_long_sub_return(long i, atomic_long_t *l)
222{
223 atomic_t *v = (atomic_t *)l;
224
225 return (long)atomic_sub_return(i, v);
226}
227
228static inline long atomic_long_inc_return(atomic_long_t *l)
229{
230 atomic_t *v = (atomic_t *)l;
231
232 return (long)atomic_inc_return(v);
233}
234
235static inline long atomic_long_dec_return(atomic_long_t *l)
236{
237 atomic_t *v = (atomic_t *)l;
238
239 return (long)atomic_dec_return(v);
240}
241
242static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
243{
244 atomic_t *v = (atomic_t *)l;
245
246 return (long)atomic_add_unless(v, a, u);
247}
248
249#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
250
251#define atomic_long_cmpxchg(l, old, new) \
252 (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
253#define atomic_long_xchg(v, new) \
254 (atomic_xchg((atomic_t *)(v), (new)))
255
256#endif /* BITS_PER_LONG == 64 */
257
258#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
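
For illustration only (not part of this patch): the wrappers above let common code keep a counter that is 32 bits wide on 32-bit kernels and 64 bits wide on 64-bit kernels without caring which. A minimal usage sketch, assuming a kernel build where <asm/atomic.h> pulls this header in; the names are hypothetical:

/* Illustrative only -- not part of this diff. */
#include <asm/atomic.h>

static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);

static void example_alloc_one(void)
{
        atomic_long_inc(&nr_objects);           /* maps to atomic64_inc() or atomic_inc() */
}

static void example_free_one(void)
{
        if (atomic_long_dec_and_test(&nr_objects))
                ;                               /* last object gone */
}

static long example_count(void)
{
        return atomic_long_read(&nr_objects);   /* always returns a long */
}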
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 3673a13b6703..c99c64dc5f3d 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,258 +1,165 @@
1#ifndef _ASM_GENERIC_ATOMIC_H
2#define _ASM_GENERIC_ATOMIC_H
3/* 1/*
4 * Copyright (C) 2005 Silicon Graphics, Inc. 2 * Generic C implementation of atomic counter operations
5 * Christoph Lameter 3 * Originally implemented for MN10300.
6 * 4 *
7 * Allows to provide arch independent atomic definitions without the need to 5 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
8 * edit all arch specific atomic.h files. 6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
9 */ 12 */
13#ifndef __ASM_GENERIC_ATOMIC_H
14#define __ASM_GENERIC_ATOMIC_H
10 15
11#include <asm/types.h> 16#ifdef CONFIG_SMP
17#error not SMP safe
18#endif
12 19
13/* 20/*
14 * Suppport for atomic_long_t 21 * Atomic operations that C can't guarantee us. Useful for
15 * 22 * resource counting etc..
16 * Casts for parameters are avoided for existing atomic functions in order to
17 * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
18 * macros of a platform may have.
19 */ 23 */
20 24
21#if BITS_PER_LONG == 64 25#define ATOMIC_INIT(i) { (i) }
22
23typedef atomic64_t atomic_long_t;
24
25#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
26 26
27static inline long atomic_long_read(atomic_long_t *l) 27#ifdef __KERNEL__
28{
29 atomic64_t *v = (atomic64_t *)l;
30
31 return (long)atomic64_read(v);
32}
33
34static inline void atomic_long_set(atomic_long_t *l, long i)
35{
36 atomic64_t *v = (atomic64_t *)l;
37
38 atomic64_set(v, i);
39}
40
41static inline void atomic_long_inc(atomic_long_t *l)
42{
43 atomic64_t *v = (atomic64_t *)l;
44
45 atomic64_inc(v);
46}
47
48static inline void atomic_long_dec(atomic_long_t *l)
49{
50 atomic64_t *v = (atomic64_t *)l;
51
52 atomic64_dec(v);
53}
54
55static inline void atomic_long_add(long i, atomic_long_t *l)
56{
57 atomic64_t *v = (atomic64_t *)l;
58
59 atomic64_add(i, v);
60}
61
62static inline void atomic_long_sub(long i, atomic_long_t *l)
63{
64 atomic64_t *v = (atomic64_t *)l;
65
66 atomic64_sub(i, v);
67}
68
69static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70{
71 atomic64_t *v = (atomic64_t *)l;
72
73 return atomic64_sub_and_test(i, v);
74}
75
76static inline int atomic_long_dec_and_test(atomic_long_t *l)
77{
78 atomic64_t *v = (atomic64_t *)l;
79
80 return atomic64_dec_and_test(v);
81}
82
83static inline int atomic_long_inc_and_test(atomic_long_t *l)
84{
85 atomic64_t *v = (atomic64_t *)l;
86
87 return atomic64_inc_and_test(v);
88}
89
90static inline int atomic_long_add_negative(long i, atomic_long_t *l)
91{
92 atomic64_t *v = (atomic64_t *)l;
93
94 return atomic64_add_negative(i, v);
95}
96
97static inline long atomic_long_add_return(long i, atomic_long_t *l)
98{
99 atomic64_t *v = (atomic64_t *)l;
100
101 return (long)atomic64_add_return(i, v);
102}
103
104static inline long atomic_long_sub_return(long i, atomic_long_t *l)
105{
106 atomic64_t *v = (atomic64_t *)l;
107 28
108 return (long)atomic64_sub_return(i, v); 29/**
109} 30 * atomic_read - read atomic variable
110 31 * @v: pointer of type atomic_t
111static inline long atomic_long_inc_return(atomic_long_t *l) 32 *
112{ 33 * Atomically reads the value of @v. Note that the guaranteed
113 atomic64_t *v = (atomic64_t *)l; 34 * useful range of an atomic_t is only 24 bits.
114 35 */
115 return (long)atomic64_inc_return(v); 36#define atomic_read(v) ((v)->counter)
116}
117
118static inline long atomic_long_dec_return(atomic_long_t *l)
119{
120 atomic64_t *v = (atomic64_t *)l;
121
122 return (long)atomic64_dec_return(v);
123}
124
125static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
126{
127 atomic64_t *v = (atomic64_t *)l;
128
129 return (long)atomic64_add_unless(v, a, u);
130}
131
132#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
133
134#define atomic_long_cmpxchg(l, old, new) \
135 (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
136#define atomic_long_xchg(v, new) \
137 (atomic64_xchg((atomic64_t *)(l), (new)))
138
139#else /* BITS_PER_LONG == 64 */
140
141typedef atomic_t atomic_long_t;
142
143#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
144static inline long atomic_long_read(atomic_long_t *l)
145{
146 atomic_t *v = (atomic_t *)l;
147
148 return (long)atomic_read(v);
149}
150 37
151static inline void atomic_long_set(atomic_long_t *l, long i) 38/**
152{ 39 * atomic_set - set atomic variable
153 atomic_t *v = (atomic_t *)l; 40 * @v: pointer of type atomic_t
41 * @i: required value
42 *
43 * Atomically sets the value of @v to @i. Note that the guaranteed
44 * useful range of an atomic_t is only 24 bits.
45 */
46#define atomic_set(v, i) (((v)->counter) = (i))
154 47
155 atomic_set(v, i); 48#include <asm/system.h>
156}
157 49
158static inline void atomic_long_inc(atomic_long_t *l) 50/**
51 * atomic_add_return - add integer to atomic variable
52 * @i: integer value to add
53 * @v: pointer of type atomic_t
54 *
55 * Atomically adds @i to @v and returns the result
56 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
57 */
58static inline int atomic_add_return(int i, atomic_t *v)
159{ 59{
160 atomic_t *v = (atomic_t *)l; 60 unsigned long flags;
161 61 int temp;
162 atomic_inc(v);
163}
164 62
165static inline void atomic_long_dec(atomic_long_t *l) 63 local_irq_save(flags);
166{ 64 temp = v->counter;
167 atomic_t *v = (atomic_t *)l; 65 temp += i;
66 v->counter = temp;
67 local_irq_restore(flags);
168 68
169 atomic_dec(v); 69 return temp;
170} 70}
171 71
172static inline void atomic_long_add(long i, atomic_long_t *l) 72/**
73 * atomic_sub_return - subtract integer from atomic variable
74 * @i: integer value to subtract
75 * @v: pointer of type atomic_t
76 *
77 * Atomically subtracts @i from @v and returns the result
78 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
79 */
80static inline int atomic_sub_return(int i, atomic_t *v)
173{ 81{
174 atomic_t *v = (atomic_t *)l; 82 unsigned long flags;
175 83 int temp;
176 atomic_add(i, v);
177}
178 84
179static inline void atomic_long_sub(long i, atomic_long_t *l) 85 local_irq_save(flags);
180{ 86 temp = v->counter;
181 atomic_t *v = (atomic_t *)l; 87 temp -= i;
88 v->counter = temp;
89 local_irq_restore(flags);
182 90
183 atomic_sub(i, v); 91 return temp;
184} 92}
185 93
186static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 94static inline int atomic_add_negative(int i, atomic_t *v)
187{ 95{
188 atomic_t *v = (atomic_t *)l; 96 return atomic_add_return(i, v) < 0;
189
190 return atomic_sub_and_test(i, v);
191} 97}
192 98
193static inline int atomic_long_dec_and_test(atomic_long_t *l) 99static inline void atomic_add(int i, atomic_t *v)
194{ 100{
195 atomic_t *v = (atomic_t *)l; 101 atomic_add_return(i, v);
196
197 return atomic_dec_and_test(v);
198} 102}
199 103
200static inline int atomic_long_inc_and_test(atomic_long_t *l) 104static inline void atomic_sub(int i, atomic_t *v)
201{ 105{
202 atomic_t *v = (atomic_t *)l; 106 atomic_sub_return(i, v);
203
204 return atomic_inc_and_test(v);
205} 107}
206 108
207static inline int atomic_long_add_negative(long i, atomic_long_t *l) 109static inline void atomic_inc(atomic_t *v)
208{ 110{
209 atomic_t *v = (atomic_t *)l; 111 atomic_add_return(1, v);
210
211 return atomic_add_negative(i, v);
212} 112}
213 113
214static inline long atomic_long_add_return(long i, atomic_long_t *l) 114static inline void atomic_dec(atomic_t *v)
215{ 115{
216 atomic_t *v = (atomic_t *)l; 116 atomic_sub_return(1, v);
217
218 return (long)atomic_add_return(i, v);
219} 117}
220 118
221static inline long atomic_long_sub_return(long i, atomic_long_t *l) 119#define atomic_dec_return(v) atomic_sub_return(1, (v))
222{ 120#define atomic_inc_return(v) atomic_add_return(1, (v))
223 atomic_t *v = (atomic_t *)l;
224 121
225 return (long)atomic_sub_return(i, v); 122#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
226} 123#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
124#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
227 125
228static inline long atomic_long_inc_return(atomic_long_t *l) 126#define atomic_add_unless(v, a, u) \
229{ 127({ \
230 atomic_t *v = (atomic_t *)l; 128 int c, old; \
129 c = atomic_read(v); \
130 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
131 c = old; \
132 c != (u); \
133})
231 134
232 return (long)atomic_inc_return(v); 135#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
233}
234 136
235static inline long atomic_long_dec_return(atomic_long_t *l) 137static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
236{ 138{
237 atomic_t *v = (atomic_t *)l; 139 unsigned long flags;
238 140
239 return (long)atomic_dec_return(v); 141 mask = ~mask;
142 local_irq_save(flags);
143 *addr &= mask;
144 local_irq_restore(flags);
240} 145}
241 146
242static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 147#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
243{ 148#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
244 atomic_t *v = (atomic_t *)l;
245 149
246 return (long)atomic_add_unless(v, a, u); 150#define cmpxchg_local(ptr, o, n) \
247} 151 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
152 (unsigned long)(n), sizeof(*(ptr))))
248 153
249#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) 154#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
250 155
251#define atomic_long_cmpxchg(l, old, new) \ 156/* Assume that atomic operations are already serializing */
252 (atomic_cmpxchg((atomic_t *)(l), (old), (new))) 157#define smp_mb__before_atomic_dec() barrier()
253#define atomic_long_xchg(v, new) \ 158#define smp_mb__after_atomic_dec() barrier()
254 (atomic_xchg((atomic_t *)(v), (new))) 159#define smp_mb__before_atomic_inc() barrier()
160#define smp_mb__after_atomic_inc() barrier()
255 161
256#endif /* BITS_PER_LONG == 64 */ 162#include <asm-generic/atomic-long.h>
257 163
258#endif /* _ASM_GENERIC_ATOMIC_H */ 164#endif /* __KERNEL__ */
165#endif /* __ASM_GENERIC_ATOMIC_H */
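
For illustration only (not part of this patch): the replacement atomic.h above implements atomic_t purely in C by disabling local interrupts around each read-modify-write, which is why it refuses to build under CONFIG_SMP. A hedged sketch of using the resulting API, with a hypothetical reference count:

/* Illustrative only -- a hypothetical refcount built on the generic atomic_t. */
#include <asm/atomic.h>

static atomic_t example_refcount = ATOMIC_INIT(1);

static int example_get(void)
{
        /* atomic_inc_not_zero() expands to atomic_add_unless(v, 1, 0) */
        return atomic_inc_not_zero(&example_refcount);
}

static void example_put(void)
{
        if (atomic_dec_and_test(&example_refcount))
                ;       /* release the object here */
}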
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 000000000000..b18ce4f9ee3d
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,42 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+
+typedef struct {
+        long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+extern long long atomic64_read(const atomic64_t *v);
+extern void atomic64_set(atomic64_t *v, long long i);
+extern void atomic64_add(long long a, atomic64_t *v);
+extern long long atomic64_add_return(long long a, atomic64_t *v);
+extern void atomic64_sub(long long a, atomic64_t *v);
+extern long long atomic64_sub_return(long long a, atomic64_t *v);
+extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long atomic64_xchg(atomic64_t *v, long long new);
+extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
+#endif /* _ASM_GENERIC_ATOMIC64_H */
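
For illustration only (not part of this patch): atomic64_t gives 32-bit machines a 64-bit counter; the operations are declared extern because the generic implementation (lib/atomic64.c in mainline) serializes them with spinlocks. A usage sketch with hypothetical names:

/* Illustrative only -- 64-bit statistics counter usable even on 32-bit CPUs. */
#include <asm-generic/atomic64.h>

static atomic64_t example_bytes_transferred = ATOMIC64_INIT(0);

static void example_account(unsigned int len)
{
        atomic64_add(len, &example_bytes_transferred);
}

static long long example_total(void)
{
        return atomic64_read(&example_bytes_transferred);
}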
diff --git a/include/asm-generic/auxvec.h b/include/asm-generic/auxvec.h
new file mode 100644
index 000000000000..b99573b0ad12
--- /dev/null
+++ b/include/asm-generic/auxvec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_AUXVEC_H
+#define __ASM_GENERIC_AUXVEC_H
+/*
+ * Not all architectures need their own auxvec.h, the most
+ * common definitions are already in linux/auxvec.h.
+ */
+
+#endif /* __ASM_GENERIC_AUXVEC_H */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index c9f369c4bd7e..a54f4421a24d 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -1,19 +1,29 @@
-#ifndef _ASM_GENERIC_BITOPS_H_
-#define _ASM_GENERIC_BITOPS_H_
+#ifndef __ASM_GENERIC_BITOPS_H
+#define __ASM_GENERIC_BITOPS_H
 
 /*
  * For the benefit of those who are trying to port Linux to another
  * architecture, here are some C-language equivalents. You should
  * recode these in the native assembly language, if at all possible.
  *
  * C language equivalents written by Theodore Ts'o, 9/26/92
  */
 
-#include <asm-generic/bitops/atomic.h>
-#include <asm-generic/bitops/non-atomic.h>
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+#endif
+
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/find.h>
 
@@ -26,8 +36,10 @@
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
 
-#endif /* _ASM_GENERIC_BITOPS_H */
+#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 4657f3e410fc..c8946465e63a 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_BITOPS_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
new file mode 100644
index 000000000000..4ae54e07de83
--- /dev/null
+++ b/include/asm-generic/bitsperlong.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+/*
+ * There seems to be no way of detecting this automatically from user
+ * space, so 64 bit architectures should override this in their
+ * bitsperlong.h. In particular, an architecture that supports
+ * both 32 and 64 bit user space must not rely on CONFIG_64BIT
+ * to decide it, but rather check a compiler provided macro.
+ */
+#ifndef __BITS_PER_LONG
+#define __BITS_PER_LONG 32
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_64BIT
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif /* CONFIG_64BIT */
+
+/*
+ * FIXME: The check currently breaks x86-64 build, so it's
+ * temporarily disabled. Please fix x86-64 and reenable
+ */
+#if 0 && BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
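
For illustration only (not part of this patch): the comment above asks 64-bit architectures to key __BITS_PER_LONG off a compiler-provided macro so that 32-bit user space compiled against the same headers sees the right value. A hypothetical per-arch override might look like this (the macro names here are an assumption, not taken from any real port):

/* Hypothetical arch asm/bitsperlong.h -- sketch only. */
#ifndef __ASM_EXAMPLE_BITSPERLONG_H
#define __ASM_EXAMPLE_BITSPERLONG_H

#if defined(__LP64__) || defined(_LP64)
#define __BITS_PER_LONG 64
#else
#define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>

#endif /* __ASM_EXAMPLE_BITSPERLONG_H */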
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
new file mode 100644
index 000000000000..6c4f62ea714d
--- /dev/null
+++ b/include/asm-generic/bugs.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_GENERIC_BUGS_H
+#define __ASM_GENERIC_BUGS_H
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
new file mode 100644
index 000000000000..1bfcfe5c2237
--- /dev/null
+++ b/include/asm-generic/cache.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_CACHE_H
+#define __ASM_GENERIC_CACHE_H
+/*
+ * 32 bytes appears to be the most common cache line size,
+ * so make that the default here. Architectures with larger
+ * cache lines need to provide their own cache.h.
+ */
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif /* __ASM_GENERIC_CACHE_H */
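
For illustration only (not part of this patch): L1_CACHE_BYTES feeds the alignment helpers in <linux/cache.h>; a typical use is padding hot counters onto separate cache lines to avoid false sharing. Hypothetical sketch:

/* Illustrative only -- keep two frequently written counters on separate lines. */
#include <linux/cache.h>

struct example_stats {
        unsigned long rx_packets ____cacheline_aligned;
        unsigned long tx_packets ____cacheline_aligned;
};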
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 000000000000..ba4ec39a1131
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,30 @@
1#ifndef __ASM_CACHEFLUSH_H
2#define __ASM_CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/*
8 * The cache doesn't need to be flushed when TLB entries change when
9 * the cache is mapped to physical memory, not virtual memory
10 */
11#define flush_cache_all() do { } while (0)
12#define flush_cache_mm(mm) do { } while (0)
13#define flush_cache_dup_mm(mm) do { } while (0)
14#define flush_cache_range(vma, start, end) do { } while (0)
15#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
16#define flush_dcache_page(page) do { } while (0)
17#define flush_dcache_mmap_lock(mapping) do { } while (0)
18#define flush_dcache_mmap_unlock(mapping) do { } while (0)
19#define flush_icache_range(start, end) do { } while (0)
20#define flush_icache_page(vma,pg) do { } while (0)
21#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
22#define flush_cache_vmap(start, end) do { } while (0)
23#define flush_cache_vunmap(start, end) do { } while (0)
24
25#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
26 memcpy(dst, src, len)
27#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
28 memcpy(dst, src, len)
29
30#endif /* __ASM_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
new file mode 100644
index 000000000000..4647c762d970
--- /dev/null
+++ b/include/asm-generic/checksum.h
@@ -0,0 +1,79 @@
1#ifndef __ASM_GENERIC_CHECKSUM_H
2#define __ASM_GENERIC_CHECKSUM_H
3
4/*
5 * computes the checksum of a memory block at buff, length len,
6 * and adds in "sum" (32-bit)
7 *
8 * returns a 32-bit number suitable for feeding into itself
9 * or csum_tcpudp_magic
10 *
11 * this function must be called with even lengths, except
12 * for the last fragment, which may be odd
13 *
14 * it's best to have buff aligned on a 32-bit boundary
15 */
16extern __wsum csum_partial(const void *buff, int len, __wsum sum);
17
18/*
19 * the same as csum_partial, but copies from src while it
20 * checksums
21 *
22 * here even more important to align src and dst on a 32-bit (or even
23 * better 64-bit) boundary
24 */
25extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
26
27/*
28 * the same as csum_partial_copy, but copies from user space.
29 *
30 * here even more important to align src and dst on a 32-bit (or even
31 * better 64-bit) boundary
32 */
33extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
34 int len, __wsum sum, int *csum_err);
35
36#define csum_partial_copy_nocheck(src, dst, len, sum) \
37 csum_partial_copy((src), (dst), (len), (sum))
38
39/*
40 * This is a version of ip_compute_csum() optimized for IP headers,
41 * which always checksum on 4 octet boundaries.
42 */
43extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
44
45/*
46 * Fold a partial checksum
47 */
48static inline __sum16 csum_fold(__wsum csum)
49{
50 u32 sum = (__force u32)csum;
51 sum = (sum & 0xffff) + (sum >> 16);
52 sum = (sum & 0xffff) + (sum >> 16);
53 return (__force __sum16)~sum;
54}
55
56#ifndef csum_tcpudp_nofold
57/*
58 * computes the checksum of the TCP/UDP pseudo-header
59 * returns a 16-bit checksum, already complemented
60 */
61extern __wsum
62csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
63 unsigned short proto, __wsum sum);
64#endif
65
66static inline __sum16
67csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
68 unsigned short proto, __wsum sum)
69{
70 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
71}
72
73/*
74 * this routine is used for miscellaneous IP-like checksums, mainly
75 * in icmp.c
76 */
77extern __sum16 ip_compute_csum(const void *buff, int len);
78
79#endif /* __ASM_GENERIC_CHECKSUM_H */
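
For illustration only (not part of this patch): csum_fold() above folds the 32-bit accumulator into 16 bits twice, so that a carry produced by the first fold is absorbed by the second before the result is complemented. The same arithmetic as a standalone program, with the kernel __wsum/__sum16 types replaced by stdint ones:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* first fold may still carry */
        sum = (sum & 0xffff) + (sum >> 16);     /* second fold cannot */
        return (uint16_t)~sum;
}

int main(void)
{
        /* 0x1ffff: 0xffff + 0x0001 = 0x10000, refolded to 0x0001; ~0x0001 = 0xfffe */
        printf("%#x\n", fold(0x1ffffu));
        return 0;
}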
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
new file mode 100644
index 000000000000..5e86f6ae7cab
--- /dev/null
+++ b/include/asm-generic/current.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_CURRENT_H
+#define __ASM_GENERIC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+
+#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
new file mode 100644
index 000000000000..4586fec75ddb
--- /dev/null
+++ b/include/asm-generic/delay.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_DELAY_H
+#define __ASM_GENERIC_DELAY_H
+
+extern void __udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) __udelay(n)
+
+#endif /* __ASM_GENERIC_DELAY_H */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
new file mode 100644
index 000000000000..5406a601185c
--- /dev/null
+++ b/include/asm-generic/dma-mapping-common.h
@@ -0,0 +1,190 @@
1#ifndef _ASM_GENERIC_DMA_MAPPING_H
2#define _ASM_GENERIC_DMA_MAPPING_H
3
4#include <linux/kmemcheck.h>
5#include <linux/scatterlist.h>
6#include <linux/dma-debug.h>
7#include <linux/dma-attrs.h>
8
9static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
10 size_t size,
11 enum dma_data_direction dir,
12 struct dma_attrs *attrs)
13{
14 struct dma_map_ops *ops = get_dma_ops(dev);
15 dma_addr_t addr;
16
17 kmemcheck_mark_initialized(ptr, size);
18 BUG_ON(!valid_dma_direction(dir));
19 addr = ops->map_page(dev, virt_to_page(ptr),
20 (unsigned long)ptr & ~PAGE_MASK, size,
21 dir, attrs);
22 debug_dma_map_page(dev, virt_to_page(ptr),
23 (unsigned long)ptr & ~PAGE_MASK, size,
24 dir, addr, true);
25 return addr;
26}
27
28static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
29 size_t size,
30 enum dma_data_direction dir,
31 struct dma_attrs *attrs)
32{
33 struct dma_map_ops *ops = get_dma_ops(dev);
34
35 BUG_ON(!valid_dma_direction(dir));
36 if (ops->unmap_page)
37 ops->unmap_page(dev, addr, size, dir, attrs);
38 debug_dma_unmap_page(dev, addr, size, dir, true);
39}
40
41static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
42 int nents, enum dma_data_direction dir,
43 struct dma_attrs *attrs)
44{
45 struct dma_map_ops *ops = get_dma_ops(dev);
46 int i, ents;
47 struct scatterlist *s;
48
49 for_each_sg(sg, s, nents, i)
50 kmemcheck_mark_initialized(sg_virt(s), s->length);
51 BUG_ON(!valid_dma_direction(dir));
52 ents = ops->map_sg(dev, sg, nents, dir, attrs);
53 debug_dma_map_sg(dev, sg, nents, ents, dir);
54
55 return ents;
56}
57
58static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
59 int nents, enum dma_data_direction dir,
60 struct dma_attrs *attrs)
61{
62 struct dma_map_ops *ops = get_dma_ops(dev);
63
64 BUG_ON(!valid_dma_direction(dir));
65 debug_dma_unmap_sg(dev, sg, nents, dir);
66 if (ops->unmap_sg)
67 ops->unmap_sg(dev, sg, nents, dir, attrs);
68}
69
70static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
71 size_t offset, size_t size,
72 enum dma_data_direction dir)
73{
74 struct dma_map_ops *ops = get_dma_ops(dev);
75 dma_addr_t addr;
76
77 kmemcheck_mark_initialized(page_address(page) + offset, size);
78 BUG_ON(!valid_dma_direction(dir));
79 addr = ops->map_page(dev, page, offset, size, dir, NULL);
80 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
81
82 return addr;
83}
84
85static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
86 size_t size, enum dma_data_direction dir)
87{
88 struct dma_map_ops *ops = get_dma_ops(dev);
89
90 BUG_ON(!valid_dma_direction(dir));
91 if (ops->unmap_page)
92 ops->unmap_page(dev, addr, size, dir, NULL);
93 debug_dma_unmap_page(dev, addr, size, dir, false);
94}
95
96static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
97 size_t size,
98 enum dma_data_direction dir)
99{
100 struct dma_map_ops *ops = get_dma_ops(dev);
101
102 BUG_ON(!valid_dma_direction(dir));
103 if (ops->sync_single_for_cpu)
104 ops->sync_single_for_cpu(dev, addr, size, dir);
105 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
106 flush_write_buffers();
107}
108
109static inline void dma_sync_single_for_device(struct device *dev,
110 dma_addr_t addr, size_t size,
111 enum dma_data_direction dir)
112{
113 struct dma_map_ops *ops = get_dma_ops(dev);
114
115 BUG_ON(!valid_dma_direction(dir));
116 if (ops->sync_single_for_device)
117 ops->sync_single_for_device(dev, addr, size, dir);
118 debug_dma_sync_single_for_device(dev, addr, size, dir);
119 flush_write_buffers();
120}
121
122static inline void dma_sync_single_range_for_cpu(struct device *dev,
123 dma_addr_t addr,
124 unsigned long offset,
125 size_t size,
126 enum dma_data_direction dir)
127{
128 struct dma_map_ops *ops = get_dma_ops(dev);
129
130 BUG_ON(!valid_dma_direction(dir));
131 if (ops->sync_single_range_for_cpu) {
132 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
133 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
134
135 flush_write_buffers();
136 } else
137 dma_sync_single_for_cpu(dev, addr, size, dir);
138}
139
140static inline void dma_sync_single_range_for_device(struct device *dev,
141 dma_addr_t addr,
142 unsigned long offset,
143 size_t size,
144 enum dma_data_direction dir)
145{
146 struct dma_map_ops *ops = get_dma_ops(dev);
147
148 BUG_ON(!valid_dma_direction(dir));
149 if (ops->sync_single_range_for_device) {
150 ops->sync_single_range_for_device(dev, addr, offset, size, dir);
151 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
152
153 flush_write_buffers();
154 } else
155 dma_sync_single_for_device(dev, addr, size, dir);
156}
157
158static inline void
159dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
160 int nelems, enum dma_data_direction dir)
161{
162 struct dma_map_ops *ops = get_dma_ops(dev);
163
164 BUG_ON(!valid_dma_direction(dir));
165 if (ops->sync_sg_for_cpu)
166 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
167 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
168 flush_write_buffers();
169}
170
171static inline void
172dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
173 int nelems, enum dma_data_direction dir)
174{
175 struct dma_map_ops *ops = get_dma_ops(dev);
176
177 BUG_ON(!valid_dma_direction(dir));
178 if (ops->sync_sg_for_device)
179 ops->sync_sg_for_device(dev, sg, nelems, dir);
180 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
181
182 flush_write_buffers();
183}
184
185#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
186#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
187#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
188#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
189
190#endif
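
For illustration only (not part of this patch): a hypothetical driver would use the streaming-DMA helpers defined above roughly as follows (the function name is an assumption, and error handling is omitted):

/* Illustrative only -- map a buffer for a device-bound transfer. */
#include <linux/dma-mapping.h>

static void example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* expands to dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, NULL) */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... hand "handle" to the hardware and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}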
diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h
new file mode 100644
index 000000000000..9dfc3a7f36d2
--- /dev/null
+++ b/include/asm-generic/dma.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_GENERIC_DMA_H
+#define __ASM_GENERIC_DMA_H
+/*
+ * This file traditionally describes the i8237 PC style DMA controller.
+ * Most architectures don't have these any more and can get the minimal
+ * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
+ *
+ * Some code relies on seeing MAX_DMA_ADDRESS though.
+ */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __ASM_GENERIC_DMA_H */
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index e8852c092fea..28cc03bf19e6 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -106,4 +106,6 @@
 #define EOWNERDEAD 130 /* Owner died */
 #define ENOTRECOVERABLE 131 /* State not recoverable */
 
+#define ERFKILL 132 /* Operation not possible due to RF-kill */
+
 #endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
new file mode 100644
index 000000000000..fe8ca7fcea00
--- /dev/null
+++ b/include/asm-generic/fb.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_FB_H_
+#define __ASM_GENERIC_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+        return 0;
+}
+
+#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 000000000000..67e7245dc9b3
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/* Pure 2^n version of get_order */
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+        int order;
+
+        size = (size - 1) >> (PAGE_SHIFT - 1);
+        order = -1;
+        do {
+                size >>= 1;
+                order++;
+        } while (size);
+        return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_GETORDER_H */
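
For illustration only (not part of this patch): get_order() returns the smallest n such that 2^n pages cover the requested size, which is what the page allocator expects. A standalone check of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages for the illustration */

static int get_order(unsigned long size)
{
        int order = -1;

        size = (size - 1) >> (PAGE_SHIFT - 1);
        do {
                size >>= 1;
                order++;
        } while (size);
        return order;
}

int main(void)
{
        /* prints: 0 1 4 -- one page, just over one page, sixteen pages */
        printf("%d %d %d\n", get_order(4096), get_order(4097), get_order(65536));
        return 0;
}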
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
new file mode 100644
index 000000000000..23bb4dad4962
--- /dev/null
+++ b/include/asm-generic/hardirq.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_GENERIC_HARDIRQ_H
+#define __ASM_GENERIC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+        unsigned long __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+        printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#endif /* __ASM_GENERIC_HARDIRQ_H */
diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h
new file mode 100644
index 000000000000..89036d7b40e0
--- /dev/null
+++ b/include/asm-generic/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HW_IRQ_H
+#define __ASM_GENERIC_HW_IRQ_H
+/*
+ * hw_irq.h has internal declarations for the low-level interrupt
+ * controller, like the original i8259A.
+ * In general, this is not needed for new architectures.
+ */
+
+#endif /* __ASM_GENERIC_HW_IRQ_H */
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
index 2af9b75d77db..1ca3efc976cc 100644
--- a/include/asm-generic/int-l64.h
+++ b/include/asm-generic/int-l64.h
@@ -8,6 +8,8 @@
 #ifndef _ASM_GENERIC_INT_L64_H
 #define _ASM_GENERIC_INT_L64_H
 
+#include <asm/bitsperlong.h>
+
 #ifndef __ASSEMBLY__
 /*
  * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index f9bc9ac29b36..f394147c0739 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -8,6 +8,8 @@
 #ifndef _ASM_GENERIC_INT_LL64_H
 #define _ASM_GENERIC_INT_LL64_H
 
+#include <asm/bitsperlong.h>
+
 #ifndef __ASSEMBLY__
 /*
  * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
new file mode 100644
index 000000000000..bcee6365dca0
--- /dev/null
+++ b/include/asm-generic/io.h
@@ -0,0 +1,300 @@
1/* Generic I/O port emulation, based on MN10300 code
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#ifndef __ASM_GENERIC_IO_H
12#define __ASM_GENERIC_IO_H
13
14#include <asm/page.h> /* I/O is all done through memory accesses */
15#include <asm/cacheflush.h>
16#include <linux/types.h>
17
18#ifdef CONFIG_GENERIC_IOMAP
19#include <asm-generic/iomap.h>
20#endif
21
22#define mmiowb() do {} while (0)
23
24/*****************************************************************************/
25/*
26 * readX/writeX() are used to access memory mapped devices. On some
27 * architectures the memory mapped IO stuff needs to be accessed
28 * differently. On the simple architectures, we just read/write the
29 * memory location directly.
30 */
31static inline u8 __raw_readb(const volatile void __iomem *addr)
32{
33 return *(const volatile u8 __force *) addr;
34}
35
36static inline u16 __raw_readw(const volatile void __iomem *addr)
37{
38 return *(const volatile u16 __force *) addr;
39}
40
41static inline u32 __raw_readl(const volatile void __iomem *addr)
42{
43 return *(const volatile u32 __force *) addr;
44}
45
46#define readb __raw_readb
47#define readw(addr) __le16_to_cpu(__raw_readw(addr))
48#define readl(addr) __le32_to_cpu(__raw_readl(addr))
49
50static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
51{
52 *(volatile u8 __force *) addr = b;
53}
54
55static inline void __raw_writew(u16 b, volatile void __iomem *addr)
56{
57 *(volatile u16 __force *) addr = b;
58}
59
60static inline void __raw_writel(u32 b, volatile void __iomem *addr)
61{
62 *(volatile u32 __force *) addr = b;
63}
64
65#define writeb __raw_writeb
66#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
67#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
68
69#ifdef CONFIG_64BIT
70static inline u64 __raw_readq(const volatile void __iomem *addr)
71{
72 return *(const volatile u64 __force *) addr;
73}
74#define readq(addr) __le64_to_cpu(__raw_readq(addr))
75
76static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
77{
78 *(volatile u64 __force *) addr = b;
79}
80#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
81#endif
82
83/*****************************************************************************/
84/*
85 * traditional input/output functions
86 */
87
88static inline u8 inb(unsigned long addr)
89{
90 return readb((volatile void __iomem *) addr);
91}
92
93static inline u16 inw(unsigned long addr)
94{
95 return readw((volatile void __iomem *) addr);
96}
97
98static inline u32 inl(unsigned long addr)
99{
100 return readl((volatile void __iomem *) addr);
101}
102
103static inline void outb(u8 b, unsigned long addr)
104{
105 writeb(b, (volatile void __iomem *) addr);
106}
107
108static inline void outw(u16 b, unsigned long addr)
109{
110 writew(b, (volatile void __iomem *) addr);
111}
112
113static inline void outl(u32 b, unsigned long addr)
114{
115 writel(b, (volatile void __iomem *) addr);
116}
117
118#define inb_p(addr) inb(addr)
119#define inw_p(addr) inw(addr)
120#define inl_p(addr) inl(addr)
121#define outb_p(x, addr) outb((x), (addr))
122#define outw_p(x, addr) outw((x), (addr))
123#define outl_p(x, addr) outl((x), (addr))
124
125static inline void insb(unsigned long addr, void *buffer, int count)
126{
127 if (count) {
128 u8 *buf = buffer;
129 do {
130 u8 x = inb(addr);
131 *buf++ = x;
132 } while (--count);
133 }
134}
135
136static inline void insw(unsigned long addr, void *buffer, int count)
137{
138 if (count) {
139 u16 *buf = buffer;
140 do {
141 u16 x = inw(addr);
142 *buf++ = x;
143 } while (--count);
144 }
145}
146
147static inline void insl(unsigned long addr, void *buffer, int count)
148{
149 if (count) {
150 u32 *buf = buffer;
151 do {
152 u32 x = inl(addr);
153 *buf++ = x;
154 } while (--count);
155 }
156}
157
158static inline void outsb(unsigned long addr, const void *buffer, int count)
159{
160 if (count) {
161 const u8 *buf = buffer;
162 do {
163 outb(*buf++, addr);
164 } while (--count);
165 }
166}
167
168static inline void outsw(unsigned long addr, const void *buffer, int count)
169{
170 if (count) {
171 const u16 *buf = buffer;
172 do {
173 outw(*buf++, addr);
174 } while (--count);
175 }
176}
177
178static inline void outsl(unsigned long addr, const void *buffer, int count)
179{
180 if (count) {
181 const u32 *buf = buffer;
182 do {
183 outl(*buf++, addr);
184 } while (--count);
185 }
186}
187
188#ifndef CONFIG_GENERIC_IOMAP
189#define ioread8(addr) readb(addr)
190#define ioread16(addr) readw(addr)
191#define ioread32(addr) readl(addr)
192
193#define iowrite8(v, addr) writeb((v), (addr))
194#define iowrite16(v, addr) writew((v), (addr))
195#define iowrite32(v, addr) writel((v), (addr))
196
197#define ioread8_rep(p, dst, count) \
198 insb((unsigned long) (p), (dst), (count))
199#define ioread16_rep(p, dst, count) \
200 insw((unsigned long) (p), (dst), (count))
201#define ioread32_rep(p, dst, count) \
202 insl((unsigned long) (p), (dst), (count))
203
204#define iowrite8_rep(p, src, count) \
205 outsb((unsigned long) (p), (src), (count))
206#define iowrite16_rep(p, src, count) \
207 outsw((unsigned long) (p), (src), (count))
208#define iowrite32_rep(p, src, count) \
209 outsl((unsigned long) (p), (src), (count))
210#endif /* CONFIG_GENERIC_IOMAP */
211
212
213#define IO_SPACE_LIMIT 0xffffffff
214
215#ifdef __KERNEL__
216
217#include <linux/vmalloc.h>
218#define __io_virt(x) ((void __force *) (x))
219
220#ifndef CONFIG_GENERIC_IOMAP
221/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
222struct pci_dev;
223extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
224static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
225{
226}
227#endif /* CONFIG_GENERIC_IOMAP */
228
229/*
230 * Change virtual addresses to physical addresses and vv.
231 * These are pretty trivial
232 */
233static inline unsigned long virt_to_phys(volatile void *address)
234{
235 return __pa((unsigned long)address);
236}
237
238static inline void *phys_to_virt(unsigned long address)
239{
240 return __va(address);
241}
242
243/*
244 * Change "struct page" to physical address.
245 */
246static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
247{
248 return (void __iomem*) (unsigned long)offset;
249}
250
251#define __ioremap(offset, size, flags) ioremap(offset, size)
252
253#ifndef ioremap_nocache
254#define ioremap_nocache ioremap
255#endif
256
257#ifndef ioremap_wc
258#define ioremap_wc ioremap_nocache
259#endif
260
261static inline void iounmap(void *addr)
262{
263}
264
265#ifndef CONFIG_GENERIC_IOMAP
266static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
267{
268 return (void __iomem *) port;
269}
270
271static inline void ioport_unmap(void __iomem *p)
272{
273}
274#else /* CONFIG_GENERIC_IOMAP */
275extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
276extern void ioport_unmap(void __iomem *p);
277#endif /* CONFIG_GENERIC_IOMAP */
278
279#define xlate_dev_kmem_ptr(p) p
280#define xlate_dev_mem_ptr(p) ((void *) (p))
281
282#ifndef virt_to_bus
283static inline unsigned long virt_to_bus(volatile void *address)
284{
285 return ((unsigned long) address);
286}
287
288static inline void *bus_to_virt(unsigned long address)
289{
290 return (void *) address;
291}
292#endif
293
294#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
295#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
296#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
297
298#endif /* __KERNEL__ */
299
300#endif /* __ASM_GENERIC_IO_H */
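
For illustration only (not part of this patch): a hypothetical driver using the MMIO accessors defined above; the register layout, offsets and probe function are made up:

/* Illustrative only -- little-endian MMIO register access. */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_REG_CTRL   0x00
#define EXAMPLE_REG_STATUS 0x04

static int example_probe(unsigned long phys_base)
{
        void __iomem *regs = ioremap(phys_base, 0x100);

        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + EXAMPLE_REG_CTRL);           /* byte-swapped store on BE CPUs */
        if (readl(regs + EXAMPLE_REG_STATUS) & 0x1)
                ;                                       /* device reports ready */

        iounmap(regs);
        return 0;
}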
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
new file mode 100644
index 000000000000..a799e20a769e
--- /dev/null
+++ b/include/asm-generic/ioctls.h
@@ -0,0 +1,110 @@
1#ifndef __ASM_GENERIC_IOCTLS_H
2#define __ASM_GENERIC_IOCTLS_H
3
4#include <linux/ioctl.h>
5
6/*
7 * These are the most common definitions for tty ioctl numbers.
8 * Most of them do not use the recommended _IOC(), but there is
9 * probably some source code out there hardcoding the number,
10 * so we might as well use them for all new platforms.
11 *
12 * The architectures that use different values here typically
13 * try to be compatible with some Unix variants for the same
14 * architecture.
15 */
16
17/* 0x54 is just a magic number to make these relatively unique ('T') */
18
19#define TCGETS 0x5401
20#define TCSETS 0x5402
21#define TCSETSW 0x5403
22#define TCSETSF 0x5404
23#define TCGETA 0x5405
24#define TCSETA 0x5406
25#define TCSETAW 0x5407
26#define TCSETAF 0x5408
27#define TCSBRK 0x5409
28#define TCXONC 0x540A
29#define TCFLSH 0x540B
30#define TIOCEXCL 0x540C
31#define TIOCNXCL 0x540D
32#define TIOCSCTTY 0x540E
33#define TIOCGPGRP 0x540F
34#define TIOCSPGRP 0x5410
35#define TIOCOUTQ 0x5411
36#define TIOCSTI 0x5412
37#define TIOCGWINSZ 0x5413
38#define TIOCSWINSZ 0x5414
39#define TIOCMGET 0x5415
40#define TIOCMBIS 0x5416
41#define TIOCMBIC 0x5417
42#define TIOCMSET 0x5418
43#define TIOCGSOFTCAR 0x5419
44#define TIOCSSOFTCAR 0x541A
45#define FIONREAD 0x541B
46#define TIOCINQ FIONREAD
47#define TIOCLINUX 0x541C
48#define TIOCCONS 0x541D
49#define TIOCGSERIAL 0x541E
50#define TIOCSSERIAL 0x541F
51#define TIOCPKT 0x5420
52#define FIONBIO 0x5421
53#define TIOCNOTTY 0x5422
54#define TIOCSETD 0x5423
55#define TIOCGETD 0x5424
56#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
57#define TIOCSBRK 0x5427 /* BSD compatibility */
58#define TIOCCBRK 0x5428 /* BSD compatibility */
59#define TIOCGSID 0x5429 /* Return the session ID of FD */
60#define TCGETS2 _IOR('T', 0x2A, struct termios2)
61#define TCSETS2 _IOW('T', 0x2B, struct termios2)
62#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
63#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
64#define TIOCGRS485 0x542E
65#define TIOCSRS485 0x542F
66#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
67#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
68#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
69#define TCSETX 0x5433
70#define TCSETXF 0x5434
71#define TCSETXW 0x5435
72
73#define FIONCLEX 0x5450
74#define FIOCLEX 0x5451
75#define FIOASYNC 0x5452
76#define TIOCSERCONFIG 0x5453
77#define TIOCSERGWILD 0x5454
78#define TIOCSERSWILD 0x5455
79#define TIOCGLCKTRMIOS 0x5456
80#define TIOCSLCKTRMIOS 0x5457
81#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
82#define TIOCSERGETLSR 0x5459 /* Get line status register */
83#define TIOCSERGETMULTI 0x545A /* Get multiport config */
84#define TIOCSERSETMULTI 0x545B /* Set multiport config */
85
86#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
87#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
88
89/*
90 * some architectures define FIOQSIZE as 0x545E, which is used for
91 * TIOCGHAYESESP on others
92 */
93#ifndef FIOQSIZE
94# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
95# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
96# define FIOQSIZE 0x5460
97#endif
98
99/* Used for packet mode */
100#define TIOCPKT_DATA 0
101#define TIOCPKT_FLUSHREAD 1
102#define TIOCPKT_FLUSHWRITE 2
103#define TIOCPKT_STOP 4
104#define TIOCPKT_START 8
105#define TIOCPKT_NOSTOP 16
106#define TIOCPKT_DOSTOP 32
107
108#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
109
110#endif /* __ASM_GENERIC_IOCTLS_H */
diff --git a/include/asm-generic/ipcbuf.h b/include/asm-generic/ipcbuf.h
new file mode 100644
index 000000000000..76982b2a1b58
--- /dev/null
+++ b/include/asm-generic/ipcbuf.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_GENERIC_IPCBUF_H
2#define __ASM_GENERIC_IPCBUF_H
3
4/*
5 * The generic ipc64_perm structure:
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * ipc64_perm was originally meant to be architecture specific, but
10 * everyone just ended up making identical copies without specific
11 * optimizations, so we may just as well all use the same one.
12 *
13 * Pad space is left for:
14 * - 32-bit mode_t on architectures that only had 16 bit
15 * - 32-bit seq
16 * - 2 miscellaneous 32-bit values
17 */
18
19struct ipc64_perm {
20 __kernel_key_t key;
21 __kernel_uid32_t uid;
22 __kernel_gid32_t gid;
23 __kernel_uid32_t cuid;
24 __kernel_gid32_t cgid;
25 __kernel_mode_t mode;
26 /* pad if mode_t is u16: */
27 unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
28 unsigned short seq;
29 unsigned short __pad2;
30 unsigned long __unused1;
31 unsigned long __unused2;
32};
33
34#endif /* __ASM_GENERIC_IPCBUF_H */
diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h
new file mode 100644
index 000000000000..b90ec0bc485f
--- /dev/null
+++ b/include/asm-generic/irq.h
@@ -0,0 +1,18 @@
1#ifndef __ASM_GENERIC_IRQ_H
2#define __ASM_GENERIC_IRQ_H
3
4/*
5 * NR_IRQS is the upper bound of how many interrupts can be handled
6 * in the platform. It is used to size the static irq_map array,
7 * so don't make it too big.
8 */
9#ifndef NR_IRQS
10#define NR_IRQS 64
11#endif
12
13static inline int irq_canonicalize(int irq)
14{
15 return irq;
16}
17
18#endif /* __ASM_GENERIC_IRQ_H */
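
NR_IRQS above is only a default: a platform with more interrupt sources defines its own value before pulling in the generic header. A sketch of such an override, with the "foo" names made up for illustration:

    #ifndef __FOO_ASM_IRQ_H
    #define __FOO_ASM_IRQ_H

    #define NR_IRQS 128     /* this hypothetical SoC has more than 64 sources */

    #include <asm-generic/irq.h>

    #endif /* __FOO_ASM_IRQ_H */
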
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
new file mode 100644
index 000000000000..9aebf618275a
--- /dev/null
+++ b/include/asm-generic/irqflags.h
@@ -0,0 +1,72 @@
1#ifndef __ASM_GENERIC_IRQFLAGS_H
2#define __ASM_GENERIC_IRQFLAGS_H
3
4/*
5 * All architectures should implement at least the first two functions;
6 * usually inline assembly will be the best way.
7 */
8#ifndef RAW_IRQ_DISABLED
9#define RAW_IRQ_DISABLED 0
10#define RAW_IRQ_ENABLED 1
11#endif
12
13/* read interrupt enabled status */
14#ifndef __raw_local_save_flags
15unsigned long __raw_local_save_flags(void);
16#endif
17
18/* set interrupt enabled status */
19#ifndef raw_local_irq_restore
20void raw_local_irq_restore(unsigned long flags);
21#endif
22
23/* get status and disable interrupts */
24#ifndef __raw_local_irq_save
25static inline unsigned long __raw_local_irq_save(void)
26{
27 unsigned long flags;
28 flags = __raw_local_save_flags();
29 raw_local_irq_restore(RAW_IRQ_DISABLED);
30 return flags;
31}
32#endif
33
34/* test flags */
35#ifndef raw_irqs_disabled_flags
36static inline int raw_irqs_disabled_flags(unsigned long flags)
37{
38 return flags == RAW_IRQ_DISABLED;
39}
40#endif
41
42/* unconditionally enable interrupts */
43#ifndef raw_local_irq_enable
44static inline void raw_local_irq_enable(void)
45{
46 raw_local_irq_restore(RAW_IRQ_ENABLED);
47}
48#endif
49
50/* unconditionally disable interrupts */
51#ifndef raw_local_irq_disable
52static inline void raw_local_irq_disable(void)
53{
54 raw_local_irq_restore(RAW_IRQ_DISABLED);
55}
56#endif
57
58/* test hardware interrupt enable bit */
59#ifndef raw_irqs_disabled
60static inline int raw_irqs_disabled(void)
61{
62 return raw_irqs_disabled_flags(__raw_local_save_flags());
63}
64#endif
65
66#define raw_local_save_flags(flags) \
67 do { (flags) = __raw_local_save_flags(); } while (0)
68
69#define raw_local_irq_save(flags) \
70 do { (flags) = __raw_local_irq_save(); } while (0)
71
72#endif /* __ASM_GENERIC_IRQFLAGS_H */
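
Only __raw_local_save_flags() and raw_local_irq_restore() have to come from the architecture; everything else above is derived from them. A sketch of what a port's asm/irqflags.h might supply, modelling the interrupt-enable state as a plain variable (as a simulator or user-mode port could; real hardware would use inline assembly, and "foo_irq_enabled" is a made-up name):

    #define RAW_IRQ_DISABLED 0
    #define RAW_IRQ_ENABLED  1

    extern volatile int foo_irq_enabled;    /* hypothetical per-CPU state */

    static inline unsigned long __raw_local_save_flags(void)
    {
            return foo_irq_enabled ? RAW_IRQ_ENABLED : RAW_IRQ_DISABLED;
    }
    #define __raw_local_save_flags __raw_local_save_flags

    static inline void raw_local_irq_restore(unsigned long flags)
    {
            foo_irq_enabled = (flags == RAW_IRQ_ENABLED);
    }
    #define raw_local_irq_restore raw_local_irq_restore

    #include <asm-generic/irqflags.h>
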
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
new file mode 100644
index 000000000000..eddbce0f9fb9
--- /dev/null
+++ b/include/asm-generic/kmap_types.h
@@ -0,0 +1,35 @@
1#ifndef _ASM_GENERIC_KMAP_TYPES_H
2#define _ASM_GENERIC_KMAP_TYPES_H
3
4#ifdef __WITH_KM_FENCE
5# define D(n) __KM_FENCE_##n ,
6#else
7# define D(n)
8#endif
9
10enum km_type {
11D(0) KM_BOUNCE_READ,
12D(1) KM_SKB_SUNRPC_DATA,
13D(2) KM_SKB_DATA_SOFTIRQ,
14D(3) KM_USER0,
15D(4) KM_USER1,
16D(5) KM_BIO_SRC_IRQ,
17D(6) KM_BIO_DST_IRQ,
18D(7) KM_PTE0,
19D(8) KM_PTE1,
20D(9) KM_IRQ0,
21D(10) KM_IRQ1,
22D(11) KM_SOFTIRQ0,
23D(12) KM_SOFTIRQ1,
24D(13) KM_SYNC_ICACHE,
25D(14) KM_SYNC_DCACHE,
26D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
27D(16) KM_IRQ_PTE,
28D(17) KM_NMI,
29D(18) KM_NMI_PTE,
30D(19) KM_TYPE_NR
31};
32
33#undef D
34
35#endif
diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h
new file mode 100644
index 000000000000..fef7a01e5415
--- /dev/null
+++ b/include/asm-generic/linkage.h
@@ -0,0 +1,8 @@
1#ifndef __ASM_GENERIC_LINKAGE_H
2#define __ASM_GENERIC_LINKAGE_H
3/*
4 * linux/linkage.h provides reasonable defaults.
5 * an architecture can override them by providing its own version.
6 */
7
8#endif /* __ASM_GENERIC_LINKAGE_H */
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
new file mode 100644
index 000000000000..3b69ad34189a
--- /dev/null
+++ b/include/asm-generic/mman-common.h
@@ -0,0 +1,41 @@
1#ifndef __ASM_GENERIC_MMAN_COMMON_H
2#define __ASM_GENERIC_MMAN_COMMON_H
3
4/*
5 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
6 Based on: asm-xxx/mman.h
7*/
8
9#define PROT_READ 0x1 /* page can be read */
10#define PROT_WRITE 0x2 /* page can be written */
11#define PROT_EXEC 0x4 /* page can be executed */
12#define PROT_SEM 0x8 /* page may be used for atomic ops */
13#define PROT_NONE 0x0 /* page can not be accessed */
14#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
15#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
16
17#define MAP_SHARED 0x01 /* Share changes */
18#define MAP_PRIVATE 0x02 /* Changes are private */
19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22
23#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MADV_NORMAL 0 /* no further special treatment */
28#define MADV_RANDOM 1 /* expect random page references */
29#define MADV_SEQUENTIAL 2 /* expect sequential page references */
30#define MADV_WILLNEED 3 /* will need these pages */
31#define MADV_DONTNEED 4 /* don't need these pages */
32
33/* common parameters: try to keep these consistent across architectures */
34#define MADV_REMOVE 9 /* remove these pages & resources */
35#define MADV_DONTFORK 10 /* don't inherit across fork */
36#define MADV_DOFORK 11 /* do inherit across fork */
37
38/* compatibility flags */
39#define MAP_FILE 0
40
41#endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 5e3dde2ee5ad..7cab4de2bca6 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -1,41 +1,18 @@
1#ifndef _ASM_GENERIC_MMAN_H 1#ifndef __ASM_GENERIC_MMAN_H
2#define _ASM_GENERIC_MMAN_H 2#define __ASM_GENERIC_MMAN_H
3 3
4/* 4#include <asm-generic/mman-common.h>
5 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
6 Based on: asm-xxx/mman.h
7*/
8 5
9#define PROT_READ 0x1 /* page can be read */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
10#define PROT_WRITE 0x2 /* page can be written */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
11#define PROT_EXEC 0x4 /* page can be executed */ 8#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
12#define PROT_SEM 0x8 /* page may be used for atomic ops */ 9#define MAP_LOCKED 0x2000 /* pages are locked */
13#define PROT_NONE 0x0 /* page can not be accessed */ 10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
14#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
15#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
13#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
16 14
17#define MAP_SHARED 0x01 /* Share changes */ 15#define MCL_CURRENT 1 /* lock all current mappings */
18#define MAP_PRIVATE 0x02 /* Changes are private */ 16#define MCL_FUTURE 2 /* lock all future mappings */
19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22 17
23#define MS_ASYNC 1 /* sync memory asynchronously */ 18#endif /* __ASM_GENERIC_MMAN_H */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MADV_NORMAL 0 /* no further special treatment */
28#define MADV_RANDOM 1 /* expect random page references */
29#define MADV_SEQUENTIAL 2 /* expect sequential page references */
30#define MADV_WILLNEED 3 /* will need these pages */
31#define MADV_DONTNEED 4 /* don't need these pages */
32
33/* common parameters: try to keep these consistent across architectures */
34#define MADV_REMOVE 9 /* remove these pages & resources */
35#define MADV_DONTFORK 10 /* don't inherit across fork */
36#define MADV_DOFORK 11 /* do inherit across fork */
37
38/* compatibility flags */
39#define MAP_FILE 0
40
41#endif
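
The PROT_* and MAP_* values in these two headers are the userspace mmap(2) ABI. A typical use (plain userspace code, not part of this patch) is an anonymous, private, read-write mapping:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;   /* 1 MiB */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memset(p, 0, len);      /* touch the pages */
            munmap(p, len);
            return 0;
    }
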
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
new file mode 100644
index 000000000000..4f4aa56d6b52
--- /dev/null
+++ b/include/asm-generic/mmu.h
@@ -0,0 +1,15 @@
1#ifndef __ASM_GENERIC_MMU_H
2#define __ASM_GENERIC_MMU_H
3
4/*
5 * This is the mmu.h header for nommu implementations.
6 * Architectures with an MMU need something more complex.
7 */
8#ifndef __ASSEMBLY__
9typedef struct {
10 struct vm_list_struct *vmlist;
11 unsigned long end_brk;
12} mm_context_t;
13#endif
14
15#endif /* __ASM_GENERIC_MMU_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
new file mode 100644
index 000000000000..a7eec910ba6c
--- /dev/null
+++ b/include/asm-generic/mmu_context.h
@@ -0,0 +1,45 @@
1#ifndef __ASM_GENERIC_MMU_CONTEXT_H
2#define __ASM_GENERIC_MMU_CONTEXT_H
3
4/*
5 * Generic hooks for NOMMU architectures, which do not need to do
6 * anything special here.
7 */
8
9#include <asm-generic/mm_hooks.h>
10
11struct task_struct;
12struct mm_struct;
13
14static inline void enter_lazy_tlb(struct mm_struct *mm,
15 struct task_struct *tsk)
16{
17}
18
19static inline int init_new_context(struct task_struct *tsk,
20 struct mm_struct *mm)
21{
22 return 0;
23}
24
25static inline void destroy_context(struct mm_struct *mm)
26{
27}
28
29static inline void deactivate_mm(struct task_struct *task,
30 struct mm_struct *mm)
31{
32}
33
34static inline void switch_mm(struct mm_struct *prev,
35 struct mm_struct *next,
36 struct task_struct *tsk)
37{
38}
39
40static inline void activate_mm(struct mm_struct *prev_mm,
41 struct mm_struct *next_mm)
42{
43}
44
45#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
new file mode 100644
index 000000000000..ed5b44de4c91
--- /dev/null
+++ b/include/asm-generic/module.h
@@ -0,0 +1,22 @@
1#ifndef __ASM_GENERIC_MODULE_H
2#define __ASM_GENERIC_MODULE_H
3
4/*
5 * Many architectures just need a simple module
6 * loader without arch specific data.
7 */
8struct mod_arch_specific
9{
10};
11
12#ifdef CONFIG_64BIT
13#define Elf_Shdr Elf64_Shdr
14#define Elf_Sym Elf64_Sym
15#define Elf_Ehdr Elf64_Ehdr
16#else
17#define Elf_Shdr Elf32_Shdr
18#define Elf_Sym Elf32_Sym
19#define Elf_Ehdr Elf32_Ehdr
20#endif
21
22#endif /* __ASM_GENERIC_MODULE_H */
diff --git a/include/asm-generic/msgbuf.h b/include/asm-generic/msgbuf.h
new file mode 100644
index 000000000000..aec850d9159e
--- /dev/null
+++ b/include/asm-generic/msgbuf.h
@@ -0,0 +1,47 @@
1#ifndef __ASM_GENERIC_MSGBUF_H
2#define __ASM_GENERIC_MSGBUF_H
3
4#include <asm/bitsperlong.h>
5/*
6 * generic msqid64_ds structure.
7 *
8 * Note extra padding because this structure is passed back and forth
9 * between kernel and user space.
10 *
11 * msqid64_ds was originally meant to be architecture specific, but
12 * everyone just ended up making identical copies without specific
13 * optimizations, so we may just as well all use the same one.
14 *
15 * 64 bit architectures typically define a 64 bit __kernel_time_t,
16 * so they do not need the first three padding words.
17 * On big-endian systems, the padding is in the wrong place.
18 *
19 * Pad space is left for:
20 * - 64-bit time_t to solve y2038 problem
21 * - 2 miscellaneous 32-bit values
22 */
23
24struct msqid64_ds {
25 struct ipc64_perm msg_perm;
26 __kernel_time_t msg_stime; /* last msgsnd time */
27#if __BITS_PER_LONG != 64
28 unsigned long __unused1;
29#endif
30 __kernel_time_t msg_rtime; /* last msgrcv time */
31#if __BITS_PER_LONG != 64
32 unsigned long __unused2;
33#endif
34 __kernel_time_t msg_ctime; /* last change time */
35#if __BITS_PER_LONG != 64
36 unsigned long __unused3;
37#endif
38 unsigned long msg_cbytes; /* current number of bytes on queue */
39 unsigned long msg_qnum; /* number of messages in queue */
40 unsigned long msg_qbytes; /* max number of bytes on queue */
41 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
42 __kernel_pid_t msg_lrpid; /* last receive pid */
43 unsigned long __unused4;
44 unsigned long __unused5;
45};
46
47#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h
new file mode 100644
index 000000000000..fe91ab502793
--- /dev/null
+++ b/include/asm-generic/mutex.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_GENERIC_MUTEX_H
2#define __ASM_GENERIC_MUTEX_H
3/*
4 * Pull in the generic implementation for the mutex fastpath,
5 * which is a reasonable default on many architectures.
6 */
7
8#include <asm-generic/mutex-dec.h>
9#endif /* __ASM_GENERIC_MUTEX_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 14db733b8e68..75fec18cdc59 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -1,24 +1,99 @@
1#ifndef _ASM_GENERIC_PAGE_H 1#ifndef __ASM_GENERIC_PAGE_H
2#define _ASM_GENERIC_PAGE_H 2#define __ASM_GENERIC_PAGE_H
3/*
4 * Generic page.h implementation, for NOMMU architectures.
5 * This provides the dummy definitions for the memory management.
6 */
7
8#ifdef CONFIG_MMU
9#error need to provide a real asm/page.h
10#endif
11
12
13/* PAGE_SHIFT determines the page size */
14
15#define PAGE_SHIFT 12
16#ifdef __ASSEMBLY__
17#define PAGE_SIZE (1 << PAGE_SHIFT)
18#else
19#define PAGE_SIZE (1UL << PAGE_SHIFT)
20#endif
21#define PAGE_MASK (~(PAGE_SIZE-1))
22
23#include <asm/setup.h>
24
25#ifndef __ASSEMBLY__
26
27#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
28#define free_user_page(page, addr) free_page(addr)
29
30#define clear_page(page) memset((page), 0, PAGE_SIZE)
31#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
32
33#define clear_user_page(page, vaddr, pg) clear_page(page)
34#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
35
36/*
37 * These are used to make use of C type-checking..
38 */
39typedef struct {
40 unsigned long pte;
41} pte_t;
42typedef struct {
43 unsigned long pmd[16];
44} pmd_t;
45typedef struct {
46 unsigned long pgd;
47} pgd_t;
48typedef struct {
49 unsigned long pgprot;
50} pgprot_t;
51typedef struct page *pgtable_t;
52
53#define pte_val(x) ((x).pte)
54#define pmd_val(x) ((&x)->pmd[0])
55#define pgd_val(x) ((x).pgd)
56#define pgprot_val(x) ((x).pgprot)
57
58#define __pte(x) ((pte_t) { (x) } )
59#define __pmd(x) ((pmd_t) { (x) } )
60#define __pgd(x) ((pgd_t) { (x) } )
61#define __pgprot(x) ((pgprot_t) { (x) } )
62
63extern unsigned long memory_start;
64extern unsigned long memory_end;
65
66#endif /* !__ASSEMBLY__ */
67
68#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
69#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
70#else
71#define PAGE_OFFSET (0)
72#endif
3 73
4#ifndef __ASSEMBLY__ 74#ifndef __ASSEMBLY__
5 75
6#include <linux/compiler.h> 76#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
77#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
78
79#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
80#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
81
82#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
83#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
84
85#ifndef page_to_phys
86#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
87#endif
88
89#define pfn_valid(pfn) ((pfn) < max_mapnr)
7 90
8/* Pure 2^n version of get_order */ 91#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
9static __inline__ __attribute_const__ int get_order(unsigned long size) 92 ((void *)(kaddr) < (void *)memory_end))
10{
11 int order;
12 93
13 size = (size - 1) >> (PAGE_SHIFT - 1); 94#endif /* __ASSEMBLY__ */
14 order = -1;
15 do {
16 size >>= 1;
17 order++;
18 } while (size);
19 return order;
20}
21 95
22#endif /* __ASSEMBLY__ */ 96#include <asm-generic/memory_model.h>
97#include <asm-generic/getorder.h>
23 98
24#endif /* _ASM_GENERIC_PAGE_H */ 99#endif /* __ASM_GENERIC_PAGE_H */
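
With this NOMMU layout, kernel virtual and physical addresses differ only by PAGE_OFFSET, so __pa(), __va() and virt_to_pfn() reduce to offset arithmetic. A worked example with made-up numbers (assuming CONFIG_KERNEL_RAM_BASE_ADDRESS = 0x10000000; not taken from the patch):

    #include <stdio.h>

    #define PAGE_OFFSET     0x10000000UL    /* assumed RAM base */
    #define PAGE_SHIFT      12

    int main(void)
    {
            unsigned long kaddr = 0x10234000UL;             /* kernel virtual address */
            unsigned long pa    = kaddr - PAGE_OFFSET;      /* __pa():  0x234000 */
            unsigned long pfn   = pa >> PAGE_SHIFT;         /* virt_to_pfn(): 0x234 */
            unsigned long back  = (pfn << PAGE_SHIFT) + PAGE_OFFSET; /* pfn_to_virt() */

            printf("pa=%#lx pfn=%#lx va=%#lx\n", pa, pfn, back);
            return 0;
    }
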
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
new file mode 100644
index 000000000000..cdf8251bfb6c
--- /dev/null
+++ b/include/asm-generic/param.h
@@ -0,0 +1,24 @@
1#ifndef __ASM_GENERIC_PARAM_H
2#define __ASM_GENERIC_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* some user interfaces are */
7# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#ifndef EXEC_PAGESIZE
15#define EXEC_PAGESIZE 4096
16#endif
17
18#ifndef NOGROUP
19#define NOGROUP (-1)
20#endif
21
22#define MAXHOSTNAMELEN 64 /* max length of hostname */
23
24#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
new file mode 100644
index 000000000000..40528cb977e8
--- /dev/null
+++ b/include/asm-generic/parport.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_GENERIC_PARPORT_H
2#define __ASM_GENERIC_PARPORT_H
3
4/*
5 * An ISA bus may have i8255 parallel ports at well-known
6 * locations in the I/O space, which are scanned by
7 * parport_pc_find_isa_ports.
8 *
9 * Without ISA support, the driver will only attach
10 * to devices on the PCI bus.
11 */
12
13static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
14static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
15{
16#ifdef CONFIG_ISA
17 return parport_pc_find_isa_ports(autoirq, autodma);
18#else
19 return 0;
20#endif
21}
22
23#endif /* __ASM_GENERIC_PARPORT_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index c36a77d3bf44..b4326b5466eb 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -30,19 +30,6 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
30 res->end = region->end; 30 res->end = region->end;
31} 31}
32 32
33static inline struct resource *
34pcibios_select_root(struct pci_dev *pdev, struct resource *res)
35{
36 struct resource *root = NULL;
37
38 if (res->flags & IORESOURCE_IO)
39 root = &ioport_resource;
40 if (res->flags & IORESOURCE_MEM)
41 root = &iomem_resource;
42
43 return root;
44}
45
46#define pcibios_scan_all_fns(a, b) 0 33#define pcibios_scan_all_fns(a, b) 0
47 34
48#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ 35#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
@@ -52,4 +39,12 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
52} 39}
53#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ 40#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
54 41
42/*
43 * By default, assume that no iommu is in use and that the PCI
44 * space is mapped to address physical 0.
45 */
46#ifndef PCI_DMA_BUS_IS_PHYS
47#define PCI_DMA_BUS_IS_PHYS (1)
55#endif 48#endif
49
50#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
new file mode 100644
index 000000000000..9e429d08b1f8
--- /dev/null
+++ b/include/asm-generic/pgalloc.h
@@ -0,0 +1,12 @@
1#ifndef __ASM_GENERIC_PGALLOC_H
2#define __ASM_GENERIC_PGALLOC_H
3/*
4 * an empty file is enough for a nommu architecture
5 */
6#ifdef CONFIG_MMU
7#error need to implement an architecture specific asm/pgalloc.h
8#endif
9
10#define check_pgt_cache() do { } while (0)
11
12#endif /* __ASM_GENERIC_PGALLOC_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e6d0ca70aba..e2bd73e8f9c0 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
129#define move_pte(pte, prot, old_addr, new_addr) (pte) 129#define move_pte(pte, prot, old_addr, new_addr) (pte)
130#endif 130#endif
131 131
132#ifndef pgprot_noncached
133#define pgprot_noncached(prot) (prot)
134#endif
135
132#ifndef pgprot_writecombine 136#ifndef pgprot_writecombine
133#define pgprot_writecombine pgprot_noncached 137#define pgprot_writecombine pgprot_noncached
134#endif 138#endif
@@ -280,17 +284,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
280#endif 284#endif
281 285
282/* 286/*
283 * A facility to provide batching of the reload of page tables with the 287 * A facility to provide batching of the reload of page tables and
284 * actual context switch code for paravirtualized guests. By convention, 288 * other process state with the actual context switch code for
285 * only one of the lazy modes (CPU, MMU) should be active at any given 289 * paravirtualized guests. By convention, only one of the batched
286 * time, entry should never be nested, and entry and exits should always 290 * update (lazy) modes (CPU, MMU) should be active at any given time,
287 * be paired. This is for sanity of maintaining and reasoning about the 291 * entry should never be nested, and entry and exits should always be
288 * kernel code. 292 * paired. This is for sanity of maintaining and reasoning about the
293 * kernel code. In this case, the exit (end of the context switch) is
294 * in architecture-specific code, and so doesn't need a generic
295 * definition.
289 */ 296 */
290#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE 297#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
291#define arch_enter_lazy_cpu_mode() do {} while (0) 298#define arch_start_context_switch(prev) do {} while (0)
292#define arch_leave_lazy_cpu_mode() do {} while (0)
293#define arch_flush_lazy_cpu_mode() do {} while (0)
294#endif 299#endif
295 300
296#ifndef __HAVE_PFNMAP_TRACKING 301#ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
new file mode 100644
index 000000000000..3dab00860e71
--- /dev/null
+++ b/include/asm-generic/posix_types.h
@@ -0,0 +1,165 @@
1#ifndef __ASM_GENERIC_POSIX_TYPES_H
2#define __ASM_GENERIC_POSIX_TYPES_H
3
4#include <asm/bitsperlong.h>
5/*
6 * This file is generally used by user-level software, so you need to
7 * be a little careful about namespace pollution etc.
8 *
9 * First the types that are often defined in different ways across
10 * architectures, so that you can override them.
11 */
12
13#ifndef __kernel_ino_t
14typedef unsigned long __kernel_ino_t;
15#endif
16
17#ifndef __kernel_mode_t
18typedef unsigned int __kernel_mode_t;
19#endif
20
21#ifndef __kernel_nlink_t
22typedef unsigned long __kernel_nlink_t;
23#endif
24
25#ifndef __kernel_pid_t
26typedef int __kernel_pid_t;
27#endif
28
29#ifndef __kernel_ipc_pid_t
30typedef int __kernel_ipc_pid_t;
31#endif
32
33#ifndef __kernel_uid_t
34typedef unsigned int __kernel_uid_t;
35typedef unsigned int __kernel_gid_t;
36#endif
37
38#ifndef __kernel_suseconds_t
39typedef long __kernel_suseconds_t;
40#endif
41
42#ifndef __kernel_daddr_t
43typedef int __kernel_daddr_t;
44#endif
45
46#ifndef __kernel_uid32_t
47typedef __kernel_uid_t __kernel_uid32_t;
48typedef __kernel_gid_t __kernel_gid32_t;
49#endif
50
51#ifndef __kernel_old_uid_t
52typedef __kernel_uid_t __kernel_old_uid_t;
53typedef __kernel_gid_t __kernel_old_gid_t;
54#endif
55
56#ifndef __kernel_old_dev_t
57typedef unsigned int __kernel_old_dev_t;
58#endif
59
60/*
61 * Most 32 bit architectures use "unsigned int" size_t,
62 * and all 64 bit architectures use "unsigned long" size_t.
63 */
64#ifndef __kernel_size_t
65#if __BITS_PER_LONG != 64
66typedef unsigned int __kernel_size_t;
67typedef int __kernel_ssize_t;
68typedef int __kernel_ptrdiff_t;
69#else
70typedef unsigned long __kernel_size_t;
71typedef long __kernel_ssize_t;
72typedef long __kernel_ptrdiff_t;
73#endif
74#endif
75
76/*
77 * anything below here should be completely generic
78 */
79typedef long __kernel_off_t;
80typedef long long __kernel_loff_t;
81typedef long __kernel_time_t;
82typedef long __kernel_clock_t;
83typedef int __kernel_timer_t;
84typedef int __kernel_clockid_t;
85typedef char * __kernel_caddr_t;
86typedef unsigned short __kernel_uid16_t;
87typedef unsigned short __kernel_gid16_t;
88
89typedef struct {
90 int val[2];
91} __kernel_fsid_t;
92
93#ifdef __KERNEL__
94
95#undef __FD_SET
96static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
97{
98 unsigned long __tmp = __fd / __NFDBITS;
99 unsigned long __rem = __fd % __NFDBITS;
100 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
101}
102
103#undef __FD_CLR
104static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
105{
106 unsigned long __tmp = __fd / __NFDBITS;
107 unsigned long __rem = __fd % __NFDBITS;
108 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
109}
110
111#undef __FD_ISSET
112static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
113{
114 unsigned long __tmp = __fd / __NFDBITS;
115 unsigned long __rem = __fd % __NFDBITS;
116 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
117}
118
119/*
120 * This will unroll the loop for the normal constant case (8 ints,
121 * for a 256-bit fd_set)
122 */
123#undef __FD_ZERO
124static inline void __FD_ZERO(__kernel_fd_set *__p)
125{
126 unsigned long *__tmp = __p->fds_bits;
127 int __i;
128
129 if (__builtin_constant_p(__FDSET_LONGS)) {
130 switch (__FDSET_LONGS) {
131 case 16:
132 __tmp[ 0] = 0; __tmp[ 1] = 0;
133 __tmp[ 2] = 0; __tmp[ 3] = 0;
134 __tmp[ 4] = 0; __tmp[ 5] = 0;
135 __tmp[ 6] = 0; __tmp[ 7] = 0;
136 __tmp[ 8] = 0; __tmp[ 9] = 0;
137 __tmp[10] = 0; __tmp[11] = 0;
138 __tmp[12] = 0; __tmp[13] = 0;
139 __tmp[14] = 0; __tmp[15] = 0;
140 return;
141
142 case 8:
143 __tmp[ 0] = 0; __tmp[ 1] = 0;
144 __tmp[ 2] = 0; __tmp[ 3] = 0;
145 __tmp[ 4] = 0; __tmp[ 5] = 0;
146 __tmp[ 6] = 0; __tmp[ 7] = 0;
147 return;
148
149 case 4:
150 __tmp[ 0] = 0; __tmp[ 1] = 0;
151 __tmp[ 2] = 0; __tmp[ 3] = 0;
152 return;
153 }
154 }
155 __i = __FDSET_LONGS;
156 while (__i) {
157 __i--;
158 *__tmp = 0;
159 __tmp++;
160 }
161}
162
163#endif /* __KERNEL__ */
164
165#endif /* __ASM_GENERIC_POSIX_TYPES_H */
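
The __FD_* helpers treat an fd_set as an array of longs with one bit per descriptor: __fd / __NFDBITS selects the word, __fd % __NFDBITS the bit. Userspace reaches the same layout through the libc FD_* macros and select(2), for example (not part of this patch):

    #include <stdio.h>
    #include <sys/select.h>

    int main(void)
    {
            fd_set rfds;
            struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };

            FD_ZERO(&rfds);         /* same job as __FD_ZERO() above */
            FD_SET(0, &rfds);       /* sets bit 0 of word 0, like __FD_SET() */

            if (select(1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(0, &rfds))
                    printf("stdin is readable\n");
            return 0;
    }
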
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index 763e3b060f43..fa86f240c874 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -202,7 +202,7 @@ static inline unsigned int get_rtc_ss(void)
202{ 202{
203 struct rtc_time h; 203 struct rtc_time h;
204 204
205 __get_rtc_time(&h); 205 get_rtc_time(&h);
206 return h.tm_sec; 206 return h.tm_sec;
207} 207}
208 208
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
new file mode 100644
index 000000000000..8b9454496a7c
--- /dev/null
+++ b/include/asm-generic/scatterlist.h
@@ -0,0 +1,43 @@
1#ifndef __ASM_GENERIC_SCATTERLIST_H
2#define __ASM_GENERIC_SCATTERLIST_H
3
4#include <linux/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13 dma_addr_t dma_address;
14 unsigned int dma_length;
15};
16
17/*
18 * These macros should be used after a dma_map_sg call has been done
19 * to get bus addresses of each of the SG entries and their lengths.
20 * You should only work with the number of sg entries pci_map_sg
21 * returns, or alternatively stop on the first sg_dma_len(sg) which
22 * is 0.
23 */
24#define sg_dma_address(sg) ((sg)->dma_address)
25#ifndef sg_dma_len
26/*
27 * Normally, you have an iommu on 64 bit machines, but not on 32 bit
28 * machines. Architectures that are different should override this.
29 */
30#if __BITS_PER_LONG == 64
31#define sg_dma_len(sg) ((sg)->dma_length)
32#else
33#define sg_dma_len(sg) ((sg)->length)
34#endif /* 64 bit */
35#endif /* sg_dma_len */
36
37#ifndef ISA_DMA_THRESHOLD
38#define ISA_DMA_THRESHOLD (~0UL)
39#endif
40
41#define ARCH_HAS_SG_CHAIN
42
43#endif /* __ASM_GENERIC_SCATTERLIST_H */
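
The comment above describes the intended calling pattern: map the list, then walk only the entries dma_map_sg() actually returned, reading sg_dma_address() and sg_dma_len() for each. A driver-side sketch, where foo_push_desc() stands in for whatever programs the hardware (a made-up helper, not from the tree):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    extern void foo_push_desc(dma_addr_t addr, unsigned int len); /* hypothetical */

    static int foo_map_and_program(struct device *dev,
                                   struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i, count;

            count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
            if (count == 0)
                    return -ENOMEM;

            for_each_sg(sgl, sg, count, i)
                    foo_push_desc(sg_dma_address(sg), sg_dma_len(sg));

            return 0;
    }
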
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4ce48e878530..d083561337f2 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,9 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
14extern char __initdata_begin[], __initdata_end[]; 14extern char __initdata_begin[], __initdata_end[];
15extern char __start_rodata[], __end_rodata[]; 15extern char __start_rodata[], __end_rodata[];
16 16
17/* Start and end of .ctors section - used for constructor calls. */
18extern char __ctors_start[], __ctors_end[];
19
17/* function descriptor handling (if any). Override 20/* function descriptor handling (if any). Override
18 * in asm/sections.h */ 21 * in asm/sections.h */
19#ifndef dereference_function_descriptor 22#ifndef dereference_function_descriptor
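
The two new symbols delimit the .ctors section; the usual consumer walks it as an array of function pointers and calls each entry in turn. A sketch of that loop (an assumption about the intended use, not shown in this hunk):

    #include <asm/sections.h>

    typedef void (*ctor_fn_t)(void);

    static void call_ctors(void)
    {
            ctor_fn_t *fn;

            for (fn = (ctor_fn_t *)__ctors_start;
                 fn < (ctor_fn_t *)__ctors_end; fn++)
                    (*fn)();
    }
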
diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h
new file mode 100644
index 000000000000..5580eace622c
--- /dev/null
+++ b/include/asm-generic/segment.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_GENERIC_SEGMENT_H
2#define __ASM_GENERIC_SEGMENT_H
3/*
4 * Only here because we have some old header files that expect it...
5 *
6 * New architectures probably don't want to have their own version.
7 */
8
9#endif /* __ASM_GENERIC_SEGMENT_H */
diff --git a/include/asm-generic/sembuf.h b/include/asm-generic/sembuf.h
new file mode 100644
index 000000000000..4cb2c13e5090
--- /dev/null
+++ b/include/asm-generic/sembuf.h
@@ -0,0 +1,38 @@
1#ifndef __ASM_GENERIC_SEMBUF_H
2#define __ASM_GENERIC_SEMBUF_H
3
4#include <asm/bitsperlong.h>
5
6/*
7 * The generic semid64_ds structure.
8 * Note extra padding because this structure is passed back and forth
9 * between kernel and user space.
10 *
11 * semid64_ds was originally meant to be architecture specific, but
12 * everyone just ended up making identical copies without specific
13 * optimizations, so we may just as well all use the same one.
14 *
15 * 64 bit architectures typically define a 64 bit __kernel_time_t,
16 * so they do not need the first two padding words.
17 * On big-endian systems, the padding is in the wrong place.
18 *
19 * Pad space is left for:
20 * - 64-bit time_t to solve y2038 problem
21 * - 2 miscellaneous 32-bit values
22 */
23struct semid64_ds {
24 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
25 __kernel_time_t sem_otime; /* last semop time */
26#if __BITS_PER_LONG != 64
27 unsigned long __unused1;
28#endif
29 __kernel_time_t sem_ctime; /* last change time */
30#if __BITS_PER_LONG != 64
31 unsigned long __unused2;
32#endif
33 unsigned long sem_nsems; /* no. of semaphores in array */
34 unsigned long __unused3;
35 unsigned long __unused4;
36};
37
38#endif /* __ASM_GENERIC_SEMBUF_H */
diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h
new file mode 100644
index 000000000000..5e291090fe04
--- /dev/null
+++ b/include/asm-generic/serial.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_GENERIC_SERIAL_H
2#define __ASM_GENERIC_SERIAL_H
3
4/*
5 * This should not be an architecture specific #define, oh well.
6 *
7 * Traditionally, it just describes i8250 and related serial ports
8 * that have this clock rate.
9 */
10
11#define BASE_BAUD (1843200 / 16)
12
13#endif /* __ASM_GENERIC_SERIAL_H */
diff --git a/include/asm-generic/setup.h b/include/asm-generic/setup.h
new file mode 100644
index 000000000000..6fc26a51003c
--- /dev/null
+++ b/include/asm-generic/setup.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_GENERIC_SETUP_H
2#define __ASM_GENERIC_SETUP_H
3
4#define COMMAND_LINE_SIZE 512
5
6#endif /* __ASM_GENERIC_SETUP_H */
diff --git a/include/asm-generic/shmbuf.h b/include/asm-generic/shmbuf.h
new file mode 100644
index 000000000000..5768fa60ac82
--- /dev/null
+++ b/include/asm-generic/shmbuf.h
@@ -0,0 +1,59 @@
1#ifndef __ASM_GENERIC_SHMBUF_H
2#define __ASM_GENERIC_SHMBUF_H
3
4#include <asm/bitsperlong.h>
5
6/*
7 * The generic shmid64_ds structure.
8 * Note extra padding because this structure is passed back and forth
9 * between kernel and user space.
10 *
11 * shmid64_ds was originally meant to be architecture specific, but
12 * everyone just ended up making identical copies without specific
13 * optimizations, so we may just as well all use the same one.
14 *
15 * 64 bit architectures typically define a 64 bit __kernel_time_t,
16 * so they do not need the first two padding words.
17 * On big-endian systems, the padding is in the wrong place.
18 *
19 *
20 * Pad space is left for:
21 * - 64-bit time_t to solve y2038 problem
22 * - 2 miscellaneous 32-bit values
23 */
24
25struct shmid64_ds {
26 struct ipc64_perm shm_perm; /* operation perms */
27 size_t shm_segsz; /* size of segment (bytes) */
28 __kernel_time_t shm_atime; /* last attach time */
29#if __BITS_PER_LONG != 64
30 unsigned long __unused1;
31#endif
32 __kernel_time_t shm_dtime; /* last detach time */
33#if __BITS_PER_LONG != 64
34 unsigned long __unused2;
35#endif
36 __kernel_time_t shm_ctime; /* last change time */
37#if __BITS_PER_LONG != 64
38 unsigned long __unused3;
39#endif
40 __kernel_pid_t shm_cpid; /* pid of creator */
41 __kernel_pid_t shm_lpid; /* pid of last operator */
42 unsigned long shm_nattch; /* no. of current attaches */
43 unsigned long __unused4;
44 unsigned long __unused5;
45};
46
47struct shminfo64 {
48 unsigned long shmmax;
49 unsigned long shmmin;
50 unsigned long shmmni;
51 unsigned long shmseg;
52 unsigned long shmall;
53 unsigned long __unused1;
54 unsigned long __unused2;
55 unsigned long __unused3;
56 unsigned long __unused4;
57};
58
59#endif /* __ASM_GENERIC_SHMBUF_H */
diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
new file mode 100644
index 000000000000..51a3852de733
--- /dev/null
+++ b/include/asm-generic/shmparam.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_GENERIC_SHMPARAM_H
2#define __ASM_GENERIC_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* __ASM_GENERIC_SHMPARAM_H */
diff --git a/include/asm-generic/signal-defs.h b/include/asm-generic/signal-defs.h
new file mode 100644
index 000000000000..00f95df54297
--- /dev/null
+++ b/include/asm-generic/signal-defs.h
@@ -0,0 +1,28 @@
1#ifndef __ASM_GENERIC_SIGNAL_DEFS_H
2#define __ASM_GENERIC_SIGNAL_DEFS_H
3
4#include <linux/compiler.h>
5
6#ifndef SIG_BLOCK
7#define SIG_BLOCK 0 /* for blocking signals */
8#endif
9#ifndef SIG_UNBLOCK
10#define SIG_UNBLOCK 1 /* for unblocking signals */
11#endif
12#ifndef SIG_SETMASK
13#define SIG_SETMASK 2 /* for setting the signal mask */
14#endif
15
16#ifndef __ASSEMBLY__
17typedef void __signalfn_t(int);
18typedef __signalfn_t __user *__sighandler_t;
19
20typedef void __restorefn_t(void);
21typedef __restorefn_t __user *__sigrestore_t;
22
23#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
24#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
25#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
26#endif
27
28#endif /* __ASM_GENERIC_SIGNAL_DEFS_H */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index dae1d8720076..555c0aee8a47 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -1,28 +1,131 @@
1#ifndef __ASM_GENERIC_SIGNAL_H 1#ifndef __ASM_GENERIC_SIGNAL_H
2#define __ASM_GENERIC_SIGNAL_H 2#define __ASM_GENERIC_SIGNAL_H
3 3
4#include <linux/compiler.h> 4#include <linux/types.h>
5 5
6#ifndef SIG_BLOCK 6#define _NSIG 64
7#define SIG_BLOCK 0 /* for blocking signals */ 7#define _NSIG_BPW __BITS_PER_LONG
8#endif 8#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
9#ifndef SIG_UNBLOCK 9
10#define SIG_UNBLOCK 1 /* for unblocking signals */ 10#define SIGHUP 1
11#endif 11#define SIGINT 2
12#ifndef SIG_SETMASK 12#define SIGQUIT 3
13#define SIG_SETMASK 2 /* for setting the signal mask */ 13#define SIGILL 4
14#define SIGTRAP 5
15#define SIGABRT 6
16#define SIGIOT 6
17#define SIGBUS 7
18#define SIGFPE 8
19#define SIGKILL 9
20#define SIGUSR1 10
21#define SIGSEGV 11
22#define SIGUSR2 12
23#define SIGPIPE 13
24#define SIGALRM 14
25#define SIGTERM 15
26#define SIGSTKFLT 16
27#define SIGCHLD 17
28#define SIGCONT 18
29#define SIGSTOP 19
30#define SIGTSTP 20
31#define SIGTTIN 21
32#define SIGTTOU 22
33#define SIGURG 23
34#define SIGXCPU 24
35#define SIGXFSZ 25
36#define SIGVTALRM 26
37#define SIGPROF 27
38#define SIGWINCH 28
39#define SIGIO 29
40#define SIGPOLL SIGIO
41/*
42#define SIGLOST 29
43*/
44#define SIGPWR 30
45#define SIGSYS 31
46#define SIGUNUSED 31
47
48/* These should not be considered constants from userland. */
49#define SIGRTMIN 32
50#ifndef SIGRTMAX
51#define SIGRTMAX _NSIG
14#endif 52#endif
15 53
54/*
55 * SA_FLAGS values:
56 *
57 * SA_ONSTACK indicates that a registered stack_t will be used.
58 * SA_RESTART flag to get restarting signals (which were the default long ago)
59 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
60 * SA_RESETHAND clears the handler when the signal is delivered.
61 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
62 * SA_NODEFER prevents the current signal from being masked in the handler.
63 *
64 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
65 * Unix names RESETHAND and NODEFER respectively.
66 */
67#define SA_NOCLDSTOP 0x00000001
68#define SA_NOCLDWAIT 0x00000002
69#define SA_SIGINFO 0x00000004
70#define SA_ONSTACK 0x08000000
71#define SA_RESTART 0x10000000
72#define SA_NODEFER 0x40000000
73#define SA_RESETHAND 0x80000000
74
75#define SA_NOMASK SA_NODEFER
76#define SA_ONESHOT SA_RESETHAND
77
78/*
79 * New architectures should not define the obsolete
80 * SA_RESTORER 0x04000000
81 */
82
83/*
84 * sigaltstack controls
85 */
86#define SS_ONSTACK 1
87#define SS_DISABLE 2
88
89#define MINSIGSTKSZ 2048
90#define SIGSTKSZ 8192
91
16#ifndef __ASSEMBLY__ 92#ifndef __ASSEMBLY__
17typedef void __signalfn_t(int); 93typedef struct {
18typedef __signalfn_t __user *__sighandler_t; 94 unsigned long sig[_NSIG_WORDS];
95} sigset_t;
96
97/* not actually used, but required for linux/syscalls.h */
98typedef unsigned long old_sigset_t;
19 99
20typedef void __restorefn_t(void); 100#include <asm-generic/signal-defs.h>
21typedef __restorefn_t __user *__sigrestore_t;
22 101
23#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */ 102struct sigaction {
24#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */ 103 __sighandler_t sa_handler;
25#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */ 104 unsigned long sa_flags;
105#ifdef SA_RESTORER
106 __sigrestore_t sa_restorer;
26#endif 107#endif
108 sigset_t sa_mask; /* mask last for extensibility */
109};
110
111struct k_sigaction {
112 struct sigaction sa;
113};
114
115typedef struct sigaltstack {
116 void __user *ss_sp;
117 int ss_flags;
118 size_t ss_size;
119} stack_t;
120
121#ifdef __KERNEL__
122
123#include <asm/sigcontext.h>
124#undef __HAVE_ARCH_SIG_BITOPS
125
126#define ptrace_signal_deliver(regs, cookie) do { } while (0)
127
128#endif /* __KERNEL__ */
129#endif /* __ASSEMBLY__ */
27 130
28#endif /* __ASM_GENERIC_SIGNAL_H */ 131#endif /* _ASM_GENERIC_SIGNAL_H */
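
In userspace these signal numbers and SA_* flags are used through sigaction(2). For example (ordinary POSIX code, not part of this patch), installing a restartable SIGCHLD handler:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_chld(int sig)
    {
            (void)sig;      /* a real handler would reap children here */
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_chld;
            sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
            sigemptyset(&sa.sa_mask);

            if (sigaction(SIGCHLD, &sa, NULL) < 0) {
                    perror("sigaction");
                    return 1;
            }
            pause();        /* wait for a signal to arrive */
            return 0;
    }
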
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
new file mode 100644
index 000000000000..5d79e409241c
--- /dev/null
+++ b/include/asm-generic/socket.h
@@ -0,0 +1,63 @@
1#ifndef __ASM_GENERIC_SOCKET_H
2#define __ASM_GENERIC_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add :#define SO_REUSEPORT 15 */
26
27#ifndef SO_PASSCRED /* powerpc only differs in these */
28#define SO_PASSCRED 16
29#define SO_PEERCRED 17
30#define SO_RCVLOWAT 18
31#define SO_SNDLOWAT 19
32#define SO_RCVTIMEO 20
33#define SO_SNDTIMEO 21
34#endif
35
36/* Security levels - as per NRL IPv6 - don't actually do anything */
37#define SO_SECURITY_AUTHENTICATION 22
38#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
39#define SO_SECURITY_ENCRYPTION_NETWORK 24
40
41#define SO_BINDTODEVICE 25
42
43/* Socket filtering */
44#define SO_ATTACH_FILTER 26
45#define SO_DETACH_FILTER 27
46
47#define SO_PEERNAME 28
48#define SO_TIMESTAMP 29
49#define SCM_TIMESTAMP SO_TIMESTAMP
50
51#define SO_ACCEPTCONN 30
52
53#define SO_PEERSEC 31
54#define SO_PASSSEC 34
55#define SO_TIMESTAMPNS 35
56#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
57
58#define SO_MARK 36
59
60#define SO_TIMESTAMPING 37
61#define SCM_TIMESTAMPING SO_TIMESTAMPING
62
63#endif /* __ASM_GENERIC_SOCKET_H */
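
The SO_* options above are applied with setsockopt(2) at the SOL_SOCKET level defined in the same header; for example (plain userspace code, not part of this patch):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/time.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int one = 1;
            struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

            if (fd < 0)
                    return 1;

            /* allow quick rebinding of the address, and bound blocking reads */
            if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0 ||
                setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
                    perror("setsockopt");
                    return 1;
            }
            return 0;
    }
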
diff --git a/include/asm-generic/sockios.h b/include/asm-generic/sockios.h
new file mode 100644
index 000000000000..9a61a369b901
--- /dev/null
+++ b/include/asm-generic/sockios.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_GENERIC_SOCKIOS_H
2#define __ASM_GENERIC_SOCKIOS_H
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif /* __ASM_GENERIC_SOCKIOS_H */
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
new file mode 100644
index 000000000000..1547a03ac50f
--- /dev/null
+++ b/include/asm-generic/spinlock.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_GENERIC_SPINLOCK_H
2#define __ASM_GENERIC_SPINLOCK_H
3/*
4 * You need to implement asm/spinlock.h for SMP support. The generic
5 * version does not handle SMP.
6 */
7#ifdef CONFIG_SMP
8#error need an architecture specific asm/spinlock.h
9#endif
10
11#endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
new file mode 100644
index 000000000000..47e64170305d
--- /dev/null
+++ b/include/asm-generic/stat.h
@@ -0,0 +1,72 @@
1#ifndef __ASM_GENERIC_STAT_H
2#define __ASM_GENERIC_STAT_H
3
4/*
5 * Everybody gets this wrong and has to stick with it for all
6 * eternity. Hopefully, this version gets used by new architectures
7 * so they don't fall into the same traps.
8 *
9 * stat64 is copied from powerpc64, with explicit padding added.
10 * stat is the same structure layout on 64-bit, without the 'long long'
11 * types.
12 *
13 * By convention, 64 bit architectures use the stat interface, while
14 * 32 bit architectures use the stat64 interface. Note that we don't
15 * provide an __old_kernel_stat here, which new architectures should
16 * not have to start with.
17 */
18
19#include <asm/bitsperlong.h>
20
21#define STAT_HAVE_NSEC 1
22
23struct stat {
24 unsigned long st_dev; /* Device. */
25 unsigned long st_ino; /* File serial number. */
26 unsigned int st_mode; /* File mode. */
27 unsigned int st_nlink; /* Link count. */
28 unsigned int st_uid; /* User ID of the file's owner. */
29 unsigned int st_gid; /* Group ID of the file's group. */
30 unsigned long st_rdev; /* Device number, if device. */
31 unsigned long __pad1;
32 long st_size; /* Size of file, in bytes. */
33 int st_blksize; /* Optimal block size for I/O. */
34 int __pad2;
35 long st_blocks; /* Number 512-byte blocks allocated. */
36 int st_atime; /* Time of last access. */
37 unsigned int st_atime_nsec;
38 int st_mtime; /* Time of last modification. */
39 unsigned int st_mtime_nsec;
40 int st_ctime; /* Time of last status change. */
41 unsigned int st_ctime_nsec;
42 unsigned int __unused4;
43 unsigned int __unused5;
44};
45
46#if __BITS_PER_LONG != 64
47/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
48struct stat64 {
49 unsigned long long st_dev; /* Device. */
50 unsigned long long st_ino; /* File serial number. */
51 unsigned int st_mode; /* File mode. */
52 unsigned int st_nlink; /* Link count. */
53 unsigned int st_uid; /* User ID of the file's owner. */
54 unsigned int st_gid; /* Group ID of the file's group. */
55 unsigned long long st_rdev; /* Device number, if device. */
56 unsigned long long __pad1;
57 long long st_size; /* Size of file, in bytes. */
58 int st_blksize; /* Optimal block size for I/O. */
59 int __pad2;
60 long long st_blocks; /* Number 512-byte blocks allocated. */
61 int st_atime; /* Time of last access. */
62 unsigned int st_atime_nsec;
63 int st_mtime; /* Time of last modification. */
64 unsigned int st_mtime_nsec;
65 int st_ctime; /* Time of last status change. */
66 unsigned int st_ctime_nsec;
67 unsigned int __unused4;
68 unsigned int __unused5;
69};
70#endif
71
72#endif /* __ASM_GENERIC_STAT_H */
diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h
new file mode 100644
index 000000000000..de5e0201459f
--- /dev/null
+++ b/include/asm-generic/string.h
@@ -0,0 +1,10 @@
1#ifndef __ASM_GENERIC_STRING_H
2#define __ASM_GENERIC_STRING_H
3/*
4 * The kernel provides all required functions in lib/string.c
5 *
6 * Architectures probably want to provide at least their own optimized
7 * memcpy and memset functions though.
8 */
9
10#endif /* __ASM_GENERIC_STRING_H */
diff --git a/include/asm-generic/swab.h b/include/asm-generic/swab.h
new file mode 100644
index 000000000000..a8e9029d9eba
--- /dev/null
+++ b/include/asm-generic/swab.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_GENERIC_SWAB_H
2#define _ASM_GENERIC_SWAB_H
3
4#include <asm/bitsperlong.h>
5
6/*
7 * 32 bit architectures typically (but not always) want to
8 * set __SWAB_64_THRU_32__. In user space, this is only
9 * valid if the compiler supports 64 bit data types.
10 */
11
12#if __BITS_PER_LONG == 32
13#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
14#define __SWAB_64_THRU_32__
15#endif
16#endif
17
18#endif /* _ASM_GENERIC_SWAB_H */
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
new file mode 100644
index 000000000000..df84e3b04555
--- /dev/null
+++ b/include/asm-generic/syscalls.h
@@ -0,0 +1,60 @@
1#ifndef __ASM_GENERIC_SYSCALLS_H
2#define __ASM_GENERIC_SYSCALLS_H
3
4#include <linux/compiler.h>
5#include <linux/linkage.h>
6
7/*
8 * Calling conventions for these system calls can differ, so
9 * it's possible to override them.
10 */
11#ifndef sys_clone
12asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
13 void __user *parent_tid, void __user *child_tid,
14 struct pt_regs *regs);
15#endif
16
17#ifndef sys_fork
18asmlinkage long sys_fork(struct pt_regs *regs);
19#endif
20
21#ifndef sys_vfork
22asmlinkage long sys_vfork(struct pt_regs *regs);
23#endif
24
25#ifndef sys_execve
26asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
27 char __user * __user *envp, struct pt_regs *regs);
28#endif
29
30#ifndef sys_mmap2
31asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
32 unsigned long prot, unsigned long flags,
33 unsigned long fd, unsigned long pgoff);
34#endif
35
36#ifndef sys_mmap
37asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
38 unsigned long prot, unsigned long flags,
39 unsigned long fd, off_t pgoff);
40#endif
41
42#ifndef sys_sigaltstack
43asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
44 struct pt_regs *);
45#endif
46
47#ifndef sys_rt_sigreturn
48asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
49#endif
50
51#ifndef sys_rt_sigsuspend
52asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
53#endif
54
55#ifndef sys_rt_sigaction
56asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
57 struct sigaction __user *oact, size_t sigsetsize);
58#endif
59
60#endif /* __ASM_GENERIC_SYSCALLS_H */
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
new file mode 100644
index 000000000000..efa403b5e121
--- /dev/null
+++ b/include/asm-generic/system.h
@@ -0,0 +1,161 @@
1/* Generic system definitions, based on MN10300 definitions.
2 *
3 * It should be possible to use these on really simple architectures,
4 * but this file serves more as a starting point for new ports.
5 *
6 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
7 * Written by David Howells (dhowells@redhat.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public Licence
11 * as published by the Free Software Foundation; either version
12 * 2 of the Licence, or (at your option) any later version.
13 */
14#ifndef __ASM_GENERIC_SYSTEM_H
15#define __ASM_GENERIC_SYSTEM_H
16
17#ifdef __KERNEL__
18#ifndef __ASSEMBLY__
19
20#include <linux/types.h>
21#include <linux/irqflags.h>
22
23#include <asm/cmpxchg-local.h>
24
25struct task_struct;
26
27/* context switching is now performed out-of-line in switch_to.S */
28extern struct task_struct *__switch_to(struct task_struct *,
29 struct task_struct *);
30#define switch_to(prev, next, last) \
31 do { \
32 ((last) = __switch_to((prev), (next))); \
33 } while (0)
34
35#define arch_align_stack(x) (x)
36
37#define nop() asm volatile ("nop")
38
39#endif /* !__ASSEMBLY__ */
40
41/*
42 * Force strict CPU ordering.
43 * And yes, this is required on UP too when we're talking
44 * to devices.
45 *
46 * This implementation only contains a compiler barrier.
47 */
48
49#define mb() asm volatile ("": : :"memory")
50#define rmb() mb()
51#define wmb() asm volatile ("": : :"memory")
52
53#ifdef CONFIG_SMP
54#define smp_mb() mb()
55#define smp_rmb() rmb()
56#define smp_wmb() wmb()
57#else
58#define smp_mb() barrier()
59#define smp_rmb() barrier()
60#define smp_wmb() barrier()
61#endif
62
63#define set_mb(var, value) do { var = value; mb(); } while (0)
64#define set_wmb(var, value) do { var = value; wmb(); } while (0)
65
66#define read_barrier_depends() do {} while (0)
67#define smp_read_barrier_depends() do {} while (0)
68
69/*
70 * we make sure local_irq_enable() doesn't cause priority inversion
71 */
72#ifndef __ASSEMBLY__
73
74/* This function doesn't exist, so you'll get a linker error
75 * if something tries to do an invalid xchg(). */
76extern void __xchg_called_with_bad_pointer(void);
77
78static inline
79unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
80{
81 unsigned long ret, flags;
82
83 switch (size) {
84 case 1:
85#ifdef __xchg_u8
86 return __xchg_u8(x, ptr);
87#else
88 local_irq_save(flags);
89 ret = *(volatile u8 *)ptr;
90 *(volatile u8 *)ptr = x;
91 local_irq_restore(flags);
92 return ret;
93#endif /* __xchg_u8 */
94
95 case 2:
96#ifdef __xchg_u16
97 return __xchg_u16(x, ptr);
98#else
99 local_irq_save(flags);
100 ret = *(volatile u16 *)ptr;
101 *(volatile u16 *)ptr = x;
102 local_irq_restore(flags);
103 return ret;
104#endif /* __xchg_u16 */
105
106 case 4:
107#ifdef __xchg_u32
108 return __xchg_u32(x, ptr);
109#else
110 local_irq_save(flags);
111 ret = *(volatile u32 *)ptr;
112 *(volatile u32 *)ptr = x;
113 local_irq_restore(flags);
114 return ret;
115#endif /* __xchg_u32 */
116
117#ifdef CONFIG_64BIT
118 case 8:
119#ifdef __xchg_u64
120 return __xchg_u64(x, ptr);
121#else
122 local_irq_save(flags);
123 ret = *(volatile u64 *)ptr;
124 *(volatile u64 *)ptr = x;
125 local_irq_restore(flags);
126 return ret;
127#endif /* __xchg_u64 */
128#endif /* CONFIG_64BIT */
129
130 default:
131 __xchg_called_with_bad_pointer();
132 return x;
133 }
134}
135
136#define xchg(ptr, x) \
137 ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
138
139static inline unsigned long __cmpxchg(volatile unsigned long *m,
140 unsigned long old, unsigned long new)
141{
142 unsigned long retval;
143 unsigned long flags;
144
145 local_irq_save(flags);
146 retval = *m;
147 if (retval == old)
148 *m = new;
149 local_irq_restore(flags);
150 return retval;
151}
152
153#define cmpxchg(ptr, o, n) \
154 ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
155 (unsigned long)(o), \
156 (unsigned long)(n)))
157
158#endif /* !__ASSEMBLY__ */
159
160#endif /* __KERNEL__ */
161#endif /* __ASM_GENERIC_SYSTEM_H */
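
The cmpxchg() fallback above (compare-and-swap emulated by briefly disabling interrupts) supports the usual retry-loop idiom. A sketch of such a caller (not part of this patch):

    /* Atomically add 'delta' to '*counter' using only cmpxchg(). */
    static inline void counter_add(unsigned long *counter, unsigned long delta)
    {
            unsigned long old, new;

            do {
                    old = *counter;
                    new = old + delta;
                    /* retry if *counter changed underneath us */
            } while (cmpxchg(counter, old, new) != old);
    }
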
diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h
new file mode 100644
index 000000000000..1c9773d48cb0
--- /dev/null
+++ b/include/asm-generic/termbits.h
@@ -0,0 +1,198 @@
1#ifndef __ASM_GENERIC_TERMBITS_H
2#define __ASM_GENERIC_TERMBITS_H
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif /* __ASM_GENERIC_TERMBITS_H */
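Note: the CBAUD/BOTHER encoding above is what allows a non-standard baud rate to be requested through c_ispeed/c_ospeed instead of one of the fixed Bxxx constants. A minimal user-space sketch, assuming the TCGETS2/TCSETS2 ioctls from asm-generic/ioctls.h and struct termios2 from the top of this header; the device path is only an example:

/* Sketch only: select an arbitrary baud rate via BOTHER and struct termios2.
 * Assumes <asm/termbits.h> and <asm/ioctls.h> provide struct termios2,
 * BOTHER, CBAUD and TCGETS2/TCSETS2; "/dev/ttyS0" is just an example path. */
#include <asm/termbits.h>
#include <asm/ioctls.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int set_custom_baud(const char *dev, int baud)
{
	struct termios2 tio;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TCGETS2, &tio) < 0) {
		close(fd);
		return -1;
	}
	tio.c_cflag &= ~CBAUD;	/* clear the fixed Bxxx speed bits */
	tio.c_cflag |= BOTHER;	/* request an explicit rate instead */
	tio.c_ispeed = baud;
	tio.c_ospeed = baud;
	if (ioctl(fd, TCSETS2, &tio) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}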
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
new file mode 100644
index 000000000000..0a769feb22b0
--- /dev/null
+++ b/include/asm-generic/termios-base.h
@@ -0,0 +1,77 @@
1/* termios.h: generic termios/termio user copying/translation
2 */
3
4#ifndef _ASM_GENERIC_TERMIOS_BASE_H
5#define _ASM_GENERIC_TERMIOS_BASE_H
6
7#include <asm/uaccess.h>
8
9#ifndef __ARCH_TERMIO_GETPUT
10
11/*
12 * Translate a "termio" structure into a "termios". Ugh.
13 */
14static inline int user_termio_to_kernel_termios(struct ktermios *termios,
15 struct termio __user *termio)
16{
17 unsigned short tmp;
18
19 if (get_user(tmp, &termio->c_iflag) < 0)
20 goto fault;
21 termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
22
23 if (get_user(tmp, &termio->c_oflag) < 0)
24 goto fault;
25 termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
26
27 if (get_user(tmp, &termio->c_cflag) < 0)
28 goto fault;
29 termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
30
31 if (get_user(tmp, &termio->c_lflag) < 0)
32 goto fault;
33 termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
34
35 if (get_user(termios->c_line, &termio->c_line) < 0)
36 goto fault;
37
38 if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
39 goto fault;
40
41 return 0;
42
43 fault:
44 return -EFAULT;
45}
46
47/*
48 * Translate a "termios" structure into a "termio". Ugh.
49 */
50static inline int kernel_termios_to_user_termio(struct termio __user *termio,
51 struct ktermios *termios)
52{
53 if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
54 put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
55 put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
56 put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
57 put_user(termios->c_line, &termio->c_line) < 0 ||
58 copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
59 return -EFAULT;
60
61 return 0;
62}
63
64#ifndef user_termios_to_kernel_termios
65#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
66#endif
67
68#ifndef kernel_termios_to_user_termios
69#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
70#endif
71
72#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
73#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
74
75#endif /* __ARCH_TERMIO_GETPUT */
76
77#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
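Note: these helpers exist so a driver's ioctl path can accept the legacy struct termio as well as the modern struct termios. A hedged sketch of a caller; the function and its placement are illustrative and not part of this patch:

/* Illustration only: copy a legacy struct termio from user space into a
 * driver's ktermios using the helper above.  tty->termios is a pointer to
 * struct ktermios in this kernel generation. */
#include <linux/tty.h>

static int example_set_termio(struct tty_struct *tty,
			      struct termio __user *arg)
{
	struct ktermios kt = *tty->termios;	/* start from current settings */

	if (user_termio_to_kernel_termios(&kt, arg))
		return -EFAULT;
	/* ...hand kt to the driver's set_termios path... */
	return 0;
}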
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index 7d39ecc92d94..d0922adc56d4 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -1,18 +1,68 @@
1/* termios.h: generic termios/termio user copying/translation
2 */
3
4#ifndef _ASM_GENERIC_TERMIOS_H 1#ifndef _ASM_GENERIC_TERMIOS_H
5#define _ASM_GENERIC_TERMIOS_H 2#define _ASM_GENERIC_TERMIOS_H
3/*
4 * Most architectures have straight copies of the x86 code, with
5 * varying levels of bug fixes on top. Usually it's a good idea
6 * to use this generic version instead, but be careful to avoid
7 * ABI changes.
8 * New architectures should not provide their own version.
9 */
10
11#include <asm/termbits.h>
12#include <asm/ioctls.h>
13
14struct winsize {
15 unsigned short ws_row;
16 unsigned short ws_col;
17 unsigned short ws_xpixel;
18 unsigned short ws_ypixel;
19};
20
21#define NCC 8
22struct termio {
23 unsigned short c_iflag; /* input mode flags */
24 unsigned short c_oflag; /* output mode flags */
25 unsigned short c_cflag; /* control mode flags */
26 unsigned short c_lflag; /* local mode flags */
27 unsigned char c_line; /* line discipline */
28 unsigned char c_cc[NCC]; /* control characters */
29};
30
31/* modem lines */
32#define TIOCM_LE 0x001
33#define TIOCM_DTR 0x002
34#define TIOCM_RTS 0x004
35#define TIOCM_ST 0x008
36#define TIOCM_SR 0x010
37#define TIOCM_CTS 0x020
38#define TIOCM_CAR 0x040
39#define TIOCM_RNG 0x080
40#define TIOCM_DSR 0x100
41#define TIOCM_CD TIOCM_CAR
42#define TIOCM_RI TIOCM_RNG
43#define TIOCM_OUT1 0x2000
44#define TIOCM_OUT2 0x4000
45#define TIOCM_LOOP 0x8000
46
47/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
48
49#ifdef __KERNEL__
6 50
7#include <asm/uaccess.h> 51#include <asm/uaccess.h>
8 52
9#ifndef __ARCH_TERMIO_GETPUT 53/* intr=^C quit=^\ erase=del kill=^U
54 eof=^D vtime=\0 vmin=\1 sxtc=\0
55 start=^Q stop=^S susp=^Z eol=\0
56 reprint=^R discard=^U werase=^W lnext=^V
57 eol2=\0
58*/
59#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
10 60
11/* 61/*
12 * Translate a "termio" structure into a "termios". Ugh. 62 * Translate a "termio" structure into a "termios". Ugh.
13 */ 63 */
14static inline int user_termio_to_kernel_termios(struct ktermios *termios, 64static inline int user_termio_to_kernel_termios(struct ktermios *termios,
15 struct termio __user *termio) 65 const struct termio __user *termio)
16{ 66{
17 unsigned short tmp; 67 unsigned short tmp;
18 68
@@ -61,17 +111,44 @@ static inline int kernel_termios_to_user_termio(struct termio __user *termio,
61 return 0; 111 return 0;
62} 112}
63 113
64#ifndef user_termios_to_kernel_termios 114#ifdef TCGETS2
65#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 115static inline int user_termios_to_kernel_termios(struct ktermios *k,
66#endif 116 struct termios2 __user *u)
117{
118 return copy_from_user(k, u, sizeof(struct termios2));
119}
67 120
68#ifndef kernel_termios_to_user_termios 121static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
69#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 122 struct ktermios *k)
70#endif 123{
124 return copy_to_user(u, k, sizeof(struct termios2));
125}
71 126
72#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) 127static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
73#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) 128 struct termios __user *u)
129{
130 return copy_from_user(k, u, sizeof(struct termios));
131}
132
133static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
134 struct ktermios *k)
135{
136 return copy_to_user(u, k, sizeof(struct termios));
137}
138#else /* TCGETS2 */
139static inline int user_termios_to_kernel_termios(struct ktermios *k,
140 struct termios __user *u)
141{
142 return copy_from_user(k, u, sizeof(struct termios));
143}
144
145static inline int kernel_termios_to_user_termios(struct termios __user *u,
146 struct ktermios *k)
147{
148 return copy_to_user(u, k, sizeof(struct termios));
149}
150#endif /* TCGETS2 */
74 151
75#endif /* __ARCH_TERMIO_GETPUT */ 152#endif /* __KERNEL__ */
76 153
77#endif /* _ASM_GENERIC_TERMIOS_H */ 154#endif /* _ASM_GENERIC_TERMIOS_H */
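Note: the INIT_C_CC string above is indexed by the V* constants from termbits.h, so each byte is the default value for the corresponding c_cc[] slot. A small illustrative snippet, not part of the header:

/* Illustration: the default control characters, indexed by the V* constants.
 * init_cc[VINTR] is '\003' (^C), init_cc[VSTOP] is '\023' (^S), and
 * init_cc[VMIN] is 1, so a raw read completes after a single byte. */
static const unsigned char init_cc[NCCS] = INIT_C_CC;
/* A driver could seed a fresh set of control characters with, e.g.:
 *	memcpy(tty->termios->c_cc, init_cc, NCCS);
 */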
diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h
new file mode 100644
index 000000000000..b2243cb8d6f6
--- /dev/null
+++ b/include/asm-generic/timex.h
@@ -0,0 +1,22 @@
1#ifndef __ASM_GENERIC_TIMEX_H
2#define __ASM_GENERIC_TIMEX_H
3
4/*
5 * If you have a cycle counter, return the value here.
6 */
7typedef unsigned long cycles_t;
8#ifndef get_cycles
9static inline cycles_t get_cycles(void)
10{
11 return 0;
12}
13#endif
14
15/*
16 * Architectures are encouraged to implement read_current_timer
17 * and define this in order to avoid the expensive delay loop
18 * calibration during boot.
19 */
20#undef ARCH_HAS_READ_CURRENT_TIMER
21
22#endif /* __ASM_GENERIC_TIMEX_H */
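Note: an architecture with a real cycle counter is expected to provide its own asm/timex.h instead of this fallback. A hedged sketch of what such a replacement might look like; rdcycle() stands in for whatever instruction actually reads the counter and is not a real API:

/* Sketch of an architecture's own asm/timex.h, used instead of the generic
 * fallback above.  rdcycle() is a placeholder for the real counter-read
 * instruction on that architecture. */
#ifndef __EXAMPLE_ARCH_TIMEX_H
#define __EXAMPLE_ARCH_TIMEX_H

typedef unsigned long cycles_t;

static inline cycles_t get_cycles(void)
{
	return rdcycle();	/* hypothetical hardware counter read */
}

/* Only define this if read_current_timer() is also implemented; it lets the
 * boot code skip the expensive delay-loop calibration. */
#define ARCH_HAS_READ_CURRENT_TIMER

#endif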
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
new file mode 100644
index 000000000000..c7af037024c7
--- /dev/null
+++ b/include/asm-generic/tlbflush.h
@@ -0,0 +1,18 @@
1#ifndef __ASM_GENERIC_TLBFLUSH_H
2#define __ASM_GENERIC_TLBFLUSH_H
3/*
4 * This is a dummy tlbflush implementation that can be used on all
5 * nommu architectures.
6 * If you have an MMU, you need to write your own functions.
7 */
8#ifdef CONFIG_MMU
9#error need to implement an architecture specific asm/tlbflush.h
10#endif
11
12static inline void flush_tlb_mm(struct mm_struct *mm)
13{
14 BUG();
15}
16
17
18#endif /* __ASM_GENERIC_TLBFLUSH_H */
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
new file mode 100644
index 000000000000..fba7d33ca3f2
--- /dev/null
+++ b/include/asm-generic/types.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_GENERIC_TYPES_H
2#define _ASM_GENERIC_TYPES_H
3/*
4 * int-ll64 is used practically everywhere now,
5 * so use it as a reasonable default.
6 */
7#include <asm-generic/int-ll64.h>
8
9#ifndef __ASSEMBLY__
10
11typedef unsigned short umode_t;
12
13#endif /* __ASSEMBLY__ */
14
15/*
16 * These aren't exported outside the kernel to avoid name space clashes
17 */
18#ifdef __KERNEL__
19#ifndef __ASSEMBLY__
20/*
21 * DMA addresses may be very different from physical addresses
22 * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
23 * systems, while sparc64 uses 32 bit DMA addresses for 64 bit
24 * physical addresses.
25 * This default defines dma_addr_t to have the same size as
26 * phys_addr_t, which is the most common way.
27 * Do not define the dma64_addr_t type, which never really
28 * worked.
29 */
30#ifndef dma_addr_t
31#ifdef CONFIG_PHYS_ADDR_T_64BIT
32typedef u64 dma_addr_t;
33#else
34typedef u32 dma_addr_t;
35#endif /* CONFIG_PHYS_ADDR_T_64BIT */
36#endif /* dma_addr_t */
37
38#endif /* __ASSEMBLY__ */
39
40#endif /* __KERNEL__ */
41
42#endif /* _ASM_GENERIC_TYPES_H */
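Note: in practice the default above simply ties the width of DMA handles to the width of physical addresses. A minimal illustrative check, assuming the generic default is used and no architecture overrides dma_addr_t:

/* Illustration: with the generic default (no architecture override),
 * dma_addr_t and phys_addr_t have the same width, whether or not
 * CONFIG_PHYS_ADDR_T_64BIT is set. */
#include <linux/kernel.h>
#include <linux/types.h>

static inline void example_dma_addr_width_check(void)
{
	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(phys_addr_t));
}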
diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h
new file mode 100644
index 000000000000..67deb898f0c5
--- /dev/null
+++ b/include/asm-generic/uaccess-unaligned.h
@@ -0,0 +1,26 @@
1#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
2#define __ASM_GENERIC_UACCESS_UNALIGNED_H
3
4/*
5 * This macro should be used instead of __get_user() when accessing
6 * values at locations that are not known to be aligned.
7 */
8#define __get_user_unaligned(x, ptr) \
9({ \
10 __typeof__ (*(ptr)) __x; \
11 __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
12 (x) = __x; \
13})
14
15
16/*
17 * This macro should be used instead of __put_user() when accessing
18 * values at locations that are not known to be aligned.
19 */
20#define __put_user_unaligned(x, ptr) \
21({ \
22 __typeof__ (*(ptr)) __x = (x); \
23 __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
24})
25
26#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
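Note: these wrappers are intended for fields that sit at arbitrary byte offsets in user memory, where a plain __get_user()/__put_user() could trap on strict-alignment architectures. A hedged usage sketch; the packed structure is invented for the example:

/* Illustration only: write a 32-bit field that is not naturally aligned in a
 * user-supplied buffer.  The record layout is made up for this example;
 * 'value' starts at offset 1, so it is misaligned on most architectures. */
#include <linux/types.h>

struct example_user_record {
	u8	tag;
	u32	value;
} __attribute__((packed));

static int example_store_value(struct example_user_record __user *rec, u32 val)
{
	/* __put_user_unaligned() evaluates to 0 on success or -EFAULT */
	return __put_user_unaligned(val, &rec->value);
}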
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 549cb3a1640a..b218b8513d04 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -1,26 +1,327 @@
1#ifndef _ASM_GENERIC_UACCESS_H_ 1#ifndef __ASM_GENERIC_UACCESS_H
2#define _ASM_GENERIC_UACCESS_H_ 2#define __ASM_GENERIC_UACCESS_H
3 3
4/* 4/*
5 * This macro should be used instead of __get_user() when accessing 5 * User space memory access functions, these should work
 6 * values at locations that are not known to be aligned. 6 * on any machine that has kernel and user data in the same
7 * address space, e.g. all NOMMU machines.
7 */ 8 */
8#define __get_user_unaligned(x, ptr) \ 9#include <linux/sched.h>
9({ \ 10#include <linux/mm.h>
10 __typeof__ (*(ptr)) __x; \ 11#include <linux/string.h>
11 __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ 12
12 (x) = __x; \ 13#include <asm/segment.h>
13}) 14
15#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
16
17#ifndef KERNEL_DS
18#define KERNEL_DS MAKE_MM_SEG(~0UL)
19#endif
20
21#ifndef USER_DS
22#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
23#endif
24
25#ifndef get_fs
26#define get_ds() (KERNEL_DS)
27#define get_fs() (current_thread_info()->addr_limit)
28
29static inline void set_fs(mm_segment_t fs)
30{
31 current_thread_info()->addr_limit = fs;
32}
33#endif
14 34
35#define segment_eq(a, b) ((a).seg == (b).seg)
36
37#define VERIFY_READ 0
38#define VERIFY_WRITE 1
39
40#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
41
42/*
43 * The architecture should really override this if possible, at least
44 * doing a check on the get_fs()
45 */
46#ifndef __access_ok
47static inline int __access_ok(unsigned long addr, unsigned long size)
48{
49 return 1;
50}
51#endif
15 52
16/* 53/*
17 * This macro should be used instead of __put_user() when accessing 54 * The exception table consists of pairs of addresses: the first is the
18 * values at locations that are not known to be aligned. 55 * address of an instruction that is allowed to fault, and the second is
56 * the address at which the program should continue. No registers are
57 * modified, so it is entirely up to the continuation code to figure out
58 * what to do.
59 *
60 * All the routines below use bits of fixup code that are out of line
61 * with the main instruction path. This means when everything is well,
62 * we don't even have to jump over them. Further, they do not intrude
63 * on our cache or tlb entries.
19 */ 64 */
20#define __put_user_unaligned(x, ptr) \ 65
21({ \ 66struct exception_table_entry
22 __typeof__ (*(ptr)) __x = (x); \ 67{
23 __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ 68 unsigned long insn, fixup;
69};
70
71/* Returns 0 if exception not found and fixup otherwise. */
72extern unsigned long search_exception_table(unsigned long);
73
74/*
75 * architectures with an MMU should override these two
76 */
77#ifndef __copy_from_user
78static inline __must_check long __copy_from_user(void *to,
79 const void __user * from, unsigned long n)
80{
81 if (__builtin_constant_p(n)) {
82 switch(n) {
83 case 1:
84 *(u8 *)to = *(u8 __force *)from;
85 return 0;
86 case 2:
87 *(u16 *)to = *(u16 __force *)from;
88 return 0;
89 case 4:
90 *(u32 *)to = *(u32 __force *)from;
91 return 0;
92#ifdef CONFIG_64BIT
93 case 8:
94 *(u64 *)to = *(u64 __force *)from;
95 return 0;
96#endif
97 default:
98 break;
99 }
100 }
101
102 memcpy(to, (const void __force *)from, n);
103 return 0;
104}
105#endif
106
107#ifndef __copy_to_user
108static inline __must_check long __copy_to_user(void __user *to,
109 const void *from, unsigned long n)
110{
111 if (__builtin_constant_p(n)) {
112 switch(n) {
113 case 1:
114 *(u8 __force *)to = *(u8 *)from;
115 return 0;
116 case 2:
117 *(u16 __force *)to = *(u16 *)from;
118 return 0;
119 case 4:
120 *(u32 __force *)to = *(u32 *)from;
121 return 0;
122#ifdef CONFIG_64BIT
123 case 8:
124 *(u64 __force *)to = *(u64 *)from;
125 return 0;
126#endif
127 default:
128 break;
129 }
130 }
131
132 memcpy((void __force *)to, from, n);
133 return 0;
134}
135#endif
136
137/*
138 * These are the main single-value transfer routines. They automatically
139 * use the right size if we just have the right pointer type.
140 * This version just falls back to copy_{from,to}_user, which should
141 * provide a fast-path for small values.
142 */
143#define __put_user(x, ptr) \
144({ \
145 __typeof__(*(ptr)) __x = (x); \
146 int __pu_err = -EFAULT; \
147 __chk_user_ptr(ptr); \
148 switch (sizeof (*(ptr))) { \
149 case 1: \
150 case 2: \
151 case 4: \
152 case 8: \
153 __pu_err = __put_user_fn(sizeof (*(ptr)), \
154 ptr, &__x); \
155 break; \
156 default: \
157 __put_user_bad(); \
158 break; \
159 } \
160 __pu_err; \
161})
162
163#define put_user(x, ptr) \
164({ \
165 might_sleep(); \
166 access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
167 __put_user(x, ptr) : \
168 -EFAULT; \
24}) 169})
25 170
26#endif /* _ASM_GENERIC_UACCESS_H */ 171static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
172{
173 size = __copy_to_user(ptr, x, size);
174 return size ? -EFAULT : size;
175}
176
177extern int __put_user_bad(void) __attribute__((noreturn));
178
179#define __get_user(x, ptr) \
180({ \
181 int __gu_err = -EFAULT; \
182 __chk_user_ptr(ptr); \
183 switch (sizeof(*(ptr))) { \
184 case 1: { \
185 unsigned char __x; \
186 __gu_err = __get_user_fn(sizeof (*(ptr)), \
187 ptr, &__x); \
188 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
189 break; \
190 }; \
191 case 2: { \
192 unsigned short __x; \
193 __gu_err = __get_user_fn(sizeof (*(ptr)), \
194 ptr, &__x); \
195 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
196 break; \
197 }; \
198 case 4: { \
199 unsigned int __x; \
200 __gu_err = __get_user_fn(sizeof (*(ptr)), \
201 ptr, &__x); \
202 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
203 break; \
204 }; \
205 case 8: { \
206 unsigned long long __x; \
207 __gu_err = __get_user_fn(sizeof (*(ptr)), \
208 ptr, &__x); \
209 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
210 break; \
211 }; \
212 default: \
213 __get_user_bad(); \
214 break; \
215 } \
216 __gu_err; \
217})
218
219#define get_user(x, ptr) \
220({ \
221 might_sleep(); \
222 access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
223 __get_user(x, ptr) : \
224 -EFAULT; \
225})
226
227static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
228{
229 size = __copy_from_user(x, ptr, size);
230 return size ? -EFAULT : size;
231}
232
233extern int __get_user_bad(void) __attribute__((noreturn));
234
235#ifndef __copy_from_user_inatomic
236#define __copy_from_user_inatomic __copy_from_user
237#endif
238
239#ifndef __copy_to_user_inatomic
240#define __copy_to_user_inatomic __copy_to_user
241#endif
242
243static inline long copy_from_user(void *to,
244 const void __user * from, unsigned long n)
245{
246 might_sleep();
247 if (access_ok(VERIFY_READ, from, n))
248 return __copy_from_user(to, from, n);
249 else
250 return n;
251}
252
253static inline long copy_to_user(void __user *to,
254 const void *from, unsigned long n)
255{
256 might_sleep();
257 if (access_ok(VERIFY_WRITE, to, n))
258 return __copy_to_user(to, from, n);
259 else
260 return n;
261}
262
263/*
264 * Copy a null terminated string from userspace.
265 */
266#ifndef __strncpy_from_user
267static inline long
268__strncpy_from_user(char *dst, const char __user *src, long count)
269{
270 char *tmp;
271 strncpy(dst, (const char __force *)src, count);
272 for (tmp = dst; *tmp && count > 0; tmp++, count--)
273 ;
274 return (tmp - dst);
275}
276#endif
277
278static inline long
279strncpy_from_user(char *dst, const char __user *src, long count)
280{
281 if (!access_ok(VERIFY_READ, src, 1))
282 return -EFAULT;
283 return __strncpy_from_user(dst, src, count);
284}
285
286/*
287 * Return the size of a string (including the ending 0)
288 *
289 * Return 0 on exception, a value greater than N if too long
290 */
291#ifndef strnlen_user
292static inline long strnlen_user(const char __user *src, long n)
293{
294 if (!access_ok(VERIFY_READ, src, 1))
295 return 0;
296 return strlen((void * __force)src) + 1;
297}
298#endif
299
300static inline long strlen_user(const char __user *src)
301{
302 return strnlen_user(src, 32767);
303}
304
305/*
306 * Zero Userspace
307 */
308#ifndef __clear_user
309static inline __must_check unsigned long
310__clear_user(void __user *to, unsigned long n)
311{
312 memset((void __force *)to, 0, n);
313 return 0;
314}
315#endif
316
317static inline __must_check unsigned long
318clear_user(void __user *to, unsigned long n)
319{
320 might_sleep();
321 if (!access_ok(VERIFY_WRITE, to, n))
322 return n;
323
324 return __clear_user(to, n);
325}
326
327#endif /* __ASM_GENERIC_UACCESS_H */
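Note: taken together, the routines above give a NOMMU architecture a complete uaccess layer: access_ok() trivially succeeds and the copy helpers degrade to memcpy(). A hedged sketch of a typical caller; the ioctl-style helper and its argument structure are illustrative:

/* Illustration only: the usual calling pattern for the helpers defined above.
 * The argument structure and function are invented for the example. */
#include <linux/errno.h>

struct example_args {
	unsigned long	addr;
	unsigned int	len;
};

static long example_ioctl(void __user *argp, unsigned int __user *resultp)
{
	struct example_args args;

	/* copy_from_user() returns the number of bytes it could not copy */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	/* put_user() returns 0 on success or -EFAULT on a faulting address */
	return put_user(args.len, resultp);
}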
diff --git a/include/asm-generic/ucontext.h b/include/asm-generic/ucontext.h
new file mode 100644
index 000000000000..ad77343e8a9a
--- /dev/null
+++ b/include/asm-generic/ucontext.h
@@ -0,0 +1,12 @@
1#ifndef __ASM_GENERIC_UCONTEXT_H
2#define __ASM_GENERIC_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif /* __ASM_GENERIC_UCONTEXT_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 000000000000..03cf5936bad6
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,30 @@
1#ifndef __ASM_GENERIC_UNALIGNED_H
2#define __ASM_GENERIC_UNALIGNED_H
3
4/*
5 * This is the most generic implementation of unaligned accesses
6 * and should work almost anywhere.
7 *
8 * If an architecture can handle unaligned accesses in hardware,
9 * it may want to use the linux/unaligned/access_ok.h implementation
10 * instead.
11 */
12#include <asm/byteorder.h>
13
14#if defined(__LITTLE_ENDIAN)
15# include <linux/unaligned/le_struct.h>
16# include <linux/unaligned/be_byteshift.h>
17# include <linux/unaligned/generic.h>
18# define get_unaligned __get_unaligned_le
19# define put_unaligned __put_unaligned_le
20#elif defined(__BIG_ENDIAN)
21# include <linux/unaligned/be_struct.h>
22# include <linux/unaligned/le_byteshift.h>
23# include <linux/unaligned/generic.h>
24# define get_unaligned __get_unaligned_be
25# define put_unaligned __put_unaligned_be
26#else
27# error need to define endianness
28#endif
29
30#endif /* __ASM_GENERIC_UNALIGNED_H */
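Note: whichever endianness branch is taken, the header ends up providing the same accessors, including the fixed-size get_unaligned_be32()/le32() helpers pulled in from the linux/unaligned/ files. A short illustrative use; the on-wire layout is hypothetical:

/* Illustration: read a 32-bit big-endian length field that may sit at an odd
 * offset inside a received buffer.  The protocol layout is made up. */
#include <linux/types.h>
#include <asm/unaligned.h>

static u32 example_parse_len(const u8 *buf)
{
	/* buf + 3 is usually not 4-byte aligned; the helper copes on every
	 * architecture, falling back to byte accesses where necessary. */
	return get_unaligned_be32(buf + 3);
}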
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
new file mode 100644
index 000000000000..1125e5a1ee5d
--- /dev/null
+++ b/include/asm-generic/unistd.h
@@ -0,0 +1,859 @@
1#if !defined(_ASM_GENERIC_UNISTD_H) || defined(__SYSCALL)
2#define _ASM_GENERIC_UNISTD_H
3
4#include <asm/bitsperlong.h>
5
6/*
7 * This file contains the system call numbers, based on the
8 * layout of the x86-64 architecture, which embeds the
9 * pointer to the syscall in the table.
10 *
11 * As a basic principle, no duplication of functionality
12 * should be added, e.g. we don't use lseek when llseek
13 * is present. New architectures should use this file
14 * and implement the less feature-full calls in user space.
15 */
16
17#ifndef __SYSCALL
18#define __SYSCALL(x, y)
19#endif
20
21#if __BITS_PER_LONG == 32
22#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
23#else
24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
25#endif
26
27#define __NR_io_setup 0
28__SYSCALL(__NR_io_setup, sys_io_setup)
29#define __NR_io_destroy 1
30__SYSCALL(__NR_io_destroy, sys_io_destroy)
31#define __NR_io_submit 2
32__SYSCALL(__NR_io_submit, sys_io_submit)
33#define __NR_io_cancel 3
34__SYSCALL(__NR_io_cancel, sys_io_cancel)
35#define __NR_io_getevents 4
36__SYSCALL(__NR_io_getevents, sys_io_getevents)
37
38/* fs/xattr.c */
39#define __NR_setxattr 5
40__SYSCALL(__NR_setxattr, sys_setxattr)
41#define __NR_lsetxattr 6
42__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
43#define __NR_fsetxattr 7
44__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
45#define __NR_getxattr 8
46__SYSCALL(__NR_getxattr, sys_getxattr)
47#define __NR_lgetxattr 9
48__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
49#define __NR_fgetxattr 10
50__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
51#define __NR_listxattr 11
52__SYSCALL(__NR_listxattr, sys_listxattr)
53#define __NR_llistxattr 12
54__SYSCALL(__NR_llistxattr, sys_llistxattr)
55#define __NR_flistxattr 13
56__SYSCALL(__NR_flistxattr, sys_flistxattr)
57#define __NR_removexattr 14
58__SYSCALL(__NR_removexattr, sys_removexattr)
59#define __NR_lremovexattr 15
60__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
61#define __NR_fremovexattr 16
62__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
63
64/* fs/dcache.c */
65#define __NR_getcwd 17
66__SYSCALL(__NR_getcwd, sys_getcwd)
67
68/* fs/cookies.c */
69#define __NR_lookup_dcookie 18
70__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
71
72/* fs/eventfd.c */
73#define __NR_eventfd2 19
74__SYSCALL(__NR_eventfd2, sys_eventfd2)
75
76/* fs/eventpoll.c */
77#define __NR_epoll_create1 20
78__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
79#define __NR_epoll_ctl 21
80__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
81#define __NR_epoll_pwait 22
82__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
83
84/* fs/fcntl.c */
85#define __NR_dup 23
86__SYSCALL(__NR_dup, sys_dup)
87#define __NR_dup3 24
88__SYSCALL(__NR_dup3, sys_dup3)
89#define __NR3264_fcntl 25
90__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl)
91
92/* fs/inotify_user.c */
93#define __NR_inotify_init1 26
94__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
95#define __NR_inotify_add_watch 27
96__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
97#define __NR_inotify_rm_watch 28
98__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
99
100/* fs/ioctl.c */
101#define __NR_ioctl 29
102__SYSCALL(__NR_ioctl, sys_ioctl)
103
104/* fs/ioprio.c */
105#define __NR_ioprio_set 30
106__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
107#define __NR_ioprio_get 31
108__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
109
110/* fs/locks.c */
111#define __NR_flock 32
112__SYSCALL(__NR_flock, sys_flock)
113
114/* fs/namei.c */
115#define __NR_mknodat 33
116__SYSCALL(__NR_mknodat, sys_mknodat)
117#define __NR_mkdirat 34
118__SYSCALL(__NR_mkdirat, sys_mkdirat)
119#define __NR_unlinkat 35
120__SYSCALL(__NR_unlinkat, sys_unlinkat)
121#define __NR_symlinkat 36
122__SYSCALL(__NR_symlinkat, sys_symlinkat)
123#define __NR_linkat 37
124__SYSCALL(__NR_linkat, sys_linkat)
125#define __NR_renameat 38
126__SYSCALL(__NR_renameat, sys_renameat)
127
128/* fs/namespace.c */
129#define __NR_umount2 39
130__SYSCALL(__NR_umount2, sys_umount)
131#define __NR_mount 40
132__SYSCALL(__NR_mount, sys_mount)
133#define __NR_pivot_root 41
134__SYSCALL(__NR_pivot_root, sys_pivot_root)
135
136/* fs/nfsctl.c */
137#define __NR_nfsservctl 42
138__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
139
140/* fs/open.c */
141#define __NR3264_statfs 43
142__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs)
143#define __NR3264_fstatfs 44
144__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs)
145#define __NR3264_truncate 45
146__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate)
147#define __NR3264_ftruncate 46
148__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate)
149
150#define __NR_fallocate 47
151__SYSCALL(__NR_fallocate, sys_fallocate)
152#define __NR_faccessat 48
153__SYSCALL(__NR_faccessat, sys_faccessat)
154#define __NR_chdir 49
155__SYSCALL(__NR_chdir, sys_chdir)
156#define __NR_fchdir 50
157__SYSCALL(__NR_fchdir, sys_fchdir)
158#define __NR_chroot 51
159__SYSCALL(__NR_chroot, sys_chroot)
160#define __NR_fchmod 52
161__SYSCALL(__NR_fchmod, sys_fchmod)
162#define __NR_fchmodat 53
163__SYSCALL(__NR_fchmodat, sys_fchmodat)
164#define __NR_fchownat 54
165__SYSCALL(__NR_fchownat, sys_fchownat)
166#define __NR_fchown 55
167__SYSCALL(__NR_fchown, sys_fchown)
168#define __NR_openat 56
169__SYSCALL(__NR_openat, sys_openat)
170#define __NR_close 57
171__SYSCALL(__NR_close, sys_close)
172#define __NR_vhangup 58
173__SYSCALL(__NR_vhangup, sys_vhangup)
174
175/* fs/pipe.c */
176#define __NR_pipe2 59
177__SYSCALL(__NR_pipe2, sys_pipe2)
178
179/* fs/quota.c */
180#define __NR_quotactl 60
181__SYSCALL(__NR_quotactl, sys_quotactl)
182
183/* fs/readdir.c */
184#define __NR_getdents64 61
185__SYSCALL(__NR_getdents64, sys_getdents64)
186
187/* fs/read_write.c */
188#define __NR3264_lseek 62
189__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
190#define __NR_read 63
191__SYSCALL(__NR_read, sys_read)
192#define __NR_write 64
193__SYSCALL(__NR_write, sys_write)
194#define __NR_readv 65
195__SYSCALL(__NR_readv, sys_readv)
196#define __NR_writev 66
197__SYSCALL(__NR_writev, sys_writev)
198#define __NR_pread64 67
199__SYSCALL(__NR_pread64, sys_pread64)
200#define __NR_pwrite64 68
201__SYSCALL(__NR_pwrite64, sys_pwrite64)
202#define __NR_preadv 69
203__SYSCALL(__NR_preadv, sys_preadv)
204#define __NR_pwritev 70
205__SYSCALL(__NR_pwritev, sys_pwritev)
206
207/* fs/sendfile.c */
208#define __NR3264_sendfile 71
209__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
210
211/* fs/select.c */
212#define __NR_pselect6 72
213__SYSCALL(__NR_pselect6, sys_pselect6)
214#define __NR_ppoll 73
215__SYSCALL(__NR_ppoll, sys_ppoll)
216
217/* fs/signalfd.c */
218#define __NR_signalfd4 74
219__SYSCALL(__NR_signalfd4, sys_signalfd4)
220
221/* fs/splice.c */
222#define __NR_vmsplice 75
223__SYSCALL(__NR_vmsplice, sys_vmsplice)
224#define __NR_splice 76
225__SYSCALL(__NR_splice, sys_splice)
226#define __NR_tee 77
227__SYSCALL(__NR_tee, sys_tee)
228
229/* fs/stat.c */
230#define __NR_readlinkat 78
231__SYSCALL(__NR_readlinkat, sys_readlinkat)
232#define __NR3264_fstatat 79
233__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
234#define __NR3264_fstat 80
235__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
236
237/* fs/sync.c */
238#define __NR_sync 81
239__SYSCALL(__NR_sync, sys_sync)
240#define __NR_fsync 82
241__SYSCALL(__NR_fsync, sys_fsync)
242#define __NR_fdatasync 83
243__SYSCALL(__NR_fdatasync, sys_fdatasync)
244#define __NR_sync_file_range 84
245__SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */
246
247/* fs/timerfd.c */
248#define __NR_timerfd_create 85
249__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
250#define __NR_timerfd_settime 86
251__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
252#define __NR_timerfd_gettime 87
253__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
254
255/* fs/utimes.c */
256#define __NR_utimensat 88
257__SYSCALL(__NR_utimensat, sys_utimensat)
258
259/* kernel/acct.c */
260#define __NR_acct 89
261__SYSCALL(__NR_acct, sys_acct)
262
263/* kernel/capability.c */
264#define __NR_capget 90
265__SYSCALL(__NR_capget, sys_capget)
266#define __NR_capset 91
267__SYSCALL(__NR_capset, sys_capset)
268
269/* kernel/exec_domain.c */
270#define __NR_personality 92
271__SYSCALL(__NR_personality, sys_personality)
272
273/* kernel/exit.c */
274#define __NR_exit 93
275__SYSCALL(__NR_exit, sys_exit)
276#define __NR_exit_group 94
277__SYSCALL(__NR_exit_group, sys_exit_group)
278#define __NR_waitid 95
279__SYSCALL(__NR_waitid, sys_waitid)
280
281/* kernel/fork.c */
282#define __NR_set_tid_address 96
283__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
284#define __NR_unshare 97
285__SYSCALL(__NR_unshare, sys_unshare)
286
287/* kernel/futex.c */
288#define __NR_futex 98
289__SYSCALL(__NR_futex, sys_futex)
290#define __NR_set_robust_list 99
291__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
292#define __NR_get_robust_list 100
293__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
294
295/* kernel/hrtimer.c */
296#define __NR_nanosleep 101
297__SYSCALL(__NR_nanosleep, sys_nanosleep)
298
299/* kernel/itimer.c */
300#define __NR_getitimer 102
301__SYSCALL(__NR_getitimer, sys_getitimer)
302#define __NR_setitimer 103
303__SYSCALL(__NR_setitimer, sys_setitimer)
304
305/* kernel/kexec.c */
306#define __NR_kexec_load 104
307__SYSCALL(__NR_kexec_load, sys_kexec_load)
308
309/* kernel/module.c */
310#define __NR_init_module 105
311__SYSCALL(__NR_init_module, sys_init_module)
312#define __NR_delete_module 106
313__SYSCALL(__NR_delete_module, sys_delete_module)
314
315/* kernel/posix-timers.c */
316#define __NR_timer_create 107
317__SYSCALL(__NR_timer_create, sys_timer_create)
318#define __NR_timer_gettime 108
319__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
320#define __NR_timer_getoverrun 109
321__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
322#define __NR_timer_settime 110
323__SYSCALL(__NR_timer_settime, sys_timer_settime)
324#define __NR_timer_delete 111
325__SYSCALL(__NR_timer_delete, sys_timer_delete)
326#define __NR_clock_settime 112
327__SYSCALL(__NR_clock_settime, sys_clock_settime)
328#define __NR_clock_gettime 113
329__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
330#define __NR_clock_getres 114
331__SYSCALL(__NR_clock_getres, sys_clock_getres)
332#define __NR_clock_nanosleep 115
333__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
334
335/* kernel/printk.c */
336#define __NR_syslog 116
337__SYSCALL(__NR_syslog, sys_syslog)
338
339/* kernel/ptrace.c */
340#define __NR_ptrace 117
341__SYSCALL(__NR_ptrace, sys_ptrace)
342
343/* kernel/sched.c */
344#define __NR_sched_setparam 118
345__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
346#define __NR_sched_setscheduler 119
347__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
348#define __NR_sched_getscheduler 120
349__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
350#define __NR_sched_getparam 121
351__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
352#define __NR_sched_setaffinity 122
353__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
354#define __NR_sched_getaffinity 123
355__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
356#define __NR_sched_yield 124
357__SYSCALL(__NR_sched_yield, sys_sched_yield)
358#define __NR_sched_get_priority_max 125
359__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
360#define __NR_sched_get_priority_min 126
361__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
362#define __NR_sched_rr_get_interval 127
363__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
364
365/* kernel/signal.c */
366#define __NR_restart_syscall 128
367__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
368#define __NR_kill 129
369__SYSCALL(__NR_kill, sys_kill)
370#define __NR_tkill 130
371__SYSCALL(__NR_tkill, sys_tkill)
372#define __NR_tgkill 131
373__SYSCALL(__NR_tgkill, sys_tgkill)
374#define __NR_sigaltstack 132
375__SYSCALL(__NR_sigaltstack, sys_sigaltstack)
376#define __NR_rt_sigsuspend 133
377__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
378#define __NR_rt_sigaction 134
379__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */
380#define __NR_rt_sigprocmask 135
381__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
382#define __NR_rt_sigpending 136
383__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
384#define __NR_rt_sigtimedwait 137
385__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
386#define __NR_rt_sigqueueinfo 138
387__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
388#define __NR_rt_sigreturn 139
389__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */
390
391/* kernel/sys.c */
392#define __NR_setpriority 140
393__SYSCALL(__NR_setpriority, sys_setpriority)
394#define __NR_getpriority 141
395__SYSCALL(__NR_getpriority, sys_getpriority)
396#define __NR_reboot 142
397__SYSCALL(__NR_reboot, sys_reboot)
398#define __NR_setregid 143
399__SYSCALL(__NR_setregid, sys_setregid)
400#define __NR_setgid 144
401__SYSCALL(__NR_setgid, sys_setgid)
402#define __NR_setreuid 145
403__SYSCALL(__NR_setreuid, sys_setreuid)
404#define __NR_setuid 146
405__SYSCALL(__NR_setuid, sys_setuid)
406#define __NR_setresuid 147
407__SYSCALL(__NR_setresuid, sys_setresuid)
408#define __NR_getresuid 148
409__SYSCALL(__NR_getresuid, sys_getresuid)
410#define __NR_setresgid 149
411__SYSCALL(__NR_setresgid, sys_setresgid)
412#define __NR_getresgid 150
413__SYSCALL(__NR_getresgid, sys_getresgid)
414#define __NR_setfsuid 151
415__SYSCALL(__NR_setfsuid, sys_setfsuid)
416#define __NR_setfsgid 152
417__SYSCALL(__NR_setfsgid, sys_setfsgid)
418#define __NR_times 153
419__SYSCALL(__NR_times, sys_times)
420#define __NR_setpgid 154
421__SYSCALL(__NR_setpgid, sys_setpgid)
422#define __NR_getpgid 155
423__SYSCALL(__NR_getpgid, sys_getpgid)
424#define __NR_getsid 156
425__SYSCALL(__NR_getsid, sys_getsid)
426#define __NR_setsid 157
427__SYSCALL(__NR_setsid, sys_setsid)
428#define __NR_getgroups 158
429__SYSCALL(__NR_getgroups, sys_getgroups)
430#define __NR_setgroups 159
431__SYSCALL(__NR_setgroups, sys_setgroups)
432#define __NR_uname 160
433__SYSCALL(__NR_uname, sys_newuname)
434#define __NR_sethostname 161
435__SYSCALL(__NR_sethostname, sys_sethostname)
436#define __NR_setdomainname 162
437__SYSCALL(__NR_setdomainname, sys_setdomainname)
438#define __NR_getrlimit 163
439__SYSCALL(__NR_getrlimit, sys_getrlimit)
440#define __NR_setrlimit 164
441__SYSCALL(__NR_setrlimit, sys_setrlimit)
442#define __NR_getrusage 165
443__SYSCALL(__NR_getrusage, sys_getrusage)
444#define __NR_umask 166
445__SYSCALL(__NR_umask, sys_umask)
446#define __NR_prctl 167
447__SYSCALL(__NR_prctl, sys_prctl)
448#define __NR_getcpu 168
449__SYSCALL(__NR_getcpu, sys_getcpu)
450
451/* kernel/time.c */
452#define __NR_gettimeofday 169
453__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
454#define __NR_settimeofday 170
455__SYSCALL(__NR_settimeofday, sys_settimeofday)
456#define __NR_adjtimex 171
457__SYSCALL(__NR_adjtimex, sys_adjtimex)
458
459/* kernel/timer.c */
460#define __NR_getpid 172
461__SYSCALL(__NR_getpid, sys_getpid)
462#define __NR_getppid 173
463__SYSCALL(__NR_getppid, sys_getppid)
464#define __NR_getuid 174
465__SYSCALL(__NR_getuid, sys_getuid)
466#define __NR_geteuid 175
467__SYSCALL(__NR_geteuid, sys_geteuid)
468#define __NR_getgid 176
469__SYSCALL(__NR_getgid, sys_getgid)
470#define __NR_getegid 177
471__SYSCALL(__NR_getegid, sys_getegid)
472#define __NR_gettid 178
473__SYSCALL(__NR_gettid, sys_gettid)
474#define __NR_sysinfo 179
475__SYSCALL(__NR_sysinfo, sys_sysinfo)
476
477/* ipc/mqueue.c */
478#define __NR_mq_open 180
479__SYSCALL(__NR_mq_open, sys_mq_open)
480#define __NR_mq_unlink 181
481__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
482#define __NR_mq_timedsend 182
483__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
484#define __NR_mq_timedreceive 183
485__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
486#define __NR_mq_notify 184
487__SYSCALL(__NR_mq_notify, sys_mq_notify)
488#define __NR_mq_getsetattr 185
489__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
490
491/* ipc/msg.c */
492#define __NR_msgget 186
493__SYSCALL(__NR_msgget, sys_msgget)
494#define __NR_msgctl 187
495__SYSCALL(__NR_msgctl, sys_msgctl)
496#define __NR_msgrcv 188
497__SYSCALL(__NR_msgrcv, sys_msgrcv)
498#define __NR_msgsnd 189
499__SYSCALL(__NR_msgsnd, sys_msgsnd)
500
501/* ipc/sem.c */
502#define __NR_semget 190
503__SYSCALL(__NR_semget, sys_semget)
504#define __NR_semctl 191
505__SYSCALL(__NR_semctl, sys_semctl)
506#define __NR_semtimedop 192
507__SYSCALL(__NR_semtimedop, sys_semtimedop)
508#define __NR_semop 193
509__SYSCALL(__NR_semop, sys_semop)
510
511/* ipc/shm.c */
512#define __NR_shmget 194
513__SYSCALL(__NR_shmget, sys_shmget)
514#define __NR_shmctl 195
515__SYSCALL(__NR_shmctl, sys_shmctl)
516#define __NR_shmat 196
517__SYSCALL(__NR_shmat, sys_shmat)
518#define __NR_shmdt 197
519__SYSCALL(__NR_shmdt, sys_shmdt)
520
521/* net/socket.c */
522#define __NR_socket 198
523__SYSCALL(__NR_socket, sys_socket)
524#define __NR_socketpair 199
525__SYSCALL(__NR_socketpair, sys_socketpair)
526#define __NR_bind 200
527__SYSCALL(__NR_bind, sys_bind)
528#define __NR_listen 201
529__SYSCALL(__NR_listen, sys_listen)
530#define __NR_accept 202
531__SYSCALL(__NR_accept, sys_accept)
532#define __NR_connect 203
533__SYSCALL(__NR_connect, sys_connect)
534#define __NR_getsockname 204
535__SYSCALL(__NR_getsockname, sys_getsockname)
536#define __NR_getpeername 205
537__SYSCALL(__NR_getpeername, sys_getpeername)
538#define __NR_sendto 206
539__SYSCALL(__NR_sendto, sys_sendto)
540#define __NR_recvfrom 207
541__SYSCALL(__NR_recvfrom, sys_recvfrom)
542#define __NR_setsockopt 208
543__SYSCALL(__NR_setsockopt, sys_setsockopt)
544#define __NR_getsockopt 209
545__SYSCALL(__NR_getsockopt, sys_getsockopt)
546#define __NR_shutdown 210
547__SYSCALL(__NR_shutdown, sys_shutdown)
548#define __NR_sendmsg 211
549__SYSCALL(__NR_sendmsg, sys_sendmsg)
550#define __NR_recvmsg 212
551__SYSCALL(__NR_recvmsg, sys_recvmsg)
552
553/* mm/filemap.c */
554#define __NR_readahead 213
555__SYSCALL(__NR_readahead, sys_readahead)
556
557/* mm/nommu.c, also with MMU */
558#define __NR_brk 214
559__SYSCALL(__NR_brk, sys_brk)
560#define __NR_munmap 215
561__SYSCALL(__NR_munmap, sys_munmap)
562#define __NR_mremap 216
563__SYSCALL(__NR_mremap, sys_mremap)
564
565/* security/keys/keyctl.c */
566#define __NR_add_key 217
567__SYSCALL(__NR_add_key, sys_add_key)
568#define __NR_request_key 218
569__SYSCALL(__NR_request_key, sys_request_key)
570#define __NR_keyctl 219
571__SYSCALL(__NR_keyctl, sys_keyctl)
572
573/* arch/example/kernel/sys_example.c */
574#define __NR_clone 220
575__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */
576#define __NR_execve 221
577__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */
578
579#define __NR3264_mmap 222
580__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
581/* mm/fadvise.c */
582#define __NR3264_fadvise64 223
583__SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64)
584
585/* mm/, CONFIG_MMU only */
586#ifndef __ARCH_NOMMU
587#define __NR_swapon 224
588__SYSCALL(__NR_swapon, sys_swapon)
589#define __NR_swapoff 225
590__SYSCALL(__NR_swapoff, sys_swapoff)
591#define __NR_mprotect 226
592__SYSCALL(__NR_mprotect, sys_mprotect)
593#define __NR_msync 227
594__SYSCALL(__NR_msync, sys_msync)
595#define __NR_mlock 228
596__SYSCALL(__NR_mlock, sys_mlock)
597#define __NR_munlock 229
598__SYSCALL(__NR_munlock, sys_munlock)
599#define __NR_mlockall 230
600__SYSCALL(__NR_mlockall, sys_mlockall)
601#define __NR_munlockall 231
602__SYSCALL(__NR_munlockall, sys_munlockall)
603#define __NR_mincore 232
604__SYSCALL(__NR_mincore, sys_mincore)
605#define __NR_madvise 233
606__SYSCALL(__NR_madvise, sys_madvise)
607#define __NR_remap_file_pages 234
608__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
609#define __NR_mbind 235
610__SYSCALL(__NR_mbind, sys_mbind)
611#define __NR_get_mempolicy 236
612__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
613#define __NR_set_mempolicy 237
614__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
615#define __NR_migrate_pages 238
616__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
617#define __NR_move_pages 239
618__SYSCALL(__NR_move_pages, sys_move_pages)
619#endif
620
621#define __NR_rt_tgsigqueueinfo 240
622__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
623#define __NR_perf_counter_open 241
624__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
625
626#undef __NR_syscalls
627#define __NR_syscalls 242
628
629/*
630 * All syscalls below here should go away really,
631 * these are provided for both review and as a porting
632 * help for the C library version.
633*
634 * Last chance: are any of these important enough to
635 * enable by default?
636 */
637#ifdef __ARCH_WANT_SYSCALL_NO_AT
638#define __NR_open 1024
639__SYSCALL(__NR_open, sys_open)
640#define __NR_link 1025
641__SYSCALL(__NR_link, sys_link)
642#define __NR_unlink 1026
643__SYSCALL(__NR_unlink, sys_unlink)
644#define __NR_mknod 1027
645__SYSCALL(__NR_mknod, sys_mknod)
646#define __NR_chmod 1028
647__SYSCALL(__NR_chmod, sys_chmod)
648#define __NR_chown 1029
649__SYSCALL(__NR_chown, sys_chown)
650#define __NR_mkdir 1030
651__SYSCALL(__NR_mkdir, sys_mkdir)
652#define __NR_rmdir 1031
653__SYSCALL(__NR_rmdir, sys_rmdir)
654#define __NR_lchown 1032
655__SYSCALL(__NR_lchown, sys_lchown)
656#define __NR_access 1033
657__SYSCALL(__NR_access, sys_access)
658#define __NR_rename 1034
659__SYSCALL(__NR_rename, sys_rename)
660#define __NR_readlink 1035
661__SYSCALL(__NR_readlink, sys_readlink)
662#define __NR_symlink 1036
663__SYSCALL(__NR_symlink, sys_symlink)
664#define __NR_utimes 1037
665__SYSCALL(__NR_utimes, sys_utimes)
666#define __NR3264_stat 1038
667__SC_3264(__NR3264_stat, sys_stat64, sys_newstat)
668#define __NR3264_lstat 1039
669__SC_3264(__NR3264_lstat, sys_lstat64, sys_newlstat)
670
671#undef __NR_syscalls
672#define __NR_syscalls (__NR3264_lstat+1)
673#endif /* __ARCH_WANT_SYSCALL_NO_AT */
674
675#ifdef __ARCH_WANT_SYSCALL_NO_FLAGS
676#define __NR_pipe 1040
677__SYSCALL(__NR_pipe, sys_pipe)
678#define __NR_dup2 1041
679__SYSCALL(__NR_dup2, sys_dup2)
680#define __NR_epoll_create 1042
681__SYSCALL(__NR_epoll_create, sys_epoll_create)
682#define __NR_inotify_init 1043
683__SYSCALL(__NR_inotify_init, sys_inotify_init)
684#define __NR_eventfd 1044
685__SYSCALL(__NR_eventfd, sys_eventfd)
686#define __NR_signalfd 1045
687__SYSCALL(__NR_signalfd, sys_signalfd)
688
689#undef __NR_syscalls
690#define __NR_syscalls (__NR_signalfd+1)
691#endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */
692
693#if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T)
694#define __NR_sendfile 1046
695__SYSCALL(__NR_sendfile, sys_sendfile)
696#define __NR_ftruncate 1047
697__SYSCALL(__NR_ftruncate, sys_ftruncate)
698#define __NR_truncate 1048
699__SYSCALL(__NR_truncate, sys_truncate)
700#define __NR_stat 1049
701__SYSCALL(__NR_stat, sys_newstat)
702#define __NR_lstat 1050
703__SYSCALL(__NR_lstat, sys_newlstat)
704#define __NR_fstat 1051
705__SYSCALL(__NR_fstat, sys_newfstat)
706#define __NR_fcntl 1052
707__SYSCALL(__NR_fcntl, sys_fcntl)
708#define __NR_fadvise64 1053
709#define __ARCH_WANT_SYS_FADVISE64
710__SYSCALL(__NR_fadvise64, sys_fadvise64)
711#define __NR_newfstatat 1054
712#define __ARCH_WANT_SYS_NEWFSTATAT
713__SYSCALL(__NR_newfstatat, sys_newfstatat)
714#define __NR_fstatfs 1055
715__SYSCALL(__NR_fstatfs, sys_fstatfs)
716#define __NR_statfs 1056
717__SYSCALL(__NR_statfs, sys_statfs)
718#define __NR_lseek 1057
719__SYSCALL(__NR_lseek, sys_lseek)
720#define __NR_mmap 1058
721__SYSCALL(__NR_mmap, sys_mmap)
722
723#undef __NR_syscalls
724#define __NR_syscalls (__NR_mmap+1)
725#endif /* 32 bit off_t syscalls */
726
727#ifdef __ARCH_WANT_SYSCALL_DEPRECATED
728#define __NR_alarm 1059
729#define __ARCH_WANT_SYS_ALARM
730__SYSCALL(__NR_alarm, sys_alarm)
731#define __NR_getpgrp 1060
732#define __ARCH_WANT_SYS_GETPGRP
733__SYSCALL(__NR_getpgrp, sys_getpgrp)
734#define __NR_pause 1061
735#define __ARCH_WANT_SYS_PAUSE
736__SYSCALL(__NR_pause, sys_pause)
737#define __NR_time 1062
738#define __ARCH_WANT_SYS_TIME
739__SYSCALL(__NR_time, sys_time)
740#define __NR_utime 1063
741#define __ARCH_WANT_SYS_UTIME
742__SYSCALL(__NR_utime, sys_utime)
743
744#define __NR_creat 1064
745__SYSCALL(__NR_creat, sys_creat)
746#define __NR_getdents 1065
747#define __ARCH_WANT_SYS_GETDENTS
748__SYSCALL(__NR_getdents, sys_getdents)
749#define __NR_futimesat 1066
750__SYSCALL(__NR_futimesat, sys_futimesat)
751#define __NR_select 1067
752#define __ARCH_WANT_SYS_SELECT
753__SYSCALL(__NR_select, sys_select)
754#define __NR_poll 1068
755__SYSCALL(__NR_poll, sys_poll)
756#define __NR_epoll_wait 1069
757__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
758#define __NR_ustat 1070
759__SYSCALL(__NR_ustat, sys_ustat)
760#define __NR_vfork 1071
761__SYSCALL(__NR_vfork, sys_vfork)
762#define __NR_wait4 1072
763__SYSCALL(__NR_wait4, sys_wait4)
764#define __NR_recv 1073
765__SYSCALL(__NR_recv, sys_recv)
766#define __NR_send 1074
767__SYSCALL(__NR_send, sys_send)
768#define __NR_bdflush 1075
769__SYSCALL(__NR_bdflush, sys_bdflush)
770#define __NR_umount 1076
771__SYSCALL(__NR_umount, sys_oldumount)
772#define __ARCH_WANT_SYS_OLDUMOUNT
773#define __NR_uselib 1077
774__SYSCALL(__NR_uselib, sys_uselib)
775#define __NR__sysctl 1078
776__SYSCALL(__NR__sysctl, sys_sysctl)
777
778#define __NR_fork 1079
779#ifdef CONFIG_MMU
780__SYSCALL(__NR_fork, sys_fork)
781#else
782__SYSCALL(__NR_fork, sys_ni_syscall)
783#endif /* CONFIG_MMU */
784
785#undef __NR_syscalls
786#define __NR_syscalls (__NR_fork+1)
787
788#endif /* __ARCH_WANT_SYSCALL_DEPRECATED */
789
790/*
791 * 32 bit systems traditionally used different
792 * syscalls for off_t and loff_t arguments, while
793 * 64 bit systems only need the off_t version.
794 * For new 32 bit platforms, there is no need to
795 * implement the old 32 bit off_t syscalls, so
796 * they take different names.
797 * Here we map the numbers so that both versions
798 * use the same syscall table layout.
799 */
800#if __BITS_PER_LONG == 64
801#define __NR_fcntl __NR3264_fcntl
802#define __NR_statfs __NR3264_statfs
803#define __NR_fstatfs __NR3264_fstatfs
804#define __NR_truncate __NR3264_truncate
805#define __NR_ftruncate __NR3264_truncate
806#define __NR_lseek __NR3264_lseek
807#define __NR_sendfile __NR3264_sendfile
808#define __NR_newfstatat __NR3264_fstatat
809#define __NR_fstat __NR3264_fstat
810#define __NR_mmap __NR3264_mmap
811#define __NR_fadvise64 __NR3264_fadvise64
812#ifdef __NR3264_stat
813#define __NR_stat __NR3264_stat
814#define __NR_lstat __NR3264_lstat
815#endif
816#else
817#define __NR_fcntl64 __NR3264_fcntl
818#define __NR_statfs64 __NR3264_statfs
819#define __NR_fstatfs64 __NR3264_fstatfs
820#define __NR_truncate64 __NR3264_truncate
821#define __NR_ftruncate64 __NR3264_truncate
822#define __NR_llseek __NR3264_lseek
823#define __NR_sendfile64 __NR3264_sendfile
824#define __NR_fstatat64 __NR3264_fstatat
825#define __NR_fstat64 __NR3264_fstat
826#define __NR_mmap2 __NR3264_mmap
827#define __NR_fadvise64_64 __NR3264_fadvise64
828#ifdef __NR3264_stat
829#define __NR_stat64 __NR3264_stat
830#define __NR_lstat64 __NR3264_lstat
831#endif
832#endif
833
834#ifdef __KERNEL__
835
836/*
837 * These are required system calls; we should
838 * invert the logic eventually and let them
839 * be selected by default.
840 */
841#if __BITS_PER_LONG == 32
842#define __ARCH_WANT_STAT64
843#define __ARCH_WANT_SYS_LLSEEK
844#endif
845#define __ARCH_WANT_SYS_RT_SIGACTION
846#define __ARCH_WANT_SYS_RT_SIGSUSPEND
847
848/*
849 * "Conditional" syscalls
850 *
851 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
852 * but it doesn't work on all toolchains, so we just do it by hand
853 */
854#ifndef cond_syscall
855#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
856#endif
857
858#endif /* __KERNEL__ */
859#endif /* _ASM_GENERIC_UNISTD_H */
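Note: the guard on the first line is deliberately reopened when __SYSCALL is defined, so an architecture can include this file twice: once for the __NR_* numbers and once to expand every entry into a syscall-table slot. A hedged sketch of the second use; the table symbol, element type and file layout mirror what several architectures of this era do, but none of the names here are mandated:

/* Sketch only: generating a syscall table from asm-generic/unistd.h by
 * re-including it with __SYSCALL defined.  sys_call_table and the void *
 * element type are illustrative, not required by the header. */
#include <linux/syscalls.h>	/* sys_* prototypes, sys_ni_syscall */

#undef __SYSCALL
#define __SYSCALL(nr, call)	[nr] = (call),

void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,	/* default: ENOSYS */
#include <asm/unistd.h>
};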
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
new file mode 100644
index 000000000000..8b9c3c960aeb
--- /dev/null
+++ b/include/asm-generic/user.h
@@ -0,0 +1,8 @@
1#ifndef __ASM_GENERIC_USER_H
2#define __ASM_GENERIC_USER_H
3/*
4 * This file may define a 'struct user' structure. However, it is only
5 * used for a.out files, which are not supported on new architectures.
6 */
7
8#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
new file mode 100644
index 000000000000..36c8ff52016b
--- /dev/null
+++ b/include/asm-generic/vga.h
@@ -0,0 +1,24 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6#ifndef __ASM_GENERIC_VGA_H
7#define __ASM_GENERIC_VGA_H
8
9/*
10 * On most architectures that support VGA, we can just
11 * recalculate addresses and then access the videoram
12 * directly without any black magic.
13 *
14 * Everyone else needs to ioremap the address and use
15 * proper I/O accesses.
16 */
17#ifndef VGA_MAP_MEM
18#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
19#endif
20
21#define vga_readb(x) (*(x))
22#define vga_writeb(x, y) (*(y) = (x))
23
24#endif /* __ASM_GENERIC_VGA_H */
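Note: with the default VGA_MAP_MEM, the legacy text buffer is reached through phys_to_virt(), so console code can poke character/attribute pairs straight into video RAM. A small illustrative sketch; 0xb8000 and the 80x25 layout are the usual legacy text-mode values:

/* Illustration: put one character into the legacy colour text buffer using
 * the accessors above.  Each cell is a character byte followed by an
 * attribute byte. */
#include <asm/vga.h>

static void example_vga_putc(int row, int col, unsigned char c, unsigned char attr)
{
	char *vram = (char *)VGA_MAP_MEM(0xb8000, 2 * 80 * 25);
	char *cell = vram + 2 * (row * 80 + col);

	vga_writeb(c, cell);
	vga_writeb(attr, cell + 1);
}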
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 89853bcd27a6..92b73b6140ff 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,4 +1,57 @@
1#include <linux/section-names.h> 1/*
2 * Helper macros to support writing architecture specific
3 * linker scripts.
4 *
5 * A minimal linker scripts has following content:
6 * [This is a sample, architectures may have special requirements]
7 *
8 * OUTPUT_FORMAT(...)
9 * OUTPUT_ARCH(...)
10 * ENTRY(...)
11 * SECTIONS
12 * {
13 * . = START;
14 * __init_begin = .;
15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU(PAGE_SIZE)
19 * __init_end = .;
20 *
21 * _stext = .;
22 * TEXT_SECTION = 0
23 * _etext = .;
24 *
25 * _sdata = .;
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
28 * _edata = .;
29 *
30 * EXCEPTION_TABLE(...)
31 * NOTES
32 *
33 * __bss_start = .;
34 * BSS_SECTION(0, 0)
35 * __bss_stop = .;
36 * _end = .;
37 *
38 * /DISCARD/ : {
39 * EXIT_TEXT
40 * EXIT_DATA
41 * EXIT_CALL
42 * }
43 * STABS_DEBUG
44 * DWARF_DEBUG
45 * }
46 *
47 * [__init_begin, __init_end] is the init section that may be freed after init
48 * [_stext, _etext] is the text section
49 * [_sdata, _edata] is the data section
50 *
51 * Some of the included output section have their own set of constants.
52 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
53 * [__nosave_begin, __nosave_end] for the nosave data
54 */
2 55
3#ifndef LOAD_OFFSET 56#ifndef LOAD_OFFSET
4#define LOAD_OFFSET 0 57#define LOAD_OFFSET 0
@@ -63,7 +116,7 @@
63#define BRANCH_PROFILE() 116#define BRANCH_PROFILE()
64#endif 117#endif
65 118
66#ifdef CONFIG_EVENT_TRACER 119#ifdef CONFIG_EVENT_TRACING
67#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ 120#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
68 *(_ftrace_events) \ 121 *(_ftrace_events) \
69 VMLINUX_SYMBOL(__stop_ftrace_events) = .; 122 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
@@ -116,7 +169,36 @@
116 FTRACE_EVENTS() \ 169 FTRACE_EVENTS() \
117 TRACE_SYSCALLS() 170 TRACE_SYSCALLS()
118 171
119#define RO_DATA(align) \ 172/*
173 * Data section helpers
174 */
175#define NOSAVE_DATA \
176 . = ALIGN(PAGE_SIZE); \
177 VMLINUX_SYMBOL(__nosave_begin) = .; \
178 *(.data.nosave) \
179 . = ALIGN(PAGE_SIZE); \
180 VMLINUX_SYMBOL(__nosave_end) = .;
181
182#define PAGE_ALIGNED_DATA(page_align) \
183 . = ALIGN(page_align); \
184 *(.data.page_aligned)
185
186#define READ_MOSTLY_DATA(align) \
187 . = ALIGN(align); \
188 *(.data.read_mostly)
189
190#define CACHELINE_ALIGNED_DATA(align) \
191 . = ALIGN(align); \
192 *(.data.cacheline_aligned)
193
194#define INIT_TASK(align) \
195 . = ALIGN(align); \
196 *(.data.init_task)
197
198/*
199 * Read only Data
200 */
201#define RO_DATA_SECTION(align) \
120 . = ALIGN((align)); \ 202 . = ALIGN((align)); \
121 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 203 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
122 VMLINUX_SYMBOL(__start_rodata) = .; \ 204 VMLINUX_SYMBOL(__start_rodata) = .; \
@@ -270,9 +352,10 @@
270 } \ 352 } \
271 . = ALIGN((align)); 353 . = ALIGN((align));
272 354
273/* RODATA provided for backward compatibility. 355/* RODATA & RO_DATA provided for backward compatibility.
274 * All archs are supposed to use RO_DATA() */ 356 * All archs are supposed to use RO_DATA() */
275#define RODATA RO_DATA(4096) 357#define RODATA RO_DATA_SECTION(4096)
358#define RO_DATA(align) RO_DATA_SECTION(align)
276 359
277#define SECURITY_INIT \ 360#define SECURITY_INIT \
278 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ 361 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -330,16 +413,51 @@
330#endif 413#endif
331 414
332/* Section used for early init (in .S files) */ 415/* Section used for early init (in .S files) */
333#define HEAD_TEXT *(HEAD_TEXT_SECTION) 416#define HEAD_TEXT *(.head.text)
417
418#define HEAD_TEXT_SECTION \
419 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
420 HEAD_TEXT \
421 }
422
423/*
424 * Exception table
425 */
426#define EXCEPTION_TABLE(align) \
427 . = ALIGN(align); \
428 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
429 VMLINUX_SYMBOL(__start___ex_table) = .; \
430 *(__ex_table) \
431 VMLINUX_SYMBOL(__stop___ex_table) = .; \
432 }
433
434/*
435 * Init task
436 */
437#define INIT_TASK_DATA(align) \
438 . = ALIGN(align); \
439 .data.init_task : { \
440 INIT_TASK \
441 }
442
443#ifdef CONFIG_CONSTRUCTORS
444#define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \
445 *(.ctors) \
446 VMLINUX_SYMBOL(__ctors_end) = .;
447#else
448#define KERNEL_CTORS()
449#endif
334 450
335/* init and exit section handling */ 451/* init and exit section handling */
336#define INIT_DATA \ 452#define INIT_DATA \
337 *(.init.data) \ 453 *(.init.data) \
338 DEV_DISCARD(init.data) \ 454 DEV_DISCARD(init.data) \
339 DEV_DISCARD(init.rodata) \
340 CPU_DISCARD(init.data) \ 455 CPU_DISCARD(init.data) \
341 CPU_DISCARD(init.rodata) \
342 MEM_DISCARD(init.data) \ 456 MEM_DISCARD(init.data) \
457 KERNEL_CTORS() \
458 *(.init.rodata) \
459 DEV_DISCARD(init.rodata) \
460 CPU_DISCARD(init.rodata) \
343 MEM_DISCARD(init.rodata) 461 MEM_DISCARD(init.rodata)
344 462
345#define INIT_TEXT \ 463#define INIT_TEXT \
@@ -363,9 +481,35 @@
  CPU_DISCARD(exit.text) \
  MEM_DISCARD(exit.text)

- /* DWARF debug sections.
- Symbols in the DWARF debugging sections are relative to
- the beginning of the section so we begin them at 0. */
+#define EXIT_CALL \
+ *(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS \
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+ *(.sbss) \
+ *(.scommon) \
+ }
+
+#define BSS(bss_align) \
+ . = ALIGN(bss_align); \
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__bss_start) = .; \
+ *(.bss.page_aligned) \
+ *(.dynbss) \
+ *(.bss) \
+ *(COMMON) \
+ VMLINUX_SYMBOL(__bss_stop) = .; \
+ }
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
 #define DWARF_DEBUG \
  /* DWARF 1 */ \
  .debug 0 : { *(.debug) } \
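
BSS() brackets the zero-initialized data with __bss_start and __bss_stop precisely so that early architecture code can clear it before generic C code runs. A hedged sketch of that common idiom (the function name is illustrative):

    /* Sketch: zero .bss using the bounds emitted by BSS() above. */
    #include <linux/init.h>
    #include <linux/string.h>

    extern char __bss_start[], __bss_stop[];

    static void __init clear_bss_example(void)
    {
            memset(__bss_start, 0, __bss_stop - __bss_start);
    }
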
@@ -432,6 +576,12 @@
  VMLINUX_SYMBOL(__stop_notes) = .; \
 }

+#define INIT_SETUP(initsetup_align) \
+ . = ALIGN(initsetup_align); \
+ VMLINUX_SYMBOL(__setup_start) = .; \
+ *(.init.setup) \
+ VMLINUX_SYMBOL(__setup_end) = .;
+
 #define INITCALLS \
  *(.initcallearly.init) \
  VMLINUX_SYMBOL(__early_initcall_end) = .; \
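
INIT_SETUP() collects the records that the __setup() macro places in .init.setup, bounded by __setup_start/__setup_end for the command-line parser. A small, hypothetical example of code that ends up in this range ("example_mode=" and its handler are made-up names):

    /* Sketch: an early command-line option whose record lands in .init.setup. */
    #include <linux/init.h>

    static int __init example_mode_setup(char *str)
    {
            /* parse the value following "example_mode=" here */
            return 1;       /* non-zero: option handled */
    }
    __setup("example_mode=", example_mode_setup);
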
@@ -453,6 +603,31 @@
  *(.initcall7.init) \
  *(.initcall7s.init)

+#define INIT_CALLS \
+ VMLINUX_SYMBOL(__initcall_start) = .; \
+ INITCALLS \
+ VMLINUX_SYMBOL(__initcall_end) = .;
+
+#define CON_INITCALL \
+ VMLINUX_SYMBOL(__con_initcall_start) = .; \
+ *(.con_initcall.init) \
+ VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+#define SECURITY_INITCALL \
+ VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ *(.security_initcall.init) \
+ VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__initramfs_start) = .; \
+ *(.init.ramfs) \
+ VMLINUX_SYMBOL(__initramfs_end) = .;
+#else
+#define INIT_RAM_FS
+#endif
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @vaddr: explicit base address (optional)
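
INIT_CALLS, CON_INITCALL and SECURITY_INITCALL only lay out ordered pointer tables; boot code then walks each table between its start/end markers. A rough sketch of such a walk (the function name is illustrative; the in-tree loops live in init/main.c and the console/security init code):

    /* Sketch: run everything registered between __initcall_start and
     * __initcall_end, in the order the INITCALLS levels were emitted. */
    typedef int (*initcall_t)(void);
    extern initcall_t __initcall_start[], __initcall_end[];

    static void run_example_initcalls(void)
    {
            initcall_t *call;

            for (call = __initcall_start; call < __initcall_end; call++)
                    (*call)();      /* return value ignored in this sketch */
    }
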
@@ -509,3 +684,58 @@
  *(.data.percpu.shared_aligned) \
  VMLINUX_SYMBOL(__per_cpu_end) = .; \
 }
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will fit only a subset of the architectures
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically/always smaller than a PAGE_SIZE, so
+ * the sections that have this restriction (or similar)
+ * are located before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * Use 0 as page_align if page-aligned data is not used. */
+#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \
+ . = ALIGN(PAGE_SIZE); \
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { \
+ INIT_TASK(inittask) \
+ CACHELINE_ALIGNED_DATA(cacheline) \
+ READ_MOSTLY_DATA(cacheline) \
+ DATA_DATA \
+ CONSTRUCTORS \
+ NOSAVE_DATA(nosave) \
+ PAGE_ALIGNED_DATA(pagealigned) \
+ }
+
+#define INIT_TEXT_SECTION(inittext_align) \
+ . = ALIGN(inittext_align); \
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(_sinittext) = .; \
+ INIT_TEXT \
+ VMLINUX_SYMBOL(_einittext) = .; \
+ }
+
+#define INIT_DATA_SECTION(initsetup_align) \
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
+ INIT_DATA \
+ INIT_SETUP(initsetup_align) \
+ INIT_CALLS \
+ CON_INITCALL \
+ SECURITY_INITCALL \
+ INIT_RAM_FS \
+ }
+
+#define BSS_SECTION(sbss_align, bss_align) \
+ SBSS \
+ BSS(bss_align) \
+ . = ALIGN(4);
+
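
Taken together, the new high-level *_SECTION macros let an architecture's linker script shrink to a handful of lines. A hypothetical, heavily simplified arch/xxx/kernel/vmlinux.lds.S built only from the macros defined above (the architecture, load address, alignments and extra symbols are illustrative, and PAGE_SIZE, THREAD_SIZE and L1_CACHE_BYTES are assumed to come from the usual asm headers):

    /* Hypothetical vmlinux.lds.S sketch using the new macros. */
    #include <asm-generic/vmlinux.lds.h>

    OUTPUT_ARCH(xxx)
    ENTRY(_start)

    SECTIONS
    {
            . = 0xC0000000;                         /* illustrative base */

            .text : {
                    HEAD_TEXT
                    TEXT_TEXT
                    SCHED_TEXT
                    LOCK_TEXT
            }

            RO_DATA_SECTION(PAGE_SIZE)
            RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE, THREAD_SIZE)

            EXCEPTION_TABLE(16)
            NOTES

            INIT_TEXT_SECTION(PAGE_SIZE)
            INIT_DATA_SECTION(16)

            BSS_SECTION(0, PAGE_SIZE)
            _end = .;

            STABS_DEBUG
            DWARF_DEBUG
    }
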