author    | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2011-03-25 11:41:20 -0400
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2011-03-25 11:41:20 -0400
commit    | 7bf7e370d5919112c223a269462cd0b546903829 (patch)
tree      | 03ccc715239df14ae168277dbccc9d9cf4d8a2c8 /arch/arm/include
parent    | 68b1a1e786f29c900fa1c516a402e24f0ece622a (diff)
parent    | d39dd11c3e6a7af5c20bfac40594db36cf270f42 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus-1
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6: (9356 commits)
[media] rc: update for bitop name changes
fs: simplify iget & friends
fs: pull inode->i_lock up out of writeback_single_inode
fs: rename inode_lock to inode_hash_lock
fs: move i_wb_list out from under inode_lock
fs: move i_sb_list out from under inode_lock
fs: remove inode_lock from iput_final and prune_icache
fs: Lock the inode LRU list separately
fs: factor inode disposal
fs: protect inode->i_state with inode->i_lock
lib, arch: add filter argument to show_mem and fix private implementations
SLUB: Write to per cpu data when allocating it
slub: Fix debugobjects with lockless fastpath
autofs4: Do not potentially dereference NULL pointer returned by fget() in autofs_dev_ioctl_setpipefd()
autofs4 - remove autofs4_lock
autofs4 - fix d_manage() return on rcu-walk
autofs4 - fix autofs4_expire_indirect() traversal
autofs4 - fix dentry leak in autofs4_expire_direct()
autofs4 - reinstate last used update on access
vfs - check non-mountpoint dentry might block in __follow_mount_rcu()
...
NOTE!
This merge commit was created to fix a compilation error. The block
tree was merged upstream and removed the 'elv_queue_empty()'
function which the new 'mtdswap' driver uses. So a simple
merge of the mtd tree with upstream does not compile. And the
mtd tree has already been published, so re-basing it is not an option.
To fix this unfortunate situation, I had to merge upstream into the
mtd-2.6.git tree without committing, put the fixup patch on top of
that, and then commit everything together. The result is that there are
no commits which do not compile.
In other words, this merge commit "merges" 3 things: the MTD tree, the
upstream tree, and the fixup patch.
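For reference, the workflow described in the note corresponds roughly to the following command sequence (a sketch only; the branch, remote and patch-file names are illustrative, not taken from the actual tree):

	git checkout <mtd-branch>                  # the already-published MTD tree
	git merge --no-commit <upstream>/master    # merge upstream, but stop before committing
	git apply --index mtdswap-fixup.patch      # fold the elv_queue_empty() fixup into the merge
	git commit                                 # records a single merge commit: MTD + upstream + fixup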
Diffstat (limited to 'arch/arm/include')
41 files changed, 1208 insertions, 831 deletions
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04acaa31f..92f10cb5c70c 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
32 | dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; | 32 | dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; |
33 | dump->u_ssize = 0; | 33 | dump->u_ssize = 0; |
34 | 34 | ||
35 | dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; | 35 | memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); |
36 | dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; | ||
37 | dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; | ||
38 | dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; | ||
39 | dump->u_debugreg[4] = tsk->thread.debug.nsaved; | ||
40 | 36 | ||
41 | if (dump->start_stack < 0x04000000) | 37 | if (dump->start_stack < 0x04000000) |
42 | dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; | 38 | dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; |
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 7b1bb2bbaf88..6b7403fd8f54 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | |||
149 | */ | 149 | */ |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Native endian assembly bitops. nr = 0 -> word 0 bit 0. | ||
153 | */ | ||
154 | extern void _set_bit(int nr, volatile unsigned long * p); | ||
155 | extern void _clear_bit(int nr, volatile unsigned long * p); | ||
156 | extern void _change_bit(int nr, volatile unsigned long * p); | ||
157 | extern int _test_and_set_bit(int nr, volatile unsigned long * p); | ||
158 | extern int _test_and_clear_bit(int nr, volatile unsigned long * p); | ||
159 | extern int _test_and_change_bit(int nr, volatile unsigned long * p); | ||
160 | |||
161 | /* | ||
152 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. | 162 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. |
153 | */ | 163 | */ |
154 | extern void _set_bit_le(int nr, volatile unsigned long * p); | ||
155 | extern void _clear_bit_le(int nr, volatile unsigned long * p); | ||
156 | extern void _change_bit_le(int nr, volatile unsigned long * p); | ||
157 | extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); | ||
158 | extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); | ||
159 | extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); | ||
160 | extern int _find_first_zero_bit_le(const void * p, unsigned size); | 164 | extern int _find_first_zero_bit_le(const void * p, unsigned size); |
161 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); | 165 | extern int _find_next_zero_bit_le(const void * p, int size, int offset); |
162 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); | 166 | extern int _find_first_bit_le(const unsigned long *p, unsigned size); |
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | |||
165 | /* | 169 | /* |
166 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. | 170 | * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. |
167 | */ | 171 | */ |
168 | extern void _set_bit_be(int nr, volatile unsigned long * p); | ||
169 | extern void _clear_bit_be(int nr, volatile unsigned long * p); | ||
170 | extern void _change_bit_be(int nr, volatile unsigned long * p); | ||
171 | extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); | ||
172 | extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); | ||
173 | extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); | ||
174 | extern int _find_first_zero_bit_be(const void * p, unsigned size); | 172 | extern int _find_first_zero_bit_be(const void * p, unsigned size); |
175 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); | 173 | extern int _find_next_zero_bit_be(const void * p, int size, int offset); |
176 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); | 174 | extern int _find_first_bit_be(const unsigned long *p, unsigned size); |
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
180 | /* | 178 | /* |
181 | * The __* form of bitops are non-atomic and may be reordered. | 179 | * The __* form of bitops are non-atomic and may be reordered. |
182 | */ | 180 | */ |
183 | #define ATOMIC_BITOP_LE(name,nr,p) \ | 181 | #define ATOMIC_BITOP(name,nr,p) \ |
184 | (__builtin_constant_p(nr) ? \ | 182 | (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) |
185 | ____atomic_##name(nr, p) : \ | ||
186 | _##name##_le(nr,p)) | ||
187 | |||
188 | #define ATOMIC_BITOP_BE(name,nr,p) \ | ||
189 | (__builtin_constant_p(nr) ? \ | ||
190 | ____atomic_##name(nr, p) : \ | ||
191 | _##name##_be(nr,p)) | ||
192 | #else | 183 | #else |
193 | #define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) | 184 | #define ATOMIC_BITOP(name,nr,p) _##name(nr,p) |
194 | #define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) | ||
195 | #endif | 185 | #endif |
196 | 186 | ||
197 | #define NONATOMIC_BITOP(name,nr,p) \ | 187 | /* |
198 | (____nonatomic_##name(nr, p)) | 188 | * Native endian atomic definitions. |
189 | */ | ||
190 | #define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) | ||
191 | #define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) | ||
192 | #define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) | ||
193 | #define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) | ||
194 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) | ||
195 | #define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) | ||
199 | 196 | ||
200 | #ifndef __ARMEB__ | 197 | #ifndef __ARMEB__ |
201 | /* | 198 | /* |
202 | * These are the little endian, atomic definitions. | 199 | * These are the little endian, atomic definitions. |
203 | */ | 200 | */ |
204 | #define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) | ||
205 | #define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) | ||
206 | #define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) | ||
207 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | ||
208 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | ||
209 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | ||
210 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 201 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
211 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 202 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
212 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 203 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
215 | #define WORD_BITOFF_TO_LE(x) ((x)) | 206 | #define WORD_BITOFF_TO_LE(x) ((x)) |
216 | 207 | ||
217 | #else | 208 | #else |
218 | |||
219 | /* | 209 | /* |
220 | * These are the big endian, atomic definitions. | 210 | * These are the big endian, atomic definitions. |
221 | */ | 211 | */ |
222 | #define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) | ||
223 | #define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) | ||
224 | #define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) | ||
225 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) | ||
226 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) | ||
227 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) | ||
228 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) | 212 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) |
229 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) | 213 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) |
230 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 214 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
@@ -303,41 +287,63 @@ static inline int fls(int x) | |||
303 | #include <asm-generic/bitops/hweight.h> | 287 | #include <asm-generic/bitops/hweight.h> |
304 | #include <asm-generic/bitops/lock.h> | 288 | #include <asm-generic/bitops/lock.h> |
305 | 289 | ||
306 | /* | 290 | static inline void __set_bit_le(int nr, void *addr) |
307 | * Ext2 is defined to use little-endian byte ordering. | 291 | { |
308 | * These do not need to be atomic. | 292 | __set_bit(WORD_BITOFF_TO_LE(nr), addr); |
309 | */ | 293 | } |
310 | #define ext2_set_bit(nr,p) \ | 294 | |
311 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 295 | static inline void __clear_bit_le(int nr, void *addr) |
312 | #define ext2_set_bit_atomic(lock,nr,p) \ | 296 | { |
313 | test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 297 | __clear_bit(WORD_BITOFF_TO_LE(nr), addr); |
314 | #define ext2_clear_bit(nr,p) \ | 298 | } |
315 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 299 | |
316 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 300 | static inline int __test_and_set_bit_le(int nr, void *addr) |
317 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 301 | { |
318 | #define ext2_test_bit(nr,p) \ | 302 | return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); |
319 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 303 | } |
320 | #define ext2_find_first_zero_bit(p,sz) \ | 304 | |
321 | _find_first_zero_bit_le(p,sz) | 305 | static inline int test_and_set_bit_le(int nr, void *addr) |
322 | #define ext2_find_next_zero_bit(p,sz,off) \ | 306 | { |
323 | _find_next_zero_bit_le(p,sz,off) | 307 | return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); |
324 | #define ext2_find_next_bit(p, sz, off) \ | 308 | } |
325 | _find_next_bit_le(p, sz, off) | 309 | |
310 | static inline int __test_and_clear_bit_le(int nr, void *addr) | ||
311 | { | ||
312 | return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
313 | } | ||
314 | |||
315 | static inline int test_and_clear_bit_le(int nr, void *addr) | ||
316 | { | ||
317 | return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
318 | } | ||
319 | |||
320 | static inline int test_bit_le(int nr, const void *addr) | ||
321 | { | ||
322 | return test_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
323 | } | ||
324 | |||
325 | static inline int find_first_zero_bit_le(const void *p, unsigned size) | ||
326 | { | ||
327 | return _find_first_zero_bit_le(p, size); | ||
328 | } | ||
329 | |||
330 | static inline int find_next_zero_bit_le(const void *p, int size, int offset) | ||
331 | { | ||
332 | return _find_next_zero_bit_le(p, size, offset); | ||
333 | } | ||
334 | |||
335 | static inline int find_next_bit_le(const void *p, int size, int offset) | ||
336 | { | ||
337 | return _find_next_bit_le(p, size, offset); | ||
338 | } | ||
326 | 339 | ||
327 | /* | 340 | /* |
328 | * Minix is defined to use little-endian byte ordering. | 341 | * Ext2 is defined to use little-endian byte ordering. |
329 | * These do not need to be atomic. | ||
330 | */ | 342 | */ |
331 | #define minix_set_bit(nr,p) \ | 343 | #define ext2_set_bit_atomic(lock, nr, p) \ |
332 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 344 | test_and_set_bit_le(nr, p) |
333 | #define minix_test_bit(nr,p) \ | 345 | #define ext2_clear_bit_atomic(lock, nr, p) \ |
334 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 346 | test_and_clear_bit_le(nr, p) |
335 | #define minix_test_and_set_bit(nr,p) \ | ||
336 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | ||
337 | #define minix_test_and_clear_bit(nr,p) \ | ||
338 | __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | ||
339 | #define minix_find_first_zero_bit(p,sz) \ | ||
340 | _find_first_zero_bit_le(p,sz) | ||
341 | 347 | ||
342 | #endif /* __KERNEL__ */ | 348 | #endif /* __KERNEL__ */ |
343 | 349 | ||
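The net effect of the bitops.h changes above is that the ext2_ and minix_ specific wrappers are gone, callers use the generic little-endian helpers instead, and the native-endian atomic macros are defined once through ATOMIC_BITOP. A minimal, hypothetical sketch of how a little-endian on-disk bitmap would now be manipulated (the function and variable names are illustrative, not part of the patch):

	#include <linux/bitops.h>

	/* Sketch only: 'bitmap' and 'nr' are illustrative names. */
	static void le_bitmap_example(void)
	{
		unsigned long bitmap[4] = { 0 };
		int nr = 42;

		__set_bit_le(nr, bitmap);		/* non-atomic set, LE bit numbering */
		if (test_bit_le(nr, bitmap))
			__clear_bit_le(nr, bitmap);	/* non-atomic clear */

		test_and_set_bit_le(nr, bitmap);	/* atomic; what ext2_set_bit_atomic() now maps to */
	}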
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3acd8fa25e34..d5d8d5c72682 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | 14 | ||
15 | #include <asm/glue.h> | 15 | #include <asm/glue-cache.h> |
16 | #include <asm/shmparam.h> | 16 | #include <asm/shmparam.h> |
17 | #include <asm/cachetype.h> | 17 | #include <asm/cachetype.h> |
18 | #include <asm/outercache.h> | 18 | #include <asm/outercache.h> |
@@ -20,123 +20,6 @@ | |||
20 | #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) | 20 | #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Cache Model | ||
24 | * =========== | ||
25 | */ | ||
26 | #undef _CACHE | ||
27 | #undef MULTI_CACHE | ||
28 | |||
29 | #if defined(CONFIG_CPU_CACHE_V3) | ||
30 | # ifdef _CACHE | ||
31 | # define MULTI_CACHE 1 | ||
32 | # else | ||
33 | # define _CACHE v3 | ||
34 | # endif | ||
35 | #endif | ||
36 | |||
37 | #if defined(CONFIG_CPU_CACHE_V4) | ||
38 | # ifdef _CACHE | ||
39 | # define MULTI_CACHE 1 | ||
40 | # else | ||
41 | # define _CACHE v4 | ||
42 | # endif | ||
43 | #endif | ||
44 | |||
45 | #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ | ||
46 | defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ | ||
47 | defined(CONFIG_CPU_ARM1026) | ||
48 | # define MULTI_CACHE 1 | ||
49 | #endif | ||
50 | |||
51 | #if defined(CONFIG_CPU_FA526) | ||
52 | # ifdef _CACHE | ||
53 | # define MULTI_CACHE 1 | ||
54 | # else | ||
55 | # define _CACHE fa | ||
56 | # endif | ||
57 | #endif | ||
58 | |||
59 | #if defined(CONFIG_CPU_ARM926T) | ||
60 | # ifdef _CACHE | ||
61 | # define MULTI_CACHE 1 | ||
62 | # else | ||
63 | # define _CACHE arm926 | ||
64 | # endif | ||
65 | #endif | ||
66 | |||
67 | #if defined(CONFIG_CPU_ARM940T) | ||
68 | # ifdef _CACHE | ||
69 | # define MULTI_CACHE 1 | ||
70 | # else | ||
71 | # define _CACHE arm940 | ||
72 | # endif | ||
73 | #endif | ||
74 | |||
75 | #if defined(CONFIG_CPU_ARM946E) | ||
76 | # ifdef _CACHE | ||
77 | # define MULTI_CACHE 1 | ||
78 | # else | ||
79 | # define _CACHE arm946 | ||
80 | # endif | ||
81 | #endif | ||
82 | |||
83 | #if defined(CONFIG_CPU_CACHE_V4WB) | ||
84 | # ifdef _CACHE | ||
85 | # define MULTI_CACHE 1 | ||
86 | # else | ||
87 | # define _CACHE v4wb | ||
88 | # endif | ||
89 | #endif | ||
90 | |||
91 | #if defined(CONFIG_CPU_XSCALE) | ||
92 | # ifdef _CACHE | ||
93 | # define MULTI_CACHE 1 | ||
94 | # else | ||
95 | # define _CACHE xscale | ||
96 | # endif | ||
97 | #endif | ||
98 | |||
99 | #if defined(CONFIG_CPU_XSC3) | ||
100 | # ifdef _CACHE | ||
101 | # define MULTI_CACHE 1 | ||
102 | # else | ||
103 | # define _CACHE xsc3 | ||
104 | # endif | ||
105 | #endif | ||
106 | |||
107 | #if defined(CONFIG_CPU_MOHAWK) | ||
108 | # ifdef _CACHE | ||
109 | # define MULTI_CACHE 1 | ||
110 | # else | ||
111 | # define _CACHE mohawk | ||
112 | # endif | ||
113 | #endif | ||
114 | |||
115 | #if defined(CONFIG_CPU_FEROCEON) | ||
116 | # define MULTI_CACHE 1 | ||
117 | #endif | ||
118 | |||
119 | #if defined(CONFIG_CPU_V6) | ||
120 | //# ifdef _CACHE | ||
121 | # define MULTI_CACHE 1 | ||
122 | //# else | ||
123 | //# define _CACHE v6 | ||
124 | //# endif | ||
125 | #endif | ||
126 | |||
127 | #if defined(CONFIG_CPU_V7) | ||
128 | //# ifdef _CACHE | ||
129 | # define MULTI_CACHE 1 | ||
130 | //# else | ||
131 | //# define _CACHE v7 | ||
132 | //# endif | ||
133 | #endif | ||
134 | |||
135 | #if !defined(_CACHE) && !defined(MULTI_CACHE) | ||
136 | #error Unknown cache maintainence model | ||
137 | #endif | ||
138 | |||
139 | /* | ||
140 | * This flag is used to indicate that the page pointed to by a pte is clean | 23 | * This flag is used to indicate that the page pointed to by a pte is clean |
141 | * and does not require cleaning before returning it to the user. | 24 | * and does not require cleaning before returning it to the user. |
142 | */ | 25 | */ |
@@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache; | |||
249 | * visible to the CPU. | 132 | * visible to the CPU. |
250 | */ | 133 | */ |
251 | #define dmac_map_area cpu_cache.dma_map_area | 134 | #define dmac_map_area cpu_cache.dma_map_area |
252 | #define dmac_unmap_area cpu_cache.dma_unmap_area | 135 | #define dmac_unmap_area cpu_cache.dma_unmap_area |
253 | #define dmac_flush_range cpu_cache.dma_flush_range | 136 | #define dmac_flush_range cpu_cache.dma_flush_range |
254 | 137 | ||
255 | #else | 138 | #else |
256 | 139 | ||
257 | #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) | ||
258 | #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) | ||
259 | #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) | ||
260 | #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) | ||
261 | #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) | ||
262 | #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) | ||
263 | #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) | ||
264 | |||
265 | extern void __cpuc_flush_icache_all(void); | 140 | extern void __cpuc_flush_icache_all(void); |
266 | extern void __cpuc_flush_kern_all(void); | 141 | extern void __cpuc_flush_kern_all(void); |
267 | extern void __cpuc_flush_user_all(void); | 142 | extern void __cpuc_flush_user_all(void); |
@@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); | |||
276 | * is visible to DMA, or data written by DMA to system memory is | 151 | * is visible to DMA, or data written by DMA to system memory is |
277 | * visible to the CPU. | 152 | * visible to the CPU. |
278 | */ | 153 | */ |
279 | #define dmac_map_area __glue(_CACHE,_dma_map_area) | ||
280 | #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) | ||
281 | #define dmac_flush_range __glue(_CACHE,_dma_flush_range) | ||
282 | |||
283 | extern void dmac_map_area(const void *, size_t, int); | 154 | extern void dmac_map_area(const void *, size_t, int); |
284 | extern void dmac_unmap_area(const void *, size_t, int); | 155 | extern void dmac_unmap_area(const void *, size_t, int); |
285 | extern void dmac_flush_range(const void *, const void *); | 156 | extern void dmac_flush_range(const void *, const void *); |
@@ -316,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, | |||
316 | * Optimized __flush_icache_all for the common cases. Note that UP ARMv7 | 187 | * Optimized __flush_icache_all for the common cases. Note that UP ARMv7 |
317 | * will fall through to use __flush_icache_all_generic. | 188 | * will fall through to use __flush_icache_all_generic. |
318 | */ | 189 | */ |
319 | #if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \ | 190 | #if (defined(CONFIG_CPU_V7) && \ |
191 | (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \ | ||
320 | defined(CONFIG_SMP_ON_UP) | 192 | defined(CONFIG_SMP_ON_UP) |
321 | #define __flush_icache_preferred __cpuc_flush_icache_all | 193 | #define __flush_icache_preferred __cpuc_flush_icache_all |
322 | #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) | 194 | #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) |
diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h
deleted file mode 100644
index e2b5b0b2116a..000000000000
--- a/arch/arm/include/asm/cpu-multi32.h
+++ /dev/null
@@ -1,69 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/cpu-multi32.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <asm/page.h> | ||
11 | |||
12 | struct mm_struct; | ||
13 | |||
14 | /* | ||
15 | * Don't change this structure - ASM code | ||
16 | * relies on it. | ||
17 | */ | ||
18 | extern struct processor { | ||
19 | /* MISC | ||
20 | * get data abort address/flags | ||
21 | */ | ||
22 | void (*_data_abort)(unsigned long pc); | ||
23 | /* | ||
24 | * Retrieve prefetch fault address | ||
25 | */ | ||
26 | unsigned long (*_prefetch_abort)(unsigned long lr); | ||
27 | /* | ||
28 | * Set up any processor specifics | ||
29 | */ | ||
30 | void (*_proc_init)(void); | ||
31 | /* | ||
32 | * Disable any processor specifics | ||
33 | */ | ||
34 | void (*_proc_fin)(void); | ||
35 | /* | ||
36 | * Special stuff for a reset | ||
37 | */ | ||
38 | void (*reset)(unsigned long addr) __attribute__((noreturn)); | ||
39 | /* | ||
40 | * Idle the processor | ||
41 | */ | ||
42 | int (*_do_idle)(void); | ||
43 | /* | ||
44 | * Processor architecture specific | ||
45 | */ | ||
46 | /* | ||
47 | * clean a virtual address range from the | ||
48 | * D-cache without flushing the cache. | ||
49 | */ | ||
50 | void (*dcache_clean_area)(void *addr, int size); | ||
51 | |||
52 | /* | ||
53 | * Set the page table | ||
54 | */ | ||
55 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); | ||
56 | /* | ||
57 | * Set a possibly extended PTE. Non-extended PTEs should | ||
58 | * ignore 'ext'. | ||
59 | */ | ||
60 | void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); | ||
61 | } processor; | ||
62 | |||
63 | #define cpu_proc_init() processor._proc_init() | ||
64 | #define cpu_proc_fin() processor._proc_fin() | ||
65 | #define cpu_reset(addr) processor.reset(addr) | ||
66 | #define cpu_do_idle() processor._do_idle() | ||
67 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) | ||
68 | #define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) | ||
69 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) | ||
diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h
deleted file mode 100644
index f073a6d2a406..000000000000
--- a/arch/arm/include/asm/cpu-single.h
+++ /dev/null
@@ -1,44 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/cpu-single.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | /* | ||
11 | * Single CPU | ||
12 | */ | ||
13 | #ifdef __STDC__ | ||
14 | #define __catify_fn(name,x) name##x | ||
15 | #else | ||
16 | #define __catify_fn(name,x) name/**/x | ||
17 | #endif | ||
18 | #define __cpu_fn(name,x) __catify_fn(name,x) | ||
19 | |||
20 | /* | ||
21 | * If we are supporting multiple CPUs, then we must use a table of | ||
22 | * function pointers for this lot. Otherwise, we can optimise the | ||
23 | * table away. | ||
24 | */ | ||
25 | #define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) | ||
26 | #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) | ||
27 | #define cpu_reset __cpu_fn(CPU_NAME,_reset) | ||
28 | #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) | ||
29 | #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) | ||
30 | #define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) | ||
31 | #define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext) | ||
32 | |||
33 | #include <asm/page.h> | ||
34 | |||
35 | struct mm_struct; | ||
36 | |||
37 | /* declare all the functions as extern */ | ||
38 | extern void cpu_proc_init(void); | ||
39 | extern void cpu_proc_fin(void); | ||
40 | extern int cpu_do_idle(void); | ||
41 | extern void cpu_dcache_clean_area(void *, int); | ||
42 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | ||
43 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); | ||
44 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); | ||
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96cc0020..ed5bc9e05a4e 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -23,6 +23,8 @@ | |||
23 | #define CPUID_EXT_ISAR4 "c2, 4" | 23 | #define CPUID_EXT_ISAR4 "c2, 4" |
24 | #define CPUID_EXT_ISAR5 "c2, 5" | 24 | #define CPUID_EXT_ISAR5 "c2, 5" |
25 | 25 | ||
26 | extern unsigned int processor_id; | ||
27 | |||
26 | #ifdef CONFIG_CPU_CP15 | 28 | #ifdef CONFIG_CPU_CP15 |
27 | #define read_cpuid(reg) \ | 29 | #define read_cpuid(reg) \ |
28 | ({ \ | 30 | ({ \ |
@@ -43,7 +45,6 @@ | |||
43 | __val; \ | 45 | __val; \ |
44 | }) | 46 | }) |
45 | #else | 47 | #else |
46 | extern unsigned int processor_id; | ||
47 | #define read_cpuid(reg) (processor_id) | 48 | #define read_cpuid(reg) (processor_id) |
48 | #define read_cpuid_ext(reg) 0 | 49 | #define read_cpuid_ext(reg) 0 |
49 | #endif | 50 | #endif |
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 000000000000..de5354746924
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/fncpy.h - helper macros for function body copying | ||
3 | * | ||
4 | * Copyright (C) 2011 Linaro Limited | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * These macros are intended for use when there is a need to copy a low-level | ||
22 | * function body into special memory. | ||
23 | * | ||
24 | * For example, when reconfiguring the SDRAM controller, the code doing the | ||
25 | * reconfiguration may need to run from SRAM. | ||
26 | * | ||
27 | * NOTE: that the copied function body must be entirely self-contained and | ||
28 | * position-independent in order for this to work properly. | ||
29 | * | ||
30 | * NOTE: in order for embedded literals and data to get referenced correctly, | ||
31 | * the alignment of functions must be preserved when copying. To ensure this, | ||
32 | * the source and destination addresses for fncpy() must be aligned to a | ||
33 | * multiple of 8 bytes: you will be get a BUG() if this condition is not met. | ||
34 | * You will typically need a ".align 3" directive in the assembler where the | ||
35 | * function to be copied is defined, and ensure that your allocator for the | ||
36 | * destination buffer returns 8-byte-aligned pointers. | ||
37 | * | ||
38 | * Typical usage example: | ||
39 | * | ||
40 | * extern int f(args); | ||
41 | * extern uint32_t size_of_f; | ||
42 | * int (*copied_f)(args); | ||
43 | * void *sram_buffer; | ||
44 | * | ||
45 | * copied_f = fncpy(sram_buffer, &f, size_of_f); | ||
46 | * | ||
47 | * ... later, call the function: ... | ||
48 | * | ||
49 | * copied_f(args); | ||
50 | * | ||
51 | * The size of the function to be copied can't be determined from C: | ||
52 | * this must be determined by other means, such as adding assmbler directives | ||
53 | * in the file where f is defined. | ||
54 | */ | ||
55 | |||
56 | #ifndef __ASM_FNCPY_H | ||
57 | #define __ASM_FNCPY_H | ||
58 | |||
59 | #include <linux/types.h> | ||
60 | #include <linux/string.h> | ||
61 | |||
62 | #include <asm/bug.h> | ||
63 | #include <asm/cacheflush.h> | ||
64 | |||
65 | /* | ||
66 | * Minimum alignment requirement for the source and destination addresses | ||
67 | * for function copying. | ||
68 | */ | ||
69 | #define FNCPY_ALIGN 8 | ||
70 | |||
71 | #define fncpy(dest_buf, funcp, size) ({ \ | ||
72 | uintptr_t __funcp_address; \ | ||
73 | typeof(funcp) __result; \ | ||
74 | \ | ||
75 | asm("" : "=r" (__funcp_address) : "0" (funcp)); \ | ||
76 | \ | ||
77 | /* \ | ||
78 | * Ensure alignment of source and destination addresses, \ | ||
79 | * disregarding the function's Thumb bit: \ | ||
80 | */ \ | ||
81 | BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ | ||
82 | (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ | ||
83 | \ | ||
84 | memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ | ||
85 | flush_icache_range((unsigned long)(dest_buf), \ | ||
86 | (unsigned long)(dest_buf) + (size)); \ | ||
87 | \ | ||
88 | asm("" : "=r" (__result) \ | ||
89 | : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \ | ||
90 | \ | ||
91 | __result; \ | ||
92 | }) | ||
93 | |||
94 | #endif /* !__ASM_FNCPY_H */ | ||
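The comment block in the new fncpy.h already documents the intended usage; the following is a slightly fuller caller-side sketch under the same assumptions. The routine name, its exported size symbol and the SRAM buffer are hypothetical; the real routine must be position-independent, start on an 8-byte boundary, and have its byte size exported from the assembler file that defines it:

	#include <linux/types.h>
	#include <asm/fncpy.h>

	/* Sketch only: sram_reconfigure, sram_reconfigure_sz and sram_buf are
	 * hypothetical names, not part of the patch. */
	extern void sram_reconfigure(void);		/* defined in assembly, .align 3 */
	extern const u32 sram_reconfigure_sz;		/* size emitted next to the routine */

	static void (*reconfigure_from_sram)(void);

	static void copy_reconfigure_code(void *sram_buf)	/* must be 8-byte aligned */
	{
		reconfigure_from_sram = fncpy(sram_buf, &sram_reconfigure,
					      sram_reconfigure_sz);
		reconfigure_from_sram();		/* now executes entirely from SRAM */
	}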
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index b33fe7065b38..199a6b6de7f4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -35,7 +35,7 @@ | |||
35 | : "cc", "memory") | 35 | : "cc", "memory") |
36 | 36 | ||
37 | static inline int | 37 | static inline int |
38 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 38 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
39 | { | 39 | { |
40 | int op = (encoded_op >> 28) & 7; | 40 | int op = (encoded_op >> 28) & 7; |
41 | int cmp = (encoded_op >> 24) & 15; | 41 | int cmp = (encoded_op >> 24) & 15; |
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
46 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 46 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
47 | oparg = 1 << oparg; | 47 | oparg = 1 << oparg; |
48 | 48 | ||
49 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 49 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
50 | return -EFAULT; | 50 | return -EFAULT; |
51 | 51 | ||
52 | pagefault_disable(); /* implies preempt_disable() */ | 52 | pagefault_disable(); /* implies preempt_disable() */ |
@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | static inline int | 90 | static inline int |
91 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 91 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
92 | u32 oldval, u32 newval) | ||
92 | { | 93 | { |
93 | int val; | 94 | int ret = 0; |
95 | u32 val; | ||
94 | 96 | ||
95 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 97 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
96 | return -EFAULT; | 98 | return -EFAULT; |
97 | 99 | ||
98 | pagefault_disable(); /* implies preempt_disable() */ | ||
99 | |||
100 | __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" | 100 | __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" |
101 | "1: " T(ldr) " %0, [%3]\n" | 101 | "1: " T(ldr) " %1, [%4]\n" |
102 | " teq %0, %1\n" | 102 | " teq %1, %2\n" |
103 | " it eq @ explicit IT needed for the 2b label\n" | 103 | " it eq @ explicit IT needed for the 2b label\n" |
104 | "2: " T(streq) " %2, [%3]\n" | 104 | "2: " T(streq) " %3, [%4]\n" |
105 | "3:\n" | 105 | "3:\n" |
106 | " .pushsection __ex_table,\"a\"\n" | 106 | " .pushsection __ex_table,\"a\"\n" |
107 | " .align 3\n" | 107 | " .align 3\n" |
108 | " .long 1b, 4f, 2b, 4f\n" | 108 | " .long 1b, 4f, 2b, 4f\n" |
109 | " .popsection\n" | 109 | " .popsection\n" |
110 | " .pushsection .fixup,\"ax\"\n" | 110 | " .pushsection .fixup,\"ax\"\n" |
111 | "4: mov %0, %4\n" | 111 | "4: mov %0, %5\n" |
112 | " b 3b\n" | 112 | " b 3b\n" |
113 | " .popsection" | 113 | " .popsection" |
114 | : "=&r" (val) | 114 | : "+r" (ret), "=&r" (val) |
115 | : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) | 115 | : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) |
116 | : "cc", "memory"); | 116 | : "cc", "memory"); |
117 | 117 | ||
118 | pagefault_enable(); /* subsumes preempt_enable() */ | 118 | *uval = val; |
119 | 119 | return ret; | |
120 | return val; | ||
121 | } | 120 | } |
122 | 121 | ||
123 | #endif /* !SMP */ | 122 | #endif /* !SMP */ |
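The futex.h hunk above changes the calling convention of futex_atomic_cmpxchg_inatomic(): it now returns 0 or -EFAULT and passes the value read from user space back through the new uval argument instead of returning it. A hedged caller-side sketch (the wrapper function and its names are hypothetical):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <asm/futex.h>

	/* Sketch only: shows the new convention, not code from the patch. */
	static int cmpxchg_futex_value(u32 __user *uaddr, u32 oldval, u32 newval)
	{
		u32 curval;
		int ret;

		ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
		if (ret)
			return ret;			/* -EFAULT: user access faulted */

		return curval == oldval ? 0 : -EAGAIN;	/* -EAGAIN: lost the race */
	}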
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
new file mode 100644
index 000000000000..c7afbc552c7f
--- /dev/null
+++ b/arch/arm/include/asm/glue-cache.h
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/glue-cache.h | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef ASM_GLUE_CACHE_H | ||
11 | #define ASM_GLUE_CACHE_H | ||
12 | |||
13 | #include <asm/glue.h> | ||
14 | |||
15 | /* | ||
16 | * Cache Model | ||
17 | * =========== | ||
18 | */ | ||
19 | #undef _CACHE | ||
20 | #undef MULTI_CACHE | ||
21 | |||
22 | #if defined(CONFIG_CPU_CACHE_V3) | ||
23 | # ifdef _CACHE | ||
24 | # define MULTI_CACHE 1 | ||
25 | # else | ||
26 | # define _CACHE v3 | ||
27 | # endif | ||
28 | #endif | ||
29 | |||
30 | #if defined(CONFIG_CPU_CACHE_V4) | ||
31 | # ifdef _CACHE | ||
32 | # define MULTI_CACHE 1 | ||
33 | # else | ||
34 | # define _CACHE v4 | ||
35 | # endif | ||
36 | #endif | ||
37 | |||
38 | #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ | ||
39 | defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ | ||
40 | defined(CONFIG_CPU_ARM1026) | ||
41 | # define MULTI_CACHE 1 | ||
42 | #endif | ||
43 | |||
44 | #if defined(CONFIG_CPU_FA526) | ||
45 | # ifdef _CACHE | ||
46 | # define MULTI_CACHE 1 | ||
47 | # else | ||
48 | # define _CACHE fa | ||
49 | # endif | ||
50 | #endif | ||
51 | |||
52 | #if defined(CONFIG_CPU_ARM926T) | ||
53 | # ifdef _CACHE | ||
54 | # define MULTI_CACHE 1 | ||
55 | # else | ||
56 | # define _CACHE arm926 | ||
57 | # endif | ||
58 | #endif | ||
59 | |||
60 | #if defined(CONFIG_CPU_ARM940T) | ||
61 | # ifdef _CACHE | ||
62 | # define MULTI_CACHE 1 | ||
63 | # else | ||
64 | # define _CACHE arm940 | ||
65 | # endif | ||
66 | #endif | ||
67 | |||
68 | #if defined(CONFIG_CPU_ARM946E) | ||
69 | # ifdef _CACHE | ||
70 | # define MULTI_CACHE 1 | ||
71 | # else | ||
72 | # define _CACHE arm946 | ||
73 | # endif | ||
74 | #endif | ||
75 | |||
76 | #if defined(CONFIG_CPU_CACHE_V4WB) | ||
77 | # ifdef _CACHE | ||
78 | # define MULTI_CACHE 1 | ||
79 | # else | ||
80 | # define _CACHE v4wb | ||
81 | # endif | ||
82 | #endif | ||
83 | |||
84 | #if defined(CONFIG_CPU_XSCALE) | ||
85 | # ifdef _CACHE | ||
86 | # define MULTI_CACHE 1 | ||
87 | # else | ||
88 | # define _CACHE xscale | ||
89 | # endif | ||
90 | #endif | ||
91 | |||
92 | #if defined(CONFIG_CPU_XSC3) | ||
93 | # ifdef _CACHE | ||
94 | # define MULTI_CACHE 1 | ||
95 | # else | ||
96 | # define _CACHE xsc3 | ||
97 | # endif | ||
98 | #endif | ||
99 | |||
100 | #if defined(CONFIG_CPU_MOHAWK) | ||
101 | # ifdef _CACHE | ||
102 | # define MULTI_CACHE 1 | ||
103 | # else | ||
104 | # define _CACHE mohawk | ||
105 | # endif | ||
106 | #endif | ||
107 | |||
108 | #if defined(CONFIG_CPU_FEROCEON) | ||
109 | # define MULTI_CACHE 1 | ||
110 | #endif | ||
111 | |||
112 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) | ||
113 | //# ifdef _CACHE | ||
114 | # define MULTI_CACHE 1 | ||
115 | //# else | ||
116 | //# define _CACHE v6 | ||
117 | //# endif | ||
118 | #endif | ||
119 | |||
120 | #if defined(CONFIG_CPU_V7) | ||
121 | //# ifdef _CACHE | ||
122 | # define MULTI_CACHE 1 | ||
123 | //# else | ||
124 | //# define _CACHE v7 | ||
125 | //# endif | ||
126 | #endif | ||
127 | |||
128 | #if !defined(_CACHE) && !defined(MULTI_CACHE) | ||
129 | #error Unknown cache maintainence model | ||
130 | #endif | ||
131 | |||
132 | #ifndef MULTI_CACHE | ||
133 | #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) | ||
134 | #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) | ||
135 | #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) | ||
136 | #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) | ||
137 | #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) | ||
138 | #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) | ||
139 | #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) | ||
140 | |||
141 | #define dmac_map_area __glue(_CACHE,_dma_map_area) | ||
142 | #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) | ||
143 | #define dmac_flush_range __glue(_CACHE,_dma_flush_range) | ||
144 | #endif | ||
145 | |||
146 | #endif | ||
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
new file mode 100644
index 000000000000..354d571e8bcc
--- /dev/null
+++ b/arch/arm/include/asm/glue-df.h
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/glue-df.h | ||
3 | * | ||
4 | * Copyright (C) 1997-1999 Russell King | ||
5 | * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef ASM_GLUE_DF_H | ||
12 | #define ASM_GLUE_DF_H | ||
13 | |||
14 | #include <asm/glue.h> | ||
15 | |||
16 | /* | ||
17 | * Data Abort Model | ||
18 | * ================ | ||
19 | * | ||
20 | * We have the following to choose from: | ||
21 | * arm6 - ARM6 style | ||
22 | * arm7 - ARM7 style | ||
23 | * v4_early - ARMv4 without Thumb early abort handler | ||
24 | * v4t_late - ARMv4 with Thumb late abort handler | ||
25 | * v4t_early - ARMv4 with Thumb early abort handler | ||
26 | * v5tej_early - ARMv5 with Thumb and Java early abort handler | ||
27 | * xscale - ARMv5 with Thumb with Xscale extensions | ||
28 | * v6_early - ARMv6 generic early abort handler | ||
29 | * v7_early - ARMv7 generic early abort handler | ||
30 | */ | ||
31 | #undef CPU_DABORT_HANDLER | ||
32 | #undef MULTI_DABORT | ||
33 | |||
34 | #if defined(CONFIG_CPU_ARM610) | ||
35 | # ifdef CPU_DABORT_HANDLER | ||
36 | # define MULTI_DABORT 1 | ||
37 | # else | ||
38 | # define CPU_DABORT_HANDLER cpu_arm6_data_abort | ||
39 | # endif | ||
40 | #endif | ||
41 | |||
42 | #if defined(CONFIG_CPU_ARM710) | ||
43 | # ifdef CPU_DABORT_HANDLER | ||
44 | # define MULTI_DABORT 1 | ||
45 | # else | ||
46 | # define CPU_DABORT_HANDLER cpu_arm7_data_abort | ||
47 | # endif | ||
48 | #endif | ||
49 | |||
50 | #ifdef CONFIG_CPU_ABRT_LV4T | ||
51 | # ifdef CPU_DABORT_HANDLER | ||
52 | # define MULTI_DABORT 1 | ||
53 | # else | ||
54 | # define CPU_DABORT_HANDLER v4t_late_abort | ||
55 | # endif | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_CPU_ABRT_EV4 | ||
59 | # ifdef CPU_DABORT_HANDLER | ||
60 | # define MULTI_DABORT 1 | ||
61 | # else | ||
62 | # define CPU_DABORT_HANDLER v4_early_abort | ||
63 | # endif | ||
64 | #endif | ||
65 | |||
66 | #ifdef CONFIG_CPU_ABRT_EV4T | ||
67 | # ifdef CPU_DABORT_HANDLER | ||
68 | # define MULTI_DABORT 1 | ||
69 | # else | ||
70 | # define CPU_DABORT_HANDLER v4t_early_abort | ||
71 | # endif | ||
72 | #endif | ||
73 | |||
74 | #ifdef CONFIG_CPU_ABRT_EV5TJ | ||
75 | # ifdef CPU_DABORT_HANDLER | ||
76 | # define MULTI_DABORT 1 | ||
77 | # else | ||
78 | # define CPU_DABORT_HANDLER v5tj_early_abort | ||
79 | # endif | ||
80 | #endif | ||
81 | |||
82 | #ifdef CONFIG_CPU_ABRT_EV5T | ||
83 | # ifdef CPU_DABORT_HANDLER | ||
84 | # define MULTI_DABORT 1 | ||
85 | # else | ||
86 | # define CPU_DABORT_HANDLER v5t_early_abort | ||
87 | # endif | ||
88 | #endif | ||
89 | |||
90 | #ifdef CONFIG_CPU_ABRT_EV6 | ||
91 | # ifdef CPU_DABORT_HANDLER | ||
92 | # define MULTI_DABORT 1 | ||
93 | # else | ||
94 | # define CPU_DABORT_HANDLER v6_early_abort | ||
95 | # endif | ||
96 | #endif | ||
97 | |||
98 | #ifdef CONFIG_CPU_ABRT_EV7 | ||
99 | # ifdef CPU_DABORT_HANDLER | ||
100 | # define MULTI_DABORT 1 | ||
101 | # else | ||
102 | # define CPU_DABORT_HANDLER v7_early_abort | ||
103 | # endif | ||
104 | #endif | ||
105 | |||
106 | #ifndef CPU_DABORT_HANDLER | ||
107 | #error Unknown data abort handler type | ||
108 | #endif | ||
109 | |||
110 | #endif | ||
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
new file mode 100644
index 000000000000..d385f37c13f0
--- /dev/null
+++ b/arch/arm/include/asm/glue-pf.h
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/glue-pf.h | ||
3 | * | ||
4 | * Copyright (C) 1997-1999 Russell King | ||
5 | * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef ASM_GLUE_PF_H | ||
12 | #define ASM_GLUE_PF_H | ||
13 | |||
14 | #include <asm/glue.h> | ||
15 | |||
16 | /* | ||
17 | * Prefetch Abort Model | ||
18 | * ================ | ||
19 | * | ||
20 | * We have the following to choose from: | ||
21 | * legacy - no IFSR, no IFAR | ||
22 | * v6 - ARMv6: IFSR, no IFAR | ||
23 | * v7 - ARMv7: IFSR and IFAR | ||
24 | */ | ||
25 | |||
26 | #undef CPU_PABORT_HANDLER | ||
27 | #undef MULTI_PABORT | ||
28 | |||
29 | #ifdef CONFIG_CPU_PABRT_LEGACY | ||
30 | # ifdef CPU_PABORT_HANDLER | ||
31 | # define MULTI_PABORT 1 | ||
32 | # else | ||
33 | # define CPU_PABORT_HANDLER legacy_pabort | ||
34 | # endif | ||
35 | #endif | ||
36 | |||
37 | #ifdef CONFIG_CPU_PABRT_V6 | ||
38 | # ifdef CPU_PABORT_HANDLER | ||
39 | # define MULTI_PABORT 1 | ||
40 | # else | ||
41 | # define CPU_PABORT_HANDLER v6_pabort | ||
42 | # endif | ||
43 | #endif | ||
44 | |||
45 | #ifdef CONFIG_CPU_PABRT_V7 | ||
46 | # ifdef CPU_PABORT_HANDLER | ||
47 | # define MULTI_PABORT 1 | ||
48 | # else | ||
49 | # define CPU_PABORT_HANDLER v7_pabort | ||
50 | # endif | ||
51 | #endif | ||
52 | |||
53 | #ifndef CPU_PABORT_HANDLER | ||
54 | #error Unknown prefetch abort handler type | ||
55 | #endif | ||
56 | |||
57 | #endif | ||
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
new file mode 100644
index 000000000000..e2be7f142668
--- /dev/null
+++ b/arch/arm/include/asm/glue-proc.h
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/glue-proc.h | ||
3 | * | ||
4 | * Copyright (C) 1997-1999 Russell King | ||
5 | * Copyright (C) 2000 Deep Blue Solutions Ltd | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef ASM_GLUE_PROC_H | ||
12 | #define ASM_GLUE_PROC_H | ||
13 | |||
14 | #include <asm/glue.h> | ||
15 | |||
16 | /* | ||
17 | * Work out if we need multiple CPU support | ||
18 | */ | ||
19 | #undef MULTI_CPU | ||
20 | #undef CPU_NAME | ||
21 | |||
22 | /* | ||
23 | * CPU_NAME - the prefix for CPU related functions | ||
24 | */ | ||
25 | |||
26 | #ifdef CONFIG_CPU_ARM610 | ||
27 | # ifdef CPU_NAME | ||
28 | # undef MULTI_CPU | ||
29 | # define MULTI_CPU | ||
30 | # else | ||
31 | # define CPU_NAME cpu_arm6 | ||
32 | # endif | ||
33 | #endif | ||
34 | |||
35 | #ifdef CONFIG_CPU_ARM7TDMI | ||
36 | # ifdef CPU_NAME | ||
37 | # undef MULTI_CPU | ||
38 | # define MULTI_CPU | ||
39 | # else | ||
40 | # define CPU_NAME cpu_arm7tdmi | ||
41 | # endif | ||
42 | #endif | ||
43 | |||
44 | #ifdef CONFIG_CPU_ARM710 | ||
45 | # ifdef CPU_NAME | ||
46 | # undef MULTI_CPU | ||
47 | # define MULTI_CPU | ||
48 | # else | ||
49 | # define CPU_NAME cpu_arm7 | ||
50 | # endif | ||
51 | #endif | ||
52 | |||
53 | #ifdef CONFIG_CPU_ARM720T | ||
54 | # ifdef CPU_NAME | ||
55 | # undef MULTI_CPU | ||
56 | # define MULTI_CPU | ||
57 | # else | ||
58 | # define CPU_NAME cpu_arm720 | ||
59 | # endif | ||
60 | #endif | ||
61 | |||
62 | #ifdef CONFIG_CPU_ARM740T | ||
63 | # ifdef CPU_NAME | ||
64 | # undef MULTI_CPU | ||
65 | # define MULTI_CPU | ||
66 | # else | ||
67 | # define CPU_NAME cpu_arm740 | ||
68 | # endif | ||
69 | #endif | ||
70 | |||
71 | #ifdef CONFIG_CPU_ARM9TDMI | ||
72 | # ifdef CPU_NAME | ||
73 | # undef MULTI_CPU | ||
74 | # define MULTI_CPU | ||
75 | # else | ||
76 | # define CPU_NAME cpu_arm9tdmi | ||
77 | # endif | ||
78 | #endif | ||
79 | |||
80 | #ifdef CONFIG_CPU_ARM920T | ||
81 | # ifdef CPU_NAME | ||
82 | # undef MULTI_CPU | ||
83 | # define MULTI_CPU | ||
84 | # else | ||
85 | # define CPU_NAME cpu_arm920 | ||
86 | # endif | ||
87 | #endif | ||
88 | |||
89 | #ifdef CONFIG_CPU_ARM922T | ||
90 | # ifdef CPU_NAME | ||
91 | # undef MULTI_CPU | ||
92 | # define MULTI_CPU | ||
93 | # else | ||
94 | # define CPU_NAME cpu_arm922 | ||
95 | # endif | ||
96 | #endif | ||
97 | |||
98 | #ifdef CONFIG_CPU_FA526 | ||
99 | # ifdef CPU_NAME | ||
100 | # undef MULTI_CPU | ||
101 | # define MULTI_CPU | ||
102 | # else | ||
103 | # define CPU_NAME cpu_fa526 | ||
104 | # endif | ||
105 | #endif | ||
106 | |||
107 | #ifdef CONFIG_CPU_ARM925T | ||
108 | # ifdef CPU_NAME | ||
109 | # undef MULTI_CPU | ||
110 | # define MULTI_CPU | ||
111 | # else | ||
112 | # define CPU_NAME cpu_arm925 | ||
113 | # endif | ||
114 | #endif | ||
115 | |||
116 | #ifdef CONFIG_CPU_ARM926T | ||
117 | # ifdef CPU_NAME | ||
118 | # undef MULTI_CPU | ||
119 | # define MULTI_CPU | ||
120 | # else | ||
121 | # define CPU_NAME cpu_arm926 | ||
122 | # endif | ||
123 | #endif | ||
124 | |||
125 | #ifdef CONFIG_CPU_ARM940T | ||
126 | # ifdef CPU_NAME | ||
127 | # undef MULTI_CPU | ||
128 | # define MULTI_CPU | ||
129 | # else | ||
130 | # define CPU_NAME cpu_arm940 | ||
131 | # endif | ||
132 | #endif | ||
133 | |||
134 | #ifdef CONFIG_CPU_ARM946E | ||
135 | # ifdef CPU_NAME | ||
136 | # undef MULTI_CPU | ||
137 | # define MULTI_CPU | ||
138 | # else | ||
139 | # define CPU_NAME cpu_arm946 | ||
140 | # endif | ||
141 | #endif | ||
142 | |||
143 | #ifdef CONFIG_CPU_SA110 | ||
144 | # ifdef CPU_NAME | ||
145 | # undef MULTI_CPU | ||
146 | # define MULTI_CPU | ||
147 | # else | ||
148 | # define CPU_NAME cpu_sa110 | ||
149 | # endif | ||
150 | #endif | ||
151 | |||
152 | #ifdef CONFIG_CPU_SA1100 | ||
153 | # ifdef CPU_NAME | ||
154 | # undef MULTI_CPU | ||
155 | # define MULTI_CPU | ||
156 | # else | ||
157 | # define CPU_NAME cpu_sa1100 | ||
158 | # endif | ||
159 | #endif | ||
160 | |||
161 | #ifdef CONFIG_CPU_ARM1020 | ||
162 | # ifdef CPU_NAME | ||
163 | # undef MULTI_CPU | ||
164 | # define MULTI_CPU | ||
165 | # else | ||
166 | # define CPU_NAME cpu_arm1020 | ||
167 | # endif | ||
168 | #endif | ||
169 | |||
170 | #ifdef CONFIG_CPU_ARM1020E | ||
171 | # ifdef CPU_NAME | ||
172 | # undef MULTI_CPU | ||
173 | # define MULTI_CPU | ||
174 | # else | ||
175 | # define CPU_NAME cpu_arm1020e | ||
176 | # endif | ||
177 | #endif | ||
178 | |||
179 | #ifdef CONFIG_CPU_ARM1022 | ||
180 | # ifdef CPU_NAME | ||
181 | # undef MULTI_CPU | ||
182 | # define MULTI_CPU | ||
183 | # else | ||
184 | # define CPU_NAME cpu_arm1022 | ||
185 | # endif | ||
186 | #endif | ||
187 | |||
188 | #ifdef CONFIG_CPU_ARM1026 | ||
189 | # ifdef CPU_NAME | ||
190 | # undef MULTI_CPU | ||
191 | # define MULTI_CPU | ||
192 | # else | ||
193 | # define CPU_NAME cpu_arm1026 | ||
194 | # endif | ||
195 | #endif | ||
196 | |||
197 | #ifdef CONFIG_CPU_XSCALE | ||
198 | # ifdef CPU_NAME | ||
199 | # undef MULTI_CPU | ||
200 | # define MULTI_CPU | ||
201 | # else | ||
202 | # define CPU_NAME cpu_xscale | ||
203 | # endif | ||
204 | #endif | ||
205 | |||
206 | #ifdef CONFIG_CPU_XSC3 | ||
207 | # ifdef CPU_NAME | ||
208 | # undef MULTI_CPU | ||
209 | # define MULTI_CPU | ||
210 | # else | ||
211 | # define CPU_NAME cpu_xsc3 | ||
212 | # endif | ||
213 | #endif | ||
214 | |||
215 | #ifdef CONFIG_CPU_MOHAWK | ||
216 | # ifdef CPU_NAME | ||
217 | # undef MULTI_CPU | ||
218 | # define MULTI_CPU | ||
219 | # else | ||
220 | # define CPU_NAME cpu_mohawk | ||
221 | # endif | ||
222 | #endif | ||
223 | |||
224 | #ifdef CONFIG_CPU_FEROCEON | ||
225 | # ifdef CPU_NAME | ||
226 | # undef MULTI_CPU | ||
227 | # define MULTI_CPU | ||
228 | # else | ||
229 | # define CPU_NAME cpu_feroceon | ||
230 | # endif | ||
231 | #endif | ||
232 | |||
233 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) | ||
234 | # ifdef CPU_NAME | ||
235 | # undef MULTI_CPU | ||
236 | # define MULTI_CPU | ||
237 | # else | ||
238 | # define CPU_NAME cpu_v6 | ||
239 | # endif | ||
240 | #endif | ||
241 | |||
242 | #ifdef CONFIG_CPU_V7 | ||
243 | # ifdef CPU_NAME | ||
244 | # undef MULTI_CPU | ||
245 | # define MULTI_CPU | ||
246 | # else | ||
247 | # define CPU_NAME cpu_v7 | ||
248 | # endif | ||
249 | #endif | ||
250 | |||
251 | #ifndef MULTI_CPU | ||
252 | #define cpu_proc_init __glue(CPU_NAME,_proc_init) | ||
253 | #define cpu_proc_fin __glue(CPU_NAME,_proc_fin) | ||
254 | #define cpu_reset __glue(CPU_NAME,_reset) | ||
255 | #define cpu_do_idle __glue(CPU_NAME,_do_idle) | ||
256 | #define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) | ||
257 | #define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) | ||
258 | #define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) | ||
259 | #define cpu_suspend_size __glue(CPU_NAME,_suspend_size) | ||
260 | #define cpu_do_suspend __glue(CPU_NAME,_do_suspend) | ||
261 | #define cpu_do_resume __glue(CPU_NAME,_do_resume) | ||
262 | #endif | ||
263 | |||
264 | #endif | ||
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index 234a3fc1c78e..0ec35d1698aa 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -15,7 +15,6 @@ | |||
15 | */ | 15 | */ |
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | 17 | ||
18 | |||
19 | #ifdef __STDC__ | 18 | #ifdef __STDC__ |
20 | #define ____glue(name,fn) name##fn | 19 | #define ____glue(name,fn) name##fn |
21 | #else | 20 | #else |
@@ -23,141 +22,4 @@ | |||
23 | #endif | 22 | #endif |
24 | #define __glue(name,fn) ____glue(name,fn) | 23 | #define __glue(name,fn) ____glue(name,fn) |
25 | 24 | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Data Abort Model | ||
30 | * ================ | ||
31 | * | ||
32 | * We have the following to choose from: | ||
33 | * arm6 - ARM6 style | ||
34 | * arm7 - ARM7 style | ||
35 | * v4_early - ARMv4 without Thumb early abort handler | ||
36 | * v4t_late - ARMv4 with Thumb late abort handler | ||
37 | * v4t_early - ARMv4 with Thumb early abort handler | ||
38 | * v5tej_early - ARMv5 with Thumb and Java early abort handler | ||
39 | * xscale - ARMv5 with Thumb with Xscale extensions | ||
40 | * v6_early - ARMv6 generic early abort handler | ||
41 | * v7_early - ARMv7 generic early abort handler | ||
42 | */ | ||
43 | #undef CPU_DABORT_HANDLER | ||
44 | #undef MULTI_DABORT | ||
45 | |||
46 | #if defined(CONFIG_CPU_ARM610) | ||
47 | # ifdef CPU_DABORT_HANDLER | ||
48 | # define MULTI_DABORT 1 | ||
49 | # else | ||
50 | # define CPU_DABORT_HANDLER cpu_arm6_data_abort | ||
51 | # endif | ||
52 | #endif | ||
53 | |||
54 | #if defined(CONFIG_CPU_ARM710) | ||
55 | # ifdef CPU_DABORT_HANDLER | ||
56 | # define MULTI_DABORT 1 | ||
57 | # else | ||
58 | # define CPU_DABORT_HANDLER cpu_arm7_data_abort | ||
59 | # endif | ||
60 | #endif | ||
61 | |||
62 | #ifdef CONFIG_CPU_ABRT_LV4T | ||
63 | # ifdef CPU_DABORT_HANDLER | ||
64 | # define MULTI_DABORT 1 | ||
65 | # else | ||
66 | # define CPU_DABORT_HANDLER v4t_late_abort | ||
67 | # endif | ||
68 | #endif | ||
69 | |||
70 | #ifdef CONFIG_CPU_ABRT_EV4 | ||
71 | # ifdef CPU_DABORT_HANDLER | ||
72 | # define MULTI_DABORT 1 | ||
73 | # else | ||
74 | # define CPU_DABORT_HANDLER v4_early_abort | ||
75 | # endif | ||
76 | #endif | ||
77 | |||
78 | #ifdef CONFIG_CPU_ABRT_EV4T | ||
79 | # ifdef CPU_DABORT_HANDLER | ||
80 | # define MULTI_DABORT 1 | ||
81 | # else | ||
82 | # define CPU_DABORT_HANDLER v4t_early_abort | ||
83 | # endif | ||
84 | #endif | ||
85 | |||
86 | #ifdef CONFIG_CPU_ABRT_EV5TJ | ||
87 | # ifdef CPU_DABORT_HANDLER | ||
88 | # define MULTI_DABORT 1 | ||
89 | # else | ||
90 | # define CPU_DABORT_HANDLER v5tj_early_abort | ||
91 | # endif | ||
92 | #endif | ||
93 | |||
94 | #ifdef CONFIG_CPU_ABRT_EV5T | ||
95 | # ifdef CPU_DABORT_HANDLER | ||
96 | # define MULTI_DABORT 1 | ||
97 | # else | ||
98 | # define CPU_DABORT_HANDLER v5t_early_abort | ||
99 | # endif | ||
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_CPU_ABRT_EV6 | ||
103 | # ifdef CPU_DABORT_HANDLER | ||
104 | # define MULTI_DABORT 1 | ||
105 | # else | ||
106 | # define CPU_DABORT_HANDLER v6_early_abort | ||
107 | # endif | ||
108 | #endif | ||
109 | |||
110 | #ifdef CONFIG_CPU_ABRT_EV7 | ||
111 | # ifdef CPU_DABORT_HANDLER | ||
112 | # define MULTI_DABORT 1 | ||
113 | # else | ||
114 | # define CPU_DABORT_HANDLER v7_early_abort | ||
115 | # endif | ||
116 | #endif | ||
117 | |||
118 | #ifndef CPU_DABORT_HANDLER | ||
119 | #error Unknown data abort handler type | ||
120 | #endif | ||
121 | |||
122 | /* | ||
123 | * Prefetch Abort Model | ||
124 | * ================ | ||
125 | * | ||
126 | * We have the following to choose from: | ||
127 | * legacy - no IFSR, no IFAR | ||
128 | * v6 - ARMv6: IFSR, no IFAR | ||
129 | * v7 - ARMv7: IFSR and IFAR | ||
130 | */ | ||
131 | |||
132 | #undef CPU_PABORT_HANDLER | ||
133 | #undef MULTI_PABORT | ||
134 | |||
135 | #ifdef CONFIG_CPU_PABRT_LEGACY | ||
136 | # ifdef CPU_PABORT_HANDLER | ||
137 | # define MULTI_PABORT 1 | ||
138 | # else | ||
139 | # define CPU_PABORT_HANDLER legacy_pabort | ||
140 | # endif | ||
141 | #endif | ||
142 | |||
143 | #ifdef CONFIG_CPU_PABRT_V6 | ||
144 | # ifdef CPU_PABORT_HANDLER | ||
145 | # define MULTI_PABORT 1 | ||
146 | # else | ||
147 | # define CPU_PABORT_HANDLER v6_pabort | ||
148 | # endif | ||
149 | #endif | ||
150 | |||
151 | #ifdef CONFIG_CPU_PABRT_V7 | ||
152 | # ifdef CPU_PABORT_HANDLER | ||
153 | # define MULTI_PABORT 1 | ||
154 | # else | ||
155 | # define CPU_PABORT_HANDLER v7_pabort | ||
156 | # endif | ||
157 | #endif | ||
158 | |||
159 | #ifndef CPU_PABORT_HANDLER | ||
160 | #error Unknown prefetch abort handler type | ||
161 | #endif | ||
162 | |||
163 | #endif | 25 | #endif |
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735c..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@ | |||
36 | #define L2X0_RAW_INTR_STAT 0x21C | 36 | #define L2X0_RAW_INTR_STAT 0x21C |
37 | #define L2X0_INTR_CLEAR 0x220 | 37 | #define L2X0_INTR_CLEAR 0x220 |
38 | #define L2X0_CACHE_SYNC 0x730 | 38 | #define L2X0_CACHE_SYNC 0x730 |
39 | #define L2X0_DUMMY_REG 0x740 | ||
39 | #define L2X0_INV_LINE_PA 0x770 | 40 | #define L2X0_INV_LINE_PA 0x770 |
40 | #define L2X0_INV_WAY 0x77C | 41 | #define L2X0_INV_WAY 0x77C |
41 | #define L2X0_CLEAN_LINE_PA 0x7B0 | 42 | #define L2X0_CLEAN_LINE_PA 0x7B0 |
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 84557d321001..0691f9dcc500 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
36 | extern void __iomem *gic_cpu_base_addr; | 36 | extern void __iomem *gic_cpu_base_addr; |
37 | extern struct irq_chip gic_arch_extn; | ||
37 | 38 | ||
38 | void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); | 39 | void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); |
39 | void gic_secondary_init(unsigned int); | 40 | void gic_secondary_init(unsigned int); |
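gic_arch_extn is a secondary irq_chip the GIC driver consults in addition to its own registers, typically so a platform can forward mask/unmask/set_wake operations to a SoC-level wake-up controller. A minimal sketch of a platform hooking it; all my_* names, the IRQ numbers and the omitted register write are hypothetical:

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/hardware/gic.h>

static int my_soc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	/* program the SoC wake-up mask for d->irq here (writel(...)) */
	return 0;
}

static void __init my_soc_gic_init(void __iomem *dist, void __iomem *cpu)
{
	gic_arch_extn.irq_set_wake = my_soc_irq_set_wake;
	gic_init(0, 29, dist, cpu);	/* prototype as declared above */
}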
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index a101f10bb5b1..e0d1c0cfa548 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h | |||
@@ -50,8 +50,17 @@ | |||
50 | #define SCPCELLID2 0xFF8 | 50 | #define SCPCELLID2 0xFF8 |
51 | #define SCPCELLID3 0xFFC | 51 | #define SCPCELLID3 0xFFC |
52 | 52 | ||
53 | #define SCCTRL_TIMEREN0SEL_REFCLK (0 << 15) | ||
54 | #define SCCTRL_TIMEREN0SEL_TIMCLK (1 << 15) | ||
55 | |||
56 | #define SCCTRL_TIMEREN1SEL_REFCLK (0 << 17) | ||
57 | #define SCCTRL_TIMEREN1SEL_TIMCLK (1 << 17) | ||
58 | |||
53 | static inline void sysctl_soft_reset(void __iomem *base) | 59 | static inline void sysctl_soft_reset(void __iomem *base) |
54 | { | 60 | { |
61 | /* switch to slow mode */ | ||
62 | writel(0x2, base + SCCTRL); | ||
63 | |||
55 | /* writing any value to SCSYSSTAT reg will reset system */ | 64 | /* writing any value to SCSYSSTAT reg will reset system */ |
56 | writel(0, base + SCSYSSTAT); | 65 | writel(0, base + SCSYSSTAT); |
57 | } | 66 | } |
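The new SCCTRL write switches the SP810 to slow (reference clock) mode before the SCSYSSTAT poke; presumably the reset only takes effect reliably in that mode. A caller needs nothing more than an existing mapping of the system controller, as in this sketch (the mapping is assumed to come from elsewhere):

#include <linux/io.h>
#include <asm/hardware/sp810.h>

static void my_board_reset(void __iomem *sp810_base)
{
	/* switches SCCTRL to slow mode, then any SCSYSSTAT write resets */
	sysctl_soft_reset(sp810_base);
}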
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7080e2c8fa62..a4edd19dd3d6 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h | |||
@@ -19,11 +19,36 @@ | |||
19 | 19 | ||
20 | extern pte_t *pkmap_page_table; | 20 | extern pte_t *pkmap_page_table; |
21 | 21 | ||
22 | extern void *kmap_high(struct page *page); | ||
23 | extern void kunmap_high(struct page *page); | ||
24 | |||
25 | /* | ||
26 | * The reason for kmap_high_get() is to ensure that the currently kmap'd | ||
27 | * page usage count does not decrease to zero while we're using its | ||
28 | * existing virtual mapping in an atomic context. With a VIVT cache this | ||
29 | * is essential to do, but with a VIPT cache this is only an optimization | ||
30 | * so not to pay the price of establishing a second mapping if an existing | ||
31 | * one can be used. However, on platforms without hardware TLB maintenance | ||
32 | * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since | ||
33 | * the locking involved must also disable IRQs which is incompatible with | ||
34 | * the IPI mechanism used by global TLB operations. | ||
35 | */ | ||
22 | #define ARCH_NEEDS_KMAP_HIGH_GET | 36 | #define ARCH_NEEDS_KMAP_HIGH_GET |
37 | #if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6) | ||
38 | #undef ARCH_NEEDS_KMAP_HIGH_GET | ||
39 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT) | ||
40 | #error "The sum of features in your kernel config cannot be supported together" | ||
41 | #endif | ||
42 | #endif | ||
23 | 43 | ||
24 | extern void *kmap_high(struct page *page); | 44 | #ifdef ARCH_NEEDS_KMAP_HIGH_GET |
25 | extern void *kmap_high_get(struct page *page); | 45 | extern void *kmap_high_get(struct page *page); |
26 | extern void kunmap_high(struct page *page); | 46 | #else |
47 | static inline void *kmap_high_get(struct page *page) | ||
48 | { | ||
49 | return NULL; | ||
50 | } | ||
51 | #endif | ||
27 | 52 | ||
28 | /* | 53 | /* |
29 | * The following functions are already defined by <linux/highmem.h> | 54 | * The following functions are already defined by <linux/highmem.h> |
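With the NULL-returning stub above, callers no longer need their own #ifdef ARCH_NEEDS_KMAP_HIGH_GET: they can always try kmap_high_get() and fall back to an atomic mapping. A sketch of the calling pattern (the helper below is illustrative, not part of the patch):

#include <linux/highmem.h>
#include <linux/mm.h>

static void zero_possibly_kmapped_page(struct page *page)
{
	/* NULL when the page has no live kmap, or when the helper is
	 * compiled out by the SMP + V6 TLB case handled above */
	void *vaddr = kmap_high_get(page);
	int pinned = (vaddr != NULL);

	if (!pinned)
		vaddr = kmap_atomic(page);

	clear_page(vaddr);

	if (pinned)
		kunmap_high(page);	/* drop the ref taken by kmap_high_get() */
	else
		kunmap_atomic(vaddr);
}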
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 20e0f7c9e03e..d66605dea55a 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
@@ -95,6 +95,15 @@ static inline void __iomem *__typesafe_io(unsigned long addr) | |||
95 | return (void __iomem *)addr; | 95 | return (void __iomem *)addr; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* IO barriers */ | ||
99 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
100 | #define __iormb() rmb() | ||
101 | #define __iowmb() wmb() | ||
102 | #else | ||
103 | #define __iormb() do { } while (0) | ||
104 | #define __iowmb() do { } while (0) | ||
105 | #endif | ||
106 | |||
98 | /* | 107 | /* |
99 | * Now, pick up the machine-defined IO definitions | 108 | * Now, pick up the machine-defined IO definitions |
100 | */ | 109 | */ |
@@ -125,17 +134,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr) | |||
125 | * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. | 134 | * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. |
126 | */ | 135 | */ |
127 | #ifdef __io | 136 | #ifdef __io |
128 | #define outb(v,p) __raw_writeb(v,__io(p)) | 137 | #define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); }) |
129 | #define outw(v,p) __raw_writew((__force __u16) \ | 138 | #define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \ |
130 | cpu_to_le16(v),__io(p)) | 139 | cpu_to_le16(v),__io(p)); }) |
131 | #define outl(v,p) __raw_writel((__force __u32) \ | 140 | #define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \ |
132 | cpu_to_le32(v),__io(p)) | 141 | cpu_to_le32(v),__io(p)); }) |
133 | 142 | ||
134 | #define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; }) | 143 | #define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; }) |
135 | #define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \ | 144 | #define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \ |
136 | __raw_readw(__io(p))); __v; }) | 145 | __raw_readw(__io(p))); __iormb(); __v; }) |
137 | #define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \ | 146 | #define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \ |
138 | __raw_readl(__io(p))); __v; }) | 147 | __raw_readl(__io(p))); __iormb(); __v; }) |
139 | 148 | ||
140 | #define outsb(p,d,l) __raw_writesb(__io(p),d,l) | 149 | #define outsb(p,d,l) __raw_writesb(__io(p),d,l) |
141 | #define outsw(p,d,l) __raw_writesw(__io(p),d,l) | 150 | #define outsw(p,d,l) __raw_writesw(__io(p),d,l) |
@@ -192,14 +201,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t); | |||
192 | #define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ | 201 | #define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ |
193 | cpu_to_le32(v),__mem_pci(c))) | 202 | cpu_to_le32(v),__mem_pci(c))) |
194 | 203 | ||
195 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
196 | #define __iormb() rmb() | ||
197 | #define __iowmb() wmb() | ||
198 | #else | ||
199 | #define __iormb() do { } while (0) | ||
200 | #define __iowmb() do { } while (0) | ||
201 | #endif | ||
202 | |||
203 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) | 204 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) |
204 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) | 205 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) |
205 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) | 206 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) |
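Moving __iormb()/__iowmb() up makes them available to the x86-style port accessors, so inb()/outb() and friends now carry the same ordering guarantees as readl()/writel() when CONFIG_ARM_DMA_MEM_BUFFERABLE is enabled. A sketch of why the barrier in the read accessors matters; the register layout and names are made up:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_STATUS_REG	0x10		/* hypothetical device register */
#define MY_DONE		(1u << 0)

static int poll_and_consume(void __iomem *regs, const u32 *dma_desc)
{
	/* readl() = readl_relaxed() + __iormb(), so the dma_desc load
	 * below cannot be observed before the device reports completion */
	if (!(readl(regs + MY_STATUS_REG) & MY_DONE))
		return -EAGAIN;

	return *dma_desc;
}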
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index c0094d8edae4..c2b9b4bdec00 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h | |||
@@ -50,6 +50,9 @@ static inline void crash_setup_regs(struct pt_regs *newregs, | |||
50 | } | 50 | } |
51 | } | 51 | } |
52 | 52 | ||
53 | /* Function pointer to optional machine-specific reinitialization */ | ||
54 | extern void (*kexec_reinit)(void); | ||
55 | |||
53 | #endif /* __ASSEMBLY__ */ | 56 | #endif /* __ASSEMBLY__ */ |
54 | 57 | ||
55 | #endif /* CONFIG_KEXEC */ | 58 | #endif /* CONFIG_KEXEC */ |
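kexec_reinit is an optional hook the ARM kexec code invokes (when a board has set it) before control is handed to the new image, giving the platform a chance to quiesce hardware that would otherwise confuse the next kernel's early boot. A sketch of a board wiring it up; everything my_* is hypothetical and CONFIG_KEXEC is assumed:

#include <linux/init.h>
#include <asm/kexec.h>

static void my_board_kexec_reinit(void)
{
	/* e.g. stop DMA masters, put the interrupt controller back into
	 * the state the next kernel's early boot code expects */
}

static int __init my_board_late_init(void)
{
	kexec_reinit = my_board_kexec_reinit;
	return 0;
}
late_initcall(my_board_late_init);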
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index 6bc63ab498ce..080d74f8128d 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h | |||
@@ -44,8 +44,14 @@ int local_timer_ack(void); | |||
44 | /* | 44 | /* |
45 | * Setup a local timer interrupt for a CPU. | 45 | * Setup a local timer interrupt for a CPU. |
46 | */ | 46 | */ |
47 | void local_timer_setup(struct clock_event_device *); | 47 | int local_timer_setup(struct clock_event_device *); |
48 | 48 | ||
49 | #else | ||
50 | |||
51 | static inline int local_timer_setup(struct clock_event_device *evt) | ||
52 | { | ||
53 | return -ENXIO; | ||
54 | } | ||
49 | #endif | 55 | #endif |
50 | 56 | ||
51 | #endif | 57 | #endif |
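Returning int from local_timer_setup(), plus the -ENXIO stub for kernels built without local timers, lets the per-CPU timer path report failure instead of silently registering a dead clock_event_device. A sketch of the caller side (close in spirit to the SMP code, but illustrative rather than copied):

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/localtimer.h>

static void percpu_timer_setup_sketch(struct clock_event_device *evt)
{
	evt->cpumask = cpumask_of(smp_processor_id());

	if (local_timer_setup(evt))
		pr_warning("CPU%d: no local timer\n", smp_processor_id());
}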
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 3a0893a76a3b..bf13b814c1b8 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h | |||
@@ -15,10 +15,6 @@ struct meminfo; | |||
15 | struct sys_timer; | 15 | struct sys_timer; |
16 | 16 | ||
17 | struct machine_desc { | 17 | struct machine_desc { |
18 | /* | ||
19 | * Note! The first two elements are used | ||
20 | * by assembler code in head.S, head-common.S | ||
21 | */ | ||
22 | unsigned int nr; /* architecture number */ | 18 | unsigned int nr; /* architecture number */ |
23 | const char *name; /* architecture name */ | 19 | const char *name; /* architecture name */ |
24 | unsigned long boot_params; /* tagged list */ | 20 | unsigned long boot_params; /* tagged list */ |
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 22ac140edd9e..febe495d0c6e 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h | |||
@@ -34,4 +34,35 @@ do { \ | |||
34 | raw_spin_unlock(&desc->lock); \ | 34 | raw_spin_unlock(&desc->lock); \ |
35 | } while(0) | 35 | } while(0) |
36 | 36 | ||
37 | #ifndef __ASSEMBLY__ | ||
38 | /* | ||
39 | * Entry/exit functions for chained handlers where the primary IRQ chip | ||
40 | * may implement either fasteoi or level-trigger flow control. | ||
41 | */ | ||
42 | static inline void chained_irq_enter(struct irq_chip *chip, | ||
43 | struct irq_desc *desc) | ||
44 | { | ||
45 | /* FastEOI controllers require no action on entry. */ | ||
46 | if (chip->irq_eoi) | ||
47 | return; | ||
48 | |||
49 | if (chip->irq_mask_ack) { | ||
50 | chip->irq_mask_ack(&desc->irq_data); | ||
51 | } else { | ||
52 | chip->irq_mask(&desc->irq_data); | ||
53 | if (chip->irq_ack) | ||
54 | chip->irq_ack(&desc->irq_data); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | static inline void chained_irq_exit(struct irq_chip *chip, | ||
59 | struct irq_desc *desc) | ||
60 | { | ||
61 | if (chip->irq_eoi) | ||
62 | chip->irq_eoi(&desc->irq_data); | ||
63 | else | ||
64 | chip->irq_unmask(&desc->irq_data); | ||
65 | } | ||
66 | #endif | ||
67 | |||
37 | #endif | 68 | #endif |
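chained_irq_enter()/chained_irq_exit() factor out the mask/ack-versus-eoi dance that every cascaded interrupt demultiplexer on ARM used to open-code, and they work whether the parent chip is fasteoi or level-triggered. A sketch of a GPIO demux using them; the register offset, IRQ base and my_* names are hypothetical, and the chip accessor is spelled irq_desc_get_chip() here (older trees call it get_irq_desc_chip()):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/mach/irq.h>

#define MY_GPIO_PENDING		0x08	/* hypothetical pending register */
#define MY_GPIO_IRQ_BASE	64	/* hypothetical Linux IRQ base */

static void __iomem *my_gpio_base;	/* mapped elsewhere */

static void my_gpio_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(chip, desc);	/* mask+ack, or nothing for fasteoi */

	pending = readl(my_gpio_base + MY_GPIO_PENDING);
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(MY_GPIO_IRQ_BASE + bit);

	chained_irq_exit(chip, desc);	/* eoi or unmask the parent */
}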
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 23c2e8e5c0fa..431077c5a867 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/const.h> | 17 | #include <linux/const.h> |
18 | #include <linux/types.h> | ||
18 | #include <mach/memory.h> | 19 | #include <mach/memory.h> |
19 | #include <asm/sizes.h> | 20 | #include <asm/sizes.h> |
20 | 21 | ||
@@ -133,20 +134,10 @@ | |||
133 | #endif | 134 | #endif |
134 | 135 | ||
135 | /* | 136 | /* |
136 | * Physical vs virtual RAM address space conversion. These are | ||
137 | * private definitions which should NOT be used outside memory.h | ||
138 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | ||
139 | */ | ||
140 | #ifndef __virt_to_phys | ||
141 | #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) | ||
142 | #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) | ||
143 | #endif | ||
144 | |||
145 | /* | ||
146 | * Convert a physical address to a Page Frame Number and back | 137 | * Convert a physical address to a Page Frame Number and back |
147 | */ | 138 | */ |
148 | #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) | 139 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) |
149 | #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) | 140 | #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) |
150 | 141 | ||
151 | /* | 142 | /* |
152 | * Convert a page to/from a physical address | 143 | * Convert a page to/from a physical address |
@@ -157,6 +148,62 @@ | |||
157 | #ifndef __ASSEMBLY__ | 148 | #ifndef __ASSEMBLY__ |
158 | 149 | ||
159 | /* | 150 | /* |
151 | * Physical vs virtual RAM address space conversion. These are | ||
152 | * private definitions which should NOT be used outside memory.h | ||
153 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | ||
154 | */ | ||
155 | #ifndef __virt_to_phys | ||
156 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT | ||
157 | |||
158 | /* | ||
159 | * Constants used to force the right instruction encodings and shifts | ||
160 | * so that all we need to do is modify the 8-bit constant field. | ||
161 | */ | ||
162 | #define __PV_BITS_31_24 0x81000000 | ||
163 | #define __PV_BITS_23_16 0x00810000 | ||
164 | |||
165 | extern unsigned long __pv_phys_offset; | ||
166 | #define PHYS_OFFSET __pv_phys_offset | ||
167 | |||
168 | #define __pv_stub(from,to,instr,type) \ | ||
169 | __asm__("@ __pv_stub\n" \ | ||
170 | "1: " instr " %0, %1, %2\n" \ | ||
171 | " .pushsection .pv_table,\"a\"\n" \ | ||
172 | " .long 1b\n" \ | ||
173 | " .popsection\n" \ | ||
174 | : "=r" (to) \ | ||
175 | : "r" (from), "I" (type)) | ||
176 | |||
177 | static inline unsigned long __virt_to_phys(unsigned long x) | ||
178 | { | ||
179 | unsigned long t; | ||
180 | __pv_stub(x, t, "add", __PV_BITS_31_24); | ||
181 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT | ||
182 | __pv_stub(t, t, "add", __PV_BITS_23_16); | ||
183 | #endif | ||
184 | return t; | ||
185 | } | ||
186 | |||
187 | static inline unsigned long __phys_to_virt(unsigned long x) | ||
188 | { | ||
189 | unsigned long t; | ||
190 | __pv_stub(x, t, "sub", __PV_BITS_31_24); | ||
191 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT | ||
192 | __pv_stub(t, t, "sub", __PV_BITS_23_16); | ||
193 | #endif | ||
194 | return t; | ||
195 | } | ||
196 | #else | ||
197 | #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) | ||
198 | #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) | ||
199 | #endif | ||
200 | #endif | ||
201 | |||
202 | #ifndef PHYS_OFFSET | ||
203 | #define PHYS_OFFSET PLAT_PHYS_OFFSET | ||
204 | #endif | ||
205 | |||
206 | /* | ||
160 | * The DMA mask corresponding to the maximum bus address allocatable | 207 | * The DMA mask corresponding to the maximum bus address allocatable |
161 | * using GFP_DMA. The default here places no restriction on DMA | 208 | * using GFP_DMA. The default here places no restriction on DMA |
162 | * allocations. This must be the smallest DMA mask in the system, | 209 | * allocations. This must be the smallest DMA mask in the system, |
@@ -188,12 +235,12 @@ | |||
188 | * translation for translating DMA addresses. Use the driver | 235 | * translation for translating DMA addresses. Use the driver |
189 | * DMA support - see dma-mapping.h. | 236 | * DMA support - see dma-mapping.h. |
190 | */ | 237 | */ |
191 | static inline unsigned long virt_to_phys(void *x) | 238 | static inline phys_addr_t virt_to_phys(const volatile void *x) |
192 | { | 239 | { |
193 | return __virt_to_phys((unsigned long)(x)); | 240 | return __virt_to_phys((unsigned long)(x)); |
194 | } | 241 | } |
195 | 242 | ||
196 | static inline void *phys_to_virt(unsigned long x) | 243 | static inline void *phys_to_virt(phys_addr_t x) |
197 | { | 244 | { |
198 | return (void *)(__phys_to_virt((unsigned long)(x))); | 245 | return (void *)(__phys_to_virt((unsigned long)(x))); |
199 | } | 246 | } |
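The __pv_stub() above emits an add or sub whose immediate encoding is pinned by __PV_BITS_31_24 / __PV_BITS_23_16 and records the instruction's address in the .pv_table section; early boot code can then rewrite just the rotated 8-bit constant once the real PHYS_OFFSET is known. Callers see no difference, as in this sketch:

#include <linux/bug.h>
#include <asm/memory.h>

static void p2v_roundtrip_check(void *kva)
{
	/* with CONFIG_ARM_PATCH_PHYS_VIRT the add/sub inside these
	 * helpers carry a boot-time patched offset rather than a
	 * compile-time PHYS_OFFSET */
	phys_addr_t pa = virt_to_phys(kva);

	BUG_ON(phys_to_virt(pa) != kva);
}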
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 12c8e680cbff..543b44916d2c 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h | |||
@@ -25,8 +25,31 @@ struct mod_arch_specific { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Include the ARM architecture version. | 28 | * Add the ARM architecture version to the version magic string |
29 | */ | 29 | */ |
30 | #define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " " | 30 | #define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " " |
31 | |||
32 | /* Add __virt_to_phys patching state as well */ | ||
33 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT | ||
34 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT | ||
35 | #define MODULE_ARCH_VERMAGIC_P2V "p2v16 " | ||
36 | #else | ||
37 | #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " | ||
38 | #endif | ||
39 | #else | ||
40 | #define MODULE_ARCH_VERMAGIC_P2V "" | ||
41 | #endif | ||
42 | |||
43 | /* Add instruction set architecture tag to distinguish ARM/Thumb kernels */ | ||
44 | #ifdef CONFIG_THUMB2_KERNEL | ||
45 | #define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 " | ||
46 | #else | ||
47 | #define MODULE_ARCH_VERMAGIC_ARMTHUMB "" | ||
48 | #endif | ||
49 | |||
50 | #define MODULE_ARCH_VERMAGIC \ | ||
51 | MODULE_ARCH_VERMAGIC_ARMVSN \ | ||
52 | MODULE_ARCH_VERMAGIC_ARMTHUMB \ | ||
53 | MODULE_ARCH_VERMAGIC_P2V | ||
31 | 54 | ||
32 | #endif /* _ASM_ARM_MODULE_H */ | 55 | #endif /* _ASM_ARM_MODULE_H */ |
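Splitting the vermagic string keeps the three ABI-affecting choices (architecture version, instruction set, phys/virt patching mode) independently visible, so a mismatched module is refused at load time with a readable reason. Possible expansions, purely illustrative:

/* Illustrative MODULE_ARCH_VERMAGIC expansions for __LINUX_ARM_ARCH__ == 7:
 *
 *   ARM kernel, no p2v patching:          "ARMv7 "
 *   Thumb-2 kernel, 8-bit p2v offsets:    "ARMv7 thumb2 p2v8 "
 *   ARM kernel, 16-bit p2v offsets:       "ARMv7 p2v16 "
 *
 * A module built against a kernel with a different combination fails
 * the vermagic check in the module loader.
 */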
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index fc1900925275..d8387437ec5a 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h | |||
@@ -21,6 +21,8 @@ | |||
21 | #ifndef __ASM_OUTERCACHE_H | 21 | #ifndef __ASM_OUTERCACHE_H |
22 | #define __ASM_OUTERCACHE_H | 22 | #define __ASM_OUTERCACHE_H |
23 | 23 | ||
24 | #include <linux/types.h> | ||
25 | |||
24 | struct outer_cache_fns { | 26 | struct outer_cache_fns { |
25 | void (*inv_range)(unsigned long, unsigned long); | 27 | void (*inv_range)(unsigned long, unsigned long); |
26 | void (*clean_range)(unsigned long, unsigned long); | 28 | void (*clean_range)(unsigned long, unsigned long); |
@@ -31,23 +33,24 @@ struct outer_cache_fns { | |||
31 | #ifdef CONFIG_OUTER_CACHE_SYNC | 33 | #ifdef CONFIG_OUTER_CACHE_SYNC |
32 | void (*sync)(void); | 34 | void (*sync)(void); |
33 | #endif | 35 | #endif |
36 | void (*set_debug)(unsigned long); | ||
34 | }; | 37 | }; |
35 | 38 | ||
36 | #ifdef CONFIG_OUTER_CACHE | 39 | #ifdef CONFIG_OUTER_CACHE |
37 | 40 | ||
38 | extern struct outer_cache_fns outer_cache; | 41 | extern struct outer_cache_fns outer_cache; |
39 | 42 | ||
40 | static inline void outer_inv_range(unsigned long start, unsigned long end) | 43 | static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) |
41 | { | 44 | { |
42 | if (outer_cache.inv_range) | 45 | if (outer_cache.inv_range) |
43 | outer_cache.inv_range(start, end); | 46 | outer_cache.inv_range(start, end); |
44 | } | 47 | } |
45 | static inline void outer_clean_range(unsigned long start, unsigned long end) | 48 | static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) |
46 | { | 49 | { |
47 | if (outer_cache.clean_range) | 50 | if (outer_cache.clean_range) |
48 | outer_cache.clean_range(start, end); | 51 | outer_cache.clean_range(start, end); |
49 | } | 52 | } |
50 | static inline void outer_flush_range(unsigned long start, unsigned long end) | 53 | static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) |
51 | { | 54 | { |
52 | if (outer_cache.flush_range) | 55 | if (outer_cache.flush_range) |
53 | outer_cache.flush_range(start, end); | 56 | outer_cache.flush_range(start, end); |
@@ -73,11 +76,11 @@ static inline void outer_disable(void) | |||
73 | 76 | ||
74 | #else | 77 | #else |
75 | 78 | ||
76 | static inline void outer_inv_range(unsigned long start, unsigned long end) | 79 | static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) |
77 | { } | 80 | { } |
78 | static inline void outer_clean_range(unsigned long start, unsigned long end) | 81 | static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) |
79 | { } | 82 | { } |
80 | static inline void outer_flush_range(unsigned long start, unsigned long end) | 83 | static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) |
81 | { } | 84 | { } |
82 | static inline void outer_flush_all(void) { } | 85 | static inline void outer_flush_all(void) { } |
83 | static inline void outer_inv_all(void) { } | 86 | static inline void outer_inv_all(void) { } |
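Switching the range-based outer-cache helpers to phys_addr_t matches the rest of this series (membank, virt_to_phys) and keeps them usable once physical addresses stop fitting in unsigned long. A sketch of a typical caller; the buffer handling around it is illustrative:

#include <linux/types.h>
#include <asm/memory.h>
#include <asm/outercache.h>

static void clean_buffer_for_device(void *vaddr, size_t size)
{
	phys_addr_t pa = virt_to_phys(vaddr);

	/* push dirty lines in the outer (e.g. PL310) cache out to DRAM
	 * before a device reads the buffer; inner-cache maintenance is
	 * assumed to have been done already */
	outer_clean_range(pa, pa + size);
}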
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 9763be04f77e..22de005f159c 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef _ASMARM_PGALLOC_H | 10 | #ifndef _ASMARM_PGALLOC_H |
11 | #define _ASMARM_PGALLOC_H | 11 | #define _ASMARM_PGALLOC_H |
12 | 12 | ||
13 | #include <linux/pagemap.h> | ||
14 | |||
13 | #include <asm/domain.h> | 15 | #include <asm/domain.h> |
14 | #include <asm/pgtable-hwdef.h> | 16 | #include <asm/pgtable-hwdef.h> |
15 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ebcb6432f45f..5750704e0271 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -301,6 +301,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |||
301 | #define pgd_present(pgd) (1) | 301 | #define pgd_present(pgd) (1) |
302 | #define pgd_clear(pgdp) do { } while (0) | 302 | #define pgd_clear(pgdp) do { } while (0) |
303 | #define set_pgd(pgd,pgdp) do { } while (0) | 303 | #define set_pgd(pgd,pgdp) do { } while (0) |
304 | #define set_pud(pud,pudp) do { } while (0) | ||
304 | 305 | ||
305 | 306 | ||
306 | /* Find an entry in the second-level page table.. */ | 307 | /* Find an entry in the second-level page table.. */ |
@@ -351,7 +352,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) | |||
351 | #define pte_unmap(pte) __pte_unmap(pte) | 352 | #define pte_unmap(pte) __pte_unmap(pte) |
352 | 353 | ||
353 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | 354 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) |
354 | #define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 355 | #define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) |
355 | 356 | ||
356 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | 357 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
357 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) | 358 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) |
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 8ccea012722c..7544ce6b481a 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h | |||
@@ -12,11 +12,25 @@ | |||
12 | #ifndef __ARM_PMU_H__ | 12 | #ifndef __ARM_PMU_H__ |
13 | #define __ARM_PMU_H__ | 13 | #define __ARM_PMU_H__ |
14 | 14 | ||
15 | #include <linux/interrupt.h> | ||
16 | |||
15 | enum arm_pmu_type { | 17 | enum arm_pmu_type { |
16 | ARM_PMU_DEVICE_CPU = 0, | 18 | ARM_PMU_DEVICE_CPU = 0, |
17 | ARM_NUM_PMU_DEVICES, | 19 | ARM_NUM_PMU_DEVICES, |
18 | }; | 20 | }; |
19 | 21 | ||
22 | /* | ||
23 | * struct arm_pmu_platdata - ARM PMU platform data | ||
24 | * | ||
25 | * @handle_irq: an optional handler which will be called from the interrupt and | ||
26 | * passed the address of the low level handler, and can be used to implement | ||
27 | * any platform specific handling before or after calling it. | ||
28 | */ | ||
29 | struct arm_pmu_platdata { | ||
30 | irqreturn_t (*handle_irq)(int irq, void *dev, | ||
31 | irq_handler_t pmu_handler); | ||
32 | }; | ||
33 | |||
20 | #ifdef CONFIG_CPU_HAS_PMU | 34 | #ifdef CONFIG_CPU_HAS_PMU |
21 | 35 | ||
22 | /** | 36 | /** |
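struct arm_pmu_platdata lets a platform interpose on the PMU interrupt, for example to acknowledge a cross-trigger interface before or after the generic handler runs. A sketch of the hook side only; registration of the platform device is omitted and all my_* names are hypothetical:

#include <linux/interrupt.h>
#include <asm/pmu.h>

static irqreturn_t my_pmu_irq_wrapper(int irq, void *dev,
				      irq_handler_t pmu_handler)
{
	/* platform-specific work before the core handler, e.g. CTI ack */
	return pmu_handler(irq, dev);
}

static struct arm_pmu_platdata my_pmu_platdata = {
	.handle_irq = my_pmu_irq_wrapper,
};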
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9bc9abb..8ec535e11fd7 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
@@ -13,250 +13,86 @@ | |||
13 | 13 | ||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | 15 | ||
16 | #include <asm/glue-proc.h> | ||
17 | #include <asm/page.h> | ||
16 | 18 | ||
17 | /* | 19 | #ifndef __ASSEMBLY__ |
18 | * Work out if we need multiple CPU support | 20 | |
19 | */ | 21 | struct mm_struct; |
20 | #undef MULTI_CPU | ||
21 | #undef CPU_NAME | ||
22 | 22 | ||
23 | /* | 23 | /* |
24 | * CPU_NAME - the prefix for CPU related functions | 24 | * Don't change this structure - ASM code relies on it. |
25 | */ | 25 | */ |
26 | 26 | extern struct processor { | |
27 | #ifdef CONFIG_CPU_ARM610 | 27 | /* MISC |
28 | # ifdef CPU_NAME | 28 | * get data abort address/flags |
29 | # undef MULTI_CPU | 29 | */ |
30 | # define MULTI_CPU | 30 | void (*_data_abort)(unsigned long pc); |
31 | # else | 31 | /* |
32 | # define CPU_NAME cpu_arm6 | 32 | * Retrieve prefetch fault address |
33 | # endif | 33 | */ |
34 | #endif | 34 | unsigned long (*_prefetch_abort)(unsigned long lr); |
35 | 35 | /* | |
36 | #ifdef CONFIG_CPU_ARM7TDMI | 36 | * Set up any processor specifics |
37 | # ifdef CPU_NAME | 37 | */ |
38 | # undef MULTI_CPU | 38 | void (*_proc_init)(void); |
39 | # define MULTI_CPU | 39 | /* |
40 | # else | 40 | * Disable any processor specifics |
41 | # define CPU_NAME cpu_arm7tdmi | 41 | */ |
42 | # endif | 42 | void (*_proc_fin)(void); |
43 | #endif | 43 | /* |
44 | 44 | * Special stuff for a reset | |
45 | #ifdef CONFIG_CPU_ARM710 | 45 | */ |
46 | # ifdef CPU_NAME | 46 | void (*reset)(unsigned long addr) __attribute__((noreturn)); |
47 | # undef MULTI_CPU | 47 | /* |
48 | # define MULTI_CPU | 48 | * Idle the processor |
49 | # else | 49 | */ |
50 | # define CPU_NAME cpu_arm7 | 50 | int (*_do_idle)(void); |
51 | # endif | 51 | /* |
52 | #endif | 52 | * Processor architecture specific |
53 | 53 | */ | |
54 | #ifdef CONFIG_CPU_ARM720T | 54 | /* |
55 | # ifdef CPU_NAME | 55 | * clean a virtual address range from the |
56 | # undef MULTI_CPU | 56 | * D-cache without flushing the cache. |
57 | # define MULTI_CPU | 57 | */ |
58 | # else | 58 | void (*dcache_clean_area)(void *addr, int size); |
59 | # define CPU_NAME cpu_arm720 | 59 | |
60 | # endif | 60 | /* |
61 | #endif | 61 | * Set the page table |
62 | 62 | */ | |
63 | #ifdef CONFIG_CPU_ARM740T | 63 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); |
64 | # ifdef CPU_NAME | 64 | /* |
65 | # undef MULTI_CPU | 65 | * Set a possibly extended PTE. Non-extended PTEs should |
66 | # define MULTI_CPU | 66 | * ignore 'ext'. |
67 | # else | 67 | */ |
68 | # define CPU_NAME cpu_arm740 | 68 | void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); |
69 | # endif | 69 | |
70 | #endif | 70 | /* Suspend/resume */ |
71 | 71 | unsigned int suspend_size; | |
72 | #ifdef CONFIG_CPU_ARM9TDMI | 72 | void (*do_suspend)(void *); |
73 | # ifdef CPU_NAME | 73 | void (*do_resume)(void *); |
74 | # undef MULTI_CPU | 74 | } processor; |
75 | # define MULTI_CPU | ||
76 | # else | ||
77 | # define CPU_NAME cpu_arm9tdmi | ||
78 | # endif | ||
79 | #endif | ||
80 | |||
81 | #ifdef CONFIG_CPU_ARM920T | ||
82 | # ifdef CPU_NAME | ||
83 | # undef MULTI_CPU | ||
84 | # define MULTI_CPU | ||
85 | # else | ||
86 | # define CPU_NAME cpu_arm920 | ||
87 | # endif | ||
88 | #endif | ||
89 | |||
90 | #ifdef CONFIG_CPU_ARM922T | ||
91 | # ifdef CPU_NAME | ||
92 | # undef MULTI_CPU | ||
93 | # define MULTI_CPU | ||
94 | # else | ||
95 | # define CPU_NAME cpu_arm922 | ||
96 | # endif | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_CPU_FA526 | ||
100 | # ifdef CPU_NAME | ||
101 | # undef MULTI_CPU | ||
102 | # define MULTI_CPU | ||
103 | # else | ||
104 | # define CPU_NAME cpu_fa526 | ||
105 | # endif | ||
106 | #endif | ||
107 | |||
108 | #ifdef CONFIG_CPU_ARM925T | ||
109 | # ifdef CPU_NAME | ||
110 | # undef MULTI_CPU | ||
111 | # define MULTI_CPU | ||
112 | # else | ||
113 | # define CPU_NAME cpu_arm925 | ||
114 | # endif | ||
115 | #endif | ||
116 | |||
117 | #ifdef CONFIG_CPU_ARM926T | ||
118 | # ifdef CPU_NAME | ||
119 | # undef MULTI_CPU | ||
120 | # define MULTI_CPU | ||
121 | # else | ||
122 | # define CPU_NAME cpu_arm926 | ||
123 | # endif | ||
124 | #endif | ||
125 | |||
126 | #ifdef CONFIG_CPU_ARM940T | ||
127 | # ifdef CPU_NAME | ||
128 | # undef MULTI_CPU | ||
129 | # define MULTI_CPU | ||
130 | # else | ||
131 | # define CPU_NAME cpu_arm940 | ||
132 | # endif | ||
133 | #endif | ||
134 | |||
135 | #ifdef CONFIG_CPU_ARM946E | ||
136 | # ifdef CPU_NAME | ||
137 | # undef MULTI_CPU | ||
138 | # define MULTI_CPU | ||
139 | # else | ||
140 | # define CPU_NAME cpu_arm946 | ||
141 | # endif | ||
142 | #endif | ||
143 | |||
144 | #ifdef CONFIG_CPU_SA110 | ||
145 | # ifdef CPU_NAME | ||
146 | # undef MULTI_CPU | ||
147 | # define MULTI_CPU | ||
148 | # else | ||
149 | # define CPU_NAME cpu_sa110 | ||
150 | # endif | ||
151 | #endif | ||
152 | |||
153 | #ifdef CONFIG_CPU_SA1100 | ||
154 | # ifdef CPU_NAME | ||
155 | # undef MULTI_CPU | ||
156 | # define MULTI_CPU | ||
157 | # else | ||
158 | # define CPU_NAME cpu_sa1100 | ||
159 | # endif | ||
160 | #endif | ||
161 | |||
162 | #ifdef CONFIG_CPU_ARM1020 | ||
163 | # ifdef CPU_NAME | ||
164 | # undef MULTI_CPU | ||
165 | # define MULTI_CPU | ||
166 | # else | ||
167 | # define CPU_NAME cpu_arm1020 | ||
168 | # endif | ||
169 | #endif | ||
170 | |||
171 | #ifdef CONFIG_CPU_ARM1020E | ||
172 | # ifdef CPU_NAME | ||
173 | # undef MULTI_CPU | ||
174 | # define MULTI_CPU | ||
175 | # else | ||
176 | # define CPU_NAME cpu_arm1020e | ||
177 | # endif | ||
178 | #endif | ||
179 | |||
180 | #ifdef CONFIG_CPU_ARM1022 | ||
181 | # ifdef CPU_NAME | ||
182 | # undef MULTI_CPU | ||
183 | # define MULTI_CPU | ||
184 | # else | ||
185 | # define CPU_NAME cpu_arm1022 | ||
186 | # endif | ||
187 | #endif | ||
188 | |||
189 | #ifdef CONFIG_CPU_ARM1026 | ||
190 | # ifdef CPU_NAME | ||
191 | # undef MULTI_CPU | ||
192 | # define MULTI_CPU | ||
193 | # else | ||
194 | # define CPU_NAME cpu_arm1026 | ||
195 | # endif | ||
196 | #endif | ||
197 | |||
198 | #ifdef CONFIG_CPU_XSCALE | ||
199 | # ifdef CPU_NAME | ||
200 | # undef MULTI_CPU | ||
201 | # define MULTI_CPU | ||
202 | # else | ||
203 | # define CPU_NAME cpu_xscale | ||
204 | # endif | ||
205 | #endif | ||
206 | |||
207 | #ifdef CONFIG_CPU_XSC3 | ||
208 | # ifdef CPU_NAME | ||
209 | # undef MULTI_CPU | ||
210 | # define MULTI_CPU | ||
211 | # else | ||
212 | # define CPU_NAME cpu_xsc3 | ||
213 | # endif | ||
214 | #endif | ||
215 | |||
216 | #ifdef CONFIG_CPU_MOHAWK | ||
217 | # ifdef CPU_NAME | ||
218 | # undef MULTI_CPU | ||
219 | # define MULTI_CPU | ||
220 | # else | ||
221 | # define CPU_NAME cpu_mohawk | ||
222 | # endif | ||
223 | #endif | ||
224 | |||
225 | #ifdef CONFIG_CPU_FEROCEON | ||
226 | # ifdef CPU_NAME | ||
227 | # undef MULTI_CPU | ||
228 | # define MULTI_CPU | ||
229 | # else | ||
230 | # define CPU_NAME cpu_feroceon | ||
231 | # endif | ||
232 | #endif | ||
233 | |||
234 | #ifdef CONFIG_CPU_V6 | ||
235 | # ifdef CPU_NAME | ||
236 | # undef MULTI_CPU | ||
237 | # define MULTI_CPU | ||
238 | # else | ||
239 | # define CPU_NAME cpu_v6 | ||
240 | # endif | ||
241 | #endif | ||
242 | |||
243 | #ifdef CONFIG_CPU_V7 | ||
244 | # ifdef CPU_NAME | ||
245 | # undef MULTI_CPU | ||
246 | # define MULTI_CPU | ||
247 | # else | ||
248 | # define CPU_NAME cpu_v7 | ||
249 | # endif | ||
250 | #endif | ||
251 | |||
252 | #ifndef __ASSEMBLY__ | ||
253 | 75 | ||
254 | #ifndef MULTI_CPU | 76 | #ifndef MULTI_CPU |
255 | #include <asm/cpu-single.h> | 77 | extern void cpu_proc_init(void); |
78 | extern void cpu_proc_fin(void); | ||
79 | extern int cpu_do_idle(void); | ||
80 | extern void cpu_dcache_clean_area(void *, int); | ||
81 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | ||
82 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); | ||
83 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); | ||
256 | #else | 84 | #else |
257 | #include <asm/cpu-multi32.h> | 85 | #define cpu_proc_init() processor._proc_init() |
86 | #define cpu_proc_fin() processor._proc_fin() | ||
87 | #define cpu_reset(addr) processor.reset(addr) | ||
88 | #define cpu_do_idle() processor._do_idle() | ||
89 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) | ||
90 | #define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) | ||
91 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) | ||
258 | #endif | 92 | #endif |
259 | 93 | ||
94 | extern void cpu_resume(void); | ||
95 | |||
260 | #include <asm/memory.h> | 96 | #include <asm/memory.h> |
261 | 97 | ||
262 | #ifdef CONFIG_MMU | 98 | #ifdef CONFIG_MMU |
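The long per-CPU #ifdef ladder is gone; asm/glue-proc.h now makes the single/multi decision and this header only exposes the interface, either as direct cpu_*() symbols or through the processor function vector. Callers are unchanged either way, as in this sketch:

#include <linux/sched.h>
#include <asm/proc-fns.h>

static void idle_sketch(void)
{
	/* single-CPU build: direct call to the one cpu_do_idle()
	 * MULTI_CPU build:  expands to processor._do_idle() */
	if (!need_resched())
		cpu_do_idle();
}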
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeeeb..b2d9df5667af 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h | |||
@@ -29,19 +29,7 @@ | |||
29 | #define STACK_TOP_MAX TASK_SIZE | 29 | #define STACK_TOP_MAX TASK_SIZE |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | union debug_insn { | ||
33 | u32 arm; | ||
34 | u16 thumb; | ||
35 | }; | ||
36 | |||
37 | struct debug_entry { | ||
38 | u32 address; | ||
39 | union debug_insn insn; | ||
40 | }; | ||
41 | |||
42 | struct debug_info { | 32 | struct debug_info { |
43 | int nsaved; | ||
44 | struct debug_entry bp[2]; | ||
45 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 33 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
46 | struct perf_event *hbp[ARM_MAX_HBP_SLOTS]; | 34 | struct perf_event *hbp[ARM_MAX_HBP_SLOTS]; |
47 | #endif | 35 | #endif |
@@ -95,7 +83,7 @@ extern void release_thread(struct task_struct *); | |||
95 | 83 | ||
96 | unsigned long get_wchan(struct task_struct *p); | 84 | unsigned long get_wchan(struct task_struct *p); |
97 | 85 | ||
98 | #if __LINUX_ARM_ARCH__ == 6 | 86 | #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) |
99 | #define cpu_relax() smp_mb() | 87 | #define cpu_relax() smp_mb() |
100 | #else | 88 | #else |
101 | #define cpu_relax() barrier() | 89 | #define cpu_relax() barrier() |
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 783d50f32618..a8ff22b2a391 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h | |||
@@ -130,8 +130,6 @@ struct pt_regs { | |||
130 | 130 | ||
131 | #ifdef __KERNEL__ | 131 | #ifdef __KERNEL__ |
132 | 132 | ||
133 | #define arch_has_single_step() (1) | ||
134 | |||
135 | #define user_mode(regs) \ | 133 | #define user_mode(regs) \ |
136 | (((regs)->ARM_cpsr & 0xf) == 0) | 134 | (((regs)->ARM_cpsr & 0xf) == 0) |
137 | 135 | ||
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index f1e5a9bca249..95176af3df8c 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h | |||
@@ -192,14 +192,10 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn } | |||
192 | /* | 192 | /* |
193 | * Memory map description | 193 | * Memory map description |
194 | */ | 194 | */ |
195 | #ifdef CONFIG_ARCH_LH7A40X | 195 | #define NR_BANKS 8 |
196 | # define NR_BANKS 16 | ||
197 | #else | ||
198 | # define NR_BANKS 8 | ||
199 | #endif | ||
200 | 196 | ||
201 | struct membank { | 197 | struct membank { |
202 | unsigned long start; | 198 | phys_addr_t start; |
203 | unsigned long size; | 199 | unsigned long size; |
204 | unsigned int highmem; | 200 | unsigned int highmem; |
205 | }; | 201 | }; |
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 2376835015d6..4eb6d005ffaa 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h | |||
@@ -1,7 +1,14 @@ | |||
1 | #ifndef __ASMARM_ARCH_SCU_H | 1 | #ifndef __ASMARM_ARCH_SCU_H |
2 | #define __ASMARM_ARCH_SCU_H | 2 | #define __ASMARM_ARCH_SCU_H |
3 | 3 | ||
4 | #define SCU_PM_NORMAL 0 | ||
5 | #define SCU_PM_DORMANT 2 | ||
6 | #define SCU_PM_POWEROFF 3 | ||
7 | |||
8 | #ifndef __ASSEMBLER__ | ||
4 | unsigned int scu_get_core_count(void __iomem *); | 9 | unsigned int scu_get_core_count(void __iomem *); |
5 | void scu_enable(void __iomem *); | 10 | void scu_enable(void __iomem *); |
11 | int scu_power_mode(void __iomem *, unsigned int); | ||
12 | #endif | ||
6 | 13 | ||
7 | #endif | 14 | #endif |
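SCU_PM_DORMANT and SCU_PM_POWEROFF name the per-CPU power status values in the Snoop Control Unit, and scu_power_mode() is what a hotplug or suspend path calls before the core is actually taken down. A sketch, assuming scu_base is an existing mapping of the SCU:

#include <asm/cacheflush.h>
#include <asm/smp_scu.h>

static void my_cpu_enter_lowpower(void __iomem *scu_base)
{
	flush_cache_all();
	scu_power_mode(scu_base, SCU_PM_POWEROFF);
	/* the power controller may now cut this core's power; callers
	 * typically follow this with a wfi loop */
}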
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355707dd..fdd3820edff8 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h | |||
@@ -5,17 +5,52 @@ | |||
5 | #error SMP not supported on pre-ARMv6 CPUs | 5 | #error SMP not supported on pre-ARMv6 CPUs |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | /* | ||
9 | * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K | ||
10 | * extensions, so when running on UP, we have to patch these instructions away. | ||
11 | */ | ||
12 | #define ALT_SMP(smp, up) \ | ||
13 | "9998: " smp "\n" \ | ||
14 | " .pushsection \".alt.smp.init\", \"a\"\n" \ | ||
15 | " .long 9998b\n" \ | ||
16 | " " up "\n" \ | ||
17 | " .popsection\n" | ||
18 | |||
19 | #ifdef CONFIG_THUMB2_KERNEL | ||
20 | #define SEV ALT_SMP("sev.w", "nop.w") | ||
21 | /* | ||
22 | * For Thumb-2, special care is needed to ensure that the conditional WFE | ||
23 | * instruction really does assemble to exactly 4 bytes (as required by | ||
24 | * the SMP_ON_UP fixup code). By itself "wfene" might cause the | ||
25 | * assembler to insert an extra (16-bit) IT instruction, depending on the | ||
26 | * presence or absence of neighbouring conditional instructions. | ||
27 | * | ||
28 | * To avoid this unpredictability, an appropriate IT is inserted explicitly: | ||
29 | * the assembler won't change IT instructions which are explicitly present | ||
30 | * in the input. | ||
31 | */ | ||
32 | #define WFE(cond) ALT_SMP( \ | ||
33 | "it " cond "\n\t" \ | ||
34 | "wfe" cond ".n", \ | ||
35 | \ | ||
36 | "nop.w" \ | ||
37 | ) | ||
38 | #else | ||
39 | #define SEV ALT_SMP("sev", "nop") | ||
40 | #define WFE(cond) ALT_SMP("wfe" cond, "nop") | ||
41 | #endif | ||
42 | |||
8 | static inline void dsb_sev(void) | 43 | static inline void dsb_sev(void) |
9 | { | 44 | { |
10 | #if __LINUX_ARM_ARCH__ >= 7 | 45 | #if __LINUX_ARM_ARCH__ >= 7 |
11 | __asm__ __volatile__ ( | 46 | __asm__ __volatile__ ( |
12 | "dsb\n" | 47 | "dsb\n" |
13 | "sev" | 48 | SEV |
14 | ); | 49 | ); |
15 | #elif defined(CONFIG_CPU_32v6K) | 50 | #else |
16 | __asm__ __volatile__ ( | 51 | __asm__ __volatile__ ( |
17 | "mcr p15, 0, %0, c7, c10, 4\n" | 52 | "mcr p15, 0, %0, c7, c10, 4\n" |
18 | "sev" | 53 | SEV |
19 | : : "r" (0) | 54 | : : "r" (0) |
20 | ); | 55 | ); |
21 | #endif | 56 | #endif |
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
46 | __asm__ __volatile__( | 81 | __asm__ __volatile__( |
47 | "1: ldrex %0, [%1]\n" | 82 | "1: ldrex %0, [%1]\n" |
48 | " teq %0, #0\n" | 83 | " teq %0, #0\n" |
49 | #ifdef CONFIG_CPU_32v6K | 84 | WFE("ne") |
50 | " wfene\n" | ||
51 | #endif | ||
52 | " strexeq %0, %2, [%1]\n" | 85 | " strexeq %0, %2, [%1]\n" |
53 | " teqeq %0, #0\n" | 86 | " teqeq %0, #0\n" |
54 | " bne 1b" | 87 | " bne 1b" |
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
107 | __asm__ __volatile__( | 140 | __asm__ __volatile__( |
108 | "1: ldrex %0, [%1]\n" | 141 | "1: ldrex %0, [%1]\n" |
109 | " teq %0, #0\n" | 142 | " teq %0, #0\n" |
110 | #ifdef CONFIG_CPU_32v6K | 143 | WFE("ne") |
111 | " wfene\n" | ||
112 | #endif | ||
113 | " strexeq %0, %2, [%1]\n" | 144 | " strexeq %0, %2, [%1]\n" |
114 | " teq %0, #0\n" | 145 | " teq %0, #0\n" |
115 | " bne 1b" | 146 | " bne 1b" |
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
176 | "1: ldrex %0, [%2]\n" | 207 | "1: ldrex %0, [%2]\n" |
177 | " adds %0, %0, #1\n" | 208 | " adds %0, %0, #1\n" |
178 | " strexpl %1, %0, [%2]\n" | 209 | " strexpl %1, %0, [%2]\n" |
179 | #ifdef CONFIG_CPU_32v6K | 210 | WFE("mi") |
180 | " wfemi\n" | ||
181 | #endif | ||
182 | " rsbpls %0, %1, #0\n" | 211 | " rsbpls %0, %1, #0\n" |
183 | " bmi 1b" | 212 | " bmi 1b" |
184 | : "=&r" (tmp), "=&r" (tmp2) | 213 | : "=&r" (tmp), "=&r" (tmp2) |
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 97f6d60297d5..9a87823642d0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h | |||
@@ -347,6 +347,7 @@ void cpu_idle_wait(void); | |||
347 | #include <asm-generic/cmpxchg-local.h> | 347 | #include <asm-generic/cmpxchg-local.h> |
348 | 348 | ||
349 | #if __LINUX_ARM_ARCH__ < 6 | 349 | #if __LINUX_ARM_ARCH__ < 6 |
350 | /* min ARCH < ARMv6 */ | ||
350 | 351 | ||
351 | #ifdef CONFIG_SMP | 352 | #ifdef CONFIG_SMP |
352 | #error "SMP is not supported on this platform" | 353 | #error "SMP is not supported on this platform" |
@@ -365,7 +366,7 @@ void cpu_idle_wait(void); | |||
365 | #include <asm-generic/cmpxchg.h> | 366 | #include <asm-generic/cmpxchg.h> |
366 | #endif | 367 | #endif |
367 | 368 | ||
368 | #else /* __LINUX_ARM_ARCH__ >= 6 */ | 369 | #else /* min ARCH >= ARMv6 */ |
369 | 370 | ||
370 | extern void __bad_cmpxchg(volatile void *ptr, int size); | 371 | extern void __bad_cmpxchg(volatile void *ptr, int size); |
371 | 372 | ||
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
379 | unsigned long oldval, res; | 380 | unsigned long oldval, res; |
380 | 381 | ||
381 | switch (size) { | 382 | switch (size) { |
382 | #ifdef CONFIG_CPU_32v6K | 383 | #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ |
383 | case 1: | 384 | case 1: |
384 | do { | 385 | do { |
385 | asm volatile("@ __cmpxchg1\n" | 386 | asm volatile("@ __cmpxchg1\n" |
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
404 | : "memory", "cc"); | 405 | : "memory", "cc"); |
405 | } while (res); | 406 | } while (res); |
406 | break; | 407 | break; |
407 | #endif /* CONFIG_CPU_32v6K */ | 408 | #endif |
408 | case 4: | 409 | case 4: |
409 | do { | 410 | do { |
410 | asm volatile("@ __cmpxchg4\n" | 411 | asm volatile("@ __cmpxchg4\n" |
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
450 | unsigned long ret; | 451 | unsigned long ret; |
451 | 452 | ||
452 | switch (size) { | 453 | switch (size) { |
453 | #ifndef CONFIG_CPU_32v6K | 454 | #ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ |
454 | case 1: | 455 | case 1: |
455 | case 2: | 456 | case 2: |
456 | ret = __cmpxchg_local_generic(ptr, old, new, size); | 457 | ret = __cmpxchg_local_generic(ptr, old, new, size); |
457 | break; | 458 | break; |
458 | #endif /* !CONFIG_CPU_32v6K */ | 459 | #endif |
459 | default: | 460 | default: |
460 | ret = __cmpxchg(ptr, old, new, size); | 461 | ret = __cmpxchg(ptr, old, new, size); |
461 | } | 462 | } |
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
469 | (unsigned long)(n), \ | 470 | (unsigned long)(n), \ |
470 | sizeof(*(ptr)))) | 471 | sizeof(*(ptr)))) |
471 | 472 | ||
472 | #ifdef CONFIG_CPU_32v6K | 473 | #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ |
473 | 474 | ||
474 | /* | 475 | /* |
475 | * Note : ARMv7-M (currently unsupported by Linux) does not support | 476 | * Note : ARMv7-M (currently unsupported by Linux) does not support |
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, | |||
524 | (unsigned long long)(o), \ | 525 | (unsigned long long)(o), \ |
525 | (unsigned long long)(n))) | 526 | (unsigned long long)(n))) |
526 | 527 | ||
527 | #else /* !CONFIG_CPU_32v6K */ | 528 | #else /* min ARCH = ARMv6 */ |
528 | 529 | ||
529 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | 530 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) |
530 | 531 | ||
531 | #endif /* CONFIG_CPU_32v6K */ | 532 | #endif |
532 | 533 | ||
533 | #endif /* __LINUX_ARM_ARCH__ >= 6 */ | 534 | #endif /* __LINUX_ARM_ARCH__ >= 6 */ |
534 | 535 | ||
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd12..82dfe5d0c41e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -18,16 +18,34 @@ | |||
18 | #define __ASMARM_TLB_H | 18 | #define __ASMARM_TLB_H |
19 | 19 | ||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/tlbflush.h> | ||
22 | 21 | ||
23 | #ifndef CONFIG_MMU | 22 | #ifndef CONFIG_MMU |
24 | 23 | ||
25 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | |||
26 | #define tlb_flush(tlb) ((void) tlb) | ||
27 | |||
26 | #include <asm-generic/tlb.h> | 28 | #include <asm-generic/tlb.h> |
27 | 29 | ||
28 | #else /* !CONFIG_MMU */ | 30 | #else /* !CONFIG_MMU */ |
29 | 31 | ||
32 | #include <linux/swap.h> | ||
30 | #include <asm/pgalloc.h> | 33 | #include <asm/pgalloc.h> |
34 | #include <asm/tlbflush.h> | ||
35 | |||
36 | /* | ||
37 | * We need to delay page freeing for SMP as other CPUs can access pages | ||
38 | * which have been removed but not yet had their TLB entries invalidated. | ||
39 | * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, | ||
40 | * we need to apply this same delaying tactic to ensure correct operation. | ||
41 | */ | ||
42 | #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) | ||
43 | #define tlb_fast_mode(tlb) 0 | ||
44 | #define FREE_PTE_NR 500 | ||
45 | #else | ||
46 | #define tlb_fast_mode(tlb) 1 | ||
47 | #define FREE_PTE_NR 0 | ||
48 | #endif | ||
31 | 49 | ||
32 | /* | 50 | /* |
33 | * TLB handling. This allows us to remove pages from the page | 51 | * TLB handling. This allows us to remove pages from the page |
@@ -36,12 +54,58 @@ | |||
36 | struct mmu_gather { | 54 | struct mmu_gather { |
37 | struct mm_struct *mm; | 55 | struct mm_struct *mm; |
38 | unsigned int fullmm; | 56 | unsigned int fullmm; |
57 | struct vm_area_struct *vma; | ||
39 | unsigned long range_start; | 58 | unsigned long range_start; |
40 | unsigned long range_end; | 59 | unsigned long range_end; |
60 | unsigned int nr; | ||
61 | struct page *pages[FREE_PTE_NR]; | ||
41 | }; | 62 | }; |
42 | 63 | ||
43 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 64 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
44 | 65 | ||
66 | /* | ||
67 | * This is unnecessarily complex. There are three ways the TLB shootdown | ||
68 | * code is used: | ||
69 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). | ||
70 | * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. | ||
71 | * tlb->vma will be non-NULL. | ||
72 | * 2. Unmapping all vmas. See exit_mmap(). | ||
73 | * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. | ||
74 | * tlb->vma will be non-NULL. Additionally, page tables will be freed. | ||
75 | * 3. Unmapping argument pages. See shift_arg_pages(). | ||
76 | * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. | ||
77 | * tlb->vma will be NULL. | ||
78 | */ | ||
79 | static inline void tlb_flush(struct mmu_gather *tlb) | ||
80 | { | ||
81 | if (tlb->fullmm || !tlb->vma) | ||
82 | flush_tlb_mm(tlb->mm); | ||
83 | else if (tlb->range_end > 0) { | ||
84 | flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); | ||
85 | tlb->range_start = TASK_SIZE; | ||
86 | tlb->range_end = 0; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) | ||
91 | { | ||
92 | if (!tlb->fullmm) { | ||
93 | if (addr < tlb->range_start) | ||
94 | tlb->range_start = addr; | ||
95 | if (addr + PAGE_SIZE > tlb->range_end) | ||
96 | tlb->range_end = addr + PAGE_SIZE; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | ||
101 | { | ||
102 | tlb_flush(tlb); | ||
103 | if (!tlb_fast_mode(tlb)) { | ||
104 | free_pages_and_swap_cache(tlb->pages, tlb->nr); | ||
105 | tlb->nr = 0; | ||
106 | } | ||
107 | } | ||
108 | |||
45 | static inline struct mmu_gather * | 109 | static inline struct mmu_gather * |
46 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | 110 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) |
47 | { | 111 | { |
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
49 | 113 | ||
50 | tlb->mm = mm; | 114 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 115 | tlb->fullmm = full_mm_flush; |
116 | tlb->vma = NULL; | ||
117 | tlb->nr = 0; | ||
52 | 118 | ||
53 | return tlb; | 119 | return tlb; |
54 | } | 120 | } |
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
56 | static inline void | 122 | static inline void |
57 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | 123 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) |
58 | { | 124 | { |
59 | if (tlb->fullmm) | 125 | tlb_flush_mmu(tlb); |
60 | flush_tlb_mm(tlb->mm); | ||
61 | 126 | ||
62 | /* keep the page table cache within bounds */ | 127 | /* keep the page table cache within bounds */ |
63 | check_pgt_cache(); | 128 | check_pgt_cache(); |
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
71 | static inline void | 136 | static inline void |
72 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) | 137 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) |
73 | { | 138 | { |
74 | if (!tlb->fullmm) { | 139 | tlb_add_flush(tlb, addr); |
75 | if (addr < tlb->range_start) | ||
76 | tlb->range_start = addr; | ||
77 | if (addr + PAGE_SIZE > tlb->range_end) | ||
78 | tlb->range_end = addr + PAGE_SIZE; | ||
79 | } | ||
80 | } | 140 | } |
81 | 141 | ||
82 | /* | 142 | /* |
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
89 | { | 149 | { |
90 | if (!tlb->fullmm) { | 150 | if (!tlb->fullmm) { |
91 | flush_cache_range(vma, vma->vm_start, vma->vm_end); | 151 | flush_cache_range(vma, vma->vm_start, vma->vm_end); |
152 | tlb->vma = vma; | ||
92 | tlb->range_start = TASK_SIZE; | 153 | tlb->range_start = TASK_SIZE; |
93 | tlb->range_end = 0; | 154 | tlb->range_end = 0; |
94 | } | 155 | } |
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
97 | static inline void | 158 | static inline void |
98 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | 159 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) |
99 | { | 160 | { |
100 | if (!tlb->fullmm && tlb->range_end > 0) | 161 | if (!tlb->fullmm) |
101 | flush_tlb_range(vma, tlb->range_start, tlb->range_end); | 162 | tlb_flush(tlb); |
163 | } | ||
164 | |||
165 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
166 | { | ||
167 | if (tlb_fast_mode(tlb)) { | ||
168 | free_page_and_swap_cache(page); | ||
169 | } else { | ||
170 | tlb->pages[tlb->nr++] = page; | ||
171 | if (tlb->nr >= FREE_PTE_NR) | ||
172 | tlb_flush_mmu(tlb); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | ||
177 | unsigned long addr) | ||
178 | { | ||
179 | pgtable_page_dtor(pte); | ||
180 | tlb_add_flush(tlb, addr); | ||
181 | tlb_remove_page(tlb, pte); | ||
102 | } | 182 | } |
103 | 183 | ||
104 | #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) | 184 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) |
105 | #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) | ||
106 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) | 185 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) |
107 | 186 | ||
108 | #define tlb_migrate_finish(mm) do { } while (0) | 187 | #define tlb_migrate_finish(mm) do { } while (0) |
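To make the new fields easier to follow, here is the order in which the core mm is expected to drive these hooks (simplified pseudocode, not lifted from mm/memory.c):

/*
 *   tlb = tlb_gather_mmu(mm, 0);
 *   tlb_start_vma(tlb, vma);                  // flush_cache_range(), note vma
 *   for each pte being torn down:
 *       tlb_remove_tlb_entry(tlb, pte, addr); // widen range_start/range_end
 *       tlb_remove_page(tlb, page);           // free now, or batch in pages[]
 *   tlb_end_vma(tlb, vma);                    // flush_tlb_range() over range
 *   tlb_finish_mmu(tlb, start, end);          // final flush + drain the batch
 *
 * The batching path (tlb_fast_mode() == 0) is what lets SMP and ARMv7
 * kernels delay the actual page freeing until stale TLB entries are gone.
 */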
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a2..d2005de383b8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -10,12 +10,7 @@ | |||
10 | #ifndef _ASMARM_TLBFLUSH_H | 10 | #ifndef _ASMARM_TLBFLUSH_H |
11 | #define _ASMARM_TLBFLUSH_H | 11 | #define _ASMARM_TLBFLUSH_H |
12 | 12 | ||
13 | 13 | #ifdef CONFIG_MMU | |
14 | #ifndef CONFIG_MMU | ||
15 | |||
16 | #define tlb_flush(tlb) ((void) tlb) | ||
17 | |||
18 | #else /* CONFIG_MMU */ | ||
19 | 14 | ||
20 | #include <asm/glue.h> | 15 | #include <asm/glue.h> |
21 | 16 | ||
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff8d104..60843eb0f61c 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h | |||
@@ -28,15 +28,14 @@ | |||
28 | #define tls_emu 1 | 28 | #define tls_emu 1 |
29 | #define has_tls_reg 1 | 29 | #define has_tls_reg 1 |
30 | #define set_tls set_tls_none | 30 | #define set_tls set_tls_none |
31 | #elif __LINUX_ARM_ARCH__ >= 7 || \ | 31 | #elif defined(CONFIG_CPU_V6) |
32 | (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) | ||
33 | #define tls_emu 0 | ||
34 | #define has_tls_reg 1 | ||
35 | #define set_tls set_tls_v6k | ||
36 | #elif __LINUX_ARM_ARCH__ == 6 | ||
37 | #define tls_emu 0 | 32 | #define tls_emu 0 |
38 | #define has_tls_reg (elf_hwcap & HWCAP_TLS) | 33 | #define has_tls_reg (elf_hwcap & HWCAP_TLS) |
39 | #define set_tls set_tls_v6 | 34 | #define set_tls set_tls_v6 |
35 | #elif defined(CONFIG_CPU_32v6K) | ||
36 | #define tls_emu 0 | ||
37 | #define has_tls_reg 1 | ||
38 | #define set_tls set_tls_v6k | ||
40 | #else | 39 | #else |
41 | #define tls_emu 0 | 40 | #define tls_emu 0 |
42 | #define has_tls_reg 0 | 41 | #define has_tls_reg 0 |
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 1b960d5ef6a5..f90756dc16dc 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h | |||
@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr) | |||
45 | 45 | ||
46 | extern void __init early_trap_init(void); | 46 | extern void __init early_trap_init(void); |
47 | extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); | 47 | extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); |
48 | extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); | ||
48 | 49 | ||
49 | extern void *vectors_page; | 50 | extern void *vectors_page; |
50 | 51 | ||
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h index 345df01534a4..48192ac3a23a 100644 --- a/arch/arm/include/asm/types.h +++ b/arch/arm/include/asm/types.h | |||
@@ -16,15 +16,6 @@ typedef unsigned short umode_t; | |||
16 | 16 | ||
17 | #define BITS_PER_LONG 32 | 17 | #define BITS_PER_LONG 32 |
18 | 18 | ||
19 | #ifndef __ASSEMBLY__ | ||
20 | |||
21 | /* Dma addresses are 32-bits wide. */ | ||
22 | |||
23 | typedef u32 dma_addr_t; | ||
24 | typedef u32 dma64_addr_t; | ||
25 | |||
26 | #endif /* __ASSEMBLY__ */ | ||
27 | |||
28 | #endif /* __KERNEL__ */ | 19 | #endif /* __KERNEL__ */ |
29 | 20 | ||
30 | #endif | 21 | #endif |
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index 05ac4b06876a..35917b3a97f9 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h | |||
@@ -71,7 +71,7 @@ struct user{ | |||
71 | /* the registers. */ | 71 | /* the registers. */ |
72 | unsigned long magic; /* To uniquely identify a core file */ | 72 | unsigned long magic; /* To uniquely identify a core file */ |
73 | char u_comm[32]; /* User command that was responsible */ | 73 | char u_comm[32]; /* User command that was responsible */ |
74 | int u_debugreg[8]; | 74 | int u_debugreg[8]; /* No longer used */ |
75 | struct user_fp u_fp; /* FP state */ | 75 | struct user_fp u_fp; /* FP state */ |
76 | struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ | 76 | struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ |
77 | /* the FP registers. */ | 77 | /* the FP registers. */ |