author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /arch/arm/include
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/asm-offsets.h             |    1
-rw-r--r--  arch/arm/include/asm/assembler.h               |   12
-rw-r--r--  arch/arm/include/asm/atomic.h                  |  228
-rw-r--r--  arch/arm/include/asm/cacheflush.h              |  157
-rw-r--r--  arch/arm/include/asm/clkdev.h                  |    4
-rw-r--r--  arch/arm/include/asm/cpu.h                     |    1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h             |  111
-rw-r--r--  arch/arm/include/asm/dma.h                     |    4
-rw-r--r--  arch/arm/include/asm/elf.h                     |    4
-rw-r--r--  arch/arm/include/asm/entry-macro-vic2.S        |   57
-rw-r--r--  arch/arm/include/asm/futex.h                   |   16
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h  |   11
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h      |  165
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h    |   12
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h         |   18
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h         |   12
-rw-r--r--  arch/arm/include/asm/hardware/locomo.h         |    4
-rw-r--r--  arch/arm/include/asm/hardware/sa1111.h         |    4
-rw-r--r--  arch/arm/include/asm/highmem.h                 |   15
-rw-r--r--  arch/arm/include/asm/io.h                      |   11
-rw-r--r--  arch/arm/include/asm/irq.h                     |    1
-rw-r--r--  arch/arm/include/asm/kmap_types.h              |    1
-rw-r--r--  arch/arm/include/asm/mach-types.h              |    1
-rw-r--r--  arch/arm/include/asm/mach/irq.h                |    4
-rw-r--r--  arch/arm/include/asm/mach/time.h               |    8
-rw-r--r--  arch/arm/include/asm/memory.h                  |   39
-rw-r--r--  arch/arm/include/asm/mman.h                    |    3
-rw-r--r--  arch/arm/include/asm/mmu.h                     |    1
-rw-r--r--  arch/arm/include/asm/mmu_context.h             |   15
-rw-r--r--  arch/arm/include/asm/outercache.h              |   75
-rw-r--r--  arch/arm/include/asm/page.h                    |    7
-rw-r--r--  arch/arm/include/asm/pci.h                     |   11
-rw-r--r--  arch/arm/include/asm/perf_event.h              |   31
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h           |    5
-rw-r--r--  arch/arm/include/asm/pgtable.h                 |   14
-rw-r--r--  arch/arm/include/asm/pmu.h                     |   75
-rw-r--r--  arch/arm/include/asm/proc-fns.h                |  374
-rw-r--r--  arch/arm/include/asm/ptrace.h                  |    8
-rw-r--r--  arch/arm/include/asm/setup.h                   |   12
-rw-r--r--  arch/arm/include/asm/smp_plat.h                |    5
-rw-r--r--  arch/arm/include/asm/smp_twd.h                 |   17
-rw-r--r--  arch/arm/include/asm/socket.h                  |    2
-rw-r--r--  arch/arm/include/asm/spinlock.h                |   76
-rw-r--r--  arch/arm/include/asm/spinlock_types.h          |    8
-rw-r--r--  arch/arm/include/asm/swab.h                    |   19
-rw-r--r--  arch/arm/include/asm/system.h                  |   20
-rw-r--r--  arch/arm/include/asm/thread_info.h             |    3
-rw-r--r--  arch/arm/include/asm/thread_notify.h           |    2
-rw-r--r--  arch/arm/include/asm/tlbflush.h                |   32
-rw-r--r--  arch/arm/include/asm/uaccess.h                 |   40
-rw-r--r--  arch/arm/include/asm/ucontext.h                |   23
-rw-r--r--  arch/arm/include/asm/unistd.h                  |    4
-rw-r--r--  arch/arm/include/asm/user.h                    |   12
53 files changed, 1326 insertions(+), 469 deletions(-)
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/arm/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 00f46d9ce299..6e8f05c8a1c8 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -149,10 +149,10 @@
 
 #define USER(x...)				\
 9999:	x;					\
-	.section __ex_table,"a";		\
+	.pushsection __ex_table,"a";		\
 	.align	3;				\
 	.long	9999b,9001f;			\
-	.previous
+	.popsection
 
 /*
  * SMP data memory barrier
@@ -193,10 +193,10 @@
 	.error	"Unsupported inc macro argument"
 	.endif
 
-	.section __ex_table,"a"
+	.pushsection __ex_table,"a"
 	.align	3
 	.long	9999b, \abort
-	.previous
+	.popsection
 	.endm
 
 	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
@@ -234,10 +234,10 @@
 	.error	"Unsupported inc macro argument"
 	.endif
 
-	.section __ex_table,"a"
+	.pushsection __ex_table,"a"
 	.align	3
 	.long	9999b, \abort
-	.previous
+	.popsection
 	.endr
 	.endm
 
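Aside (not from this commit): the .section/.previous to .pushsection/.popsection conversion matters because .previous only swaps back to the most recently used section, which silently picks the wrong section when these macros expand inside code that has itself switched sections; .pushsection/.popsection keep a proper stack. A minimal sketch of the same pattern in C inline assembly; probe_read and the hard-coded -14 (-EFAULT) are illustrative assumptions, not code from this tree:

static inline int probe_read(int *addr, int *val)
{
	int err;

	asm volatile(
	"1:	ldr	%1, [%2]\n"		/* may fault */
	"	mov	%0, #0\n"
	"2:\n"
	"	.pushsection .fixup, \"ax\"\n"	/* fixup code goes out of line */
	"3:	mov	%0, %3\n"
	"	b	2b\n"
	"	.popsection\n"			/* restores whatever section was live */
	"	.pushsection __ex_table, \"a\"\n"
	"	.align	3\n"
	"	.long	1b, 3b\n"		/* fault at 1b jumps to 3b */
	"	.popsection"
	: "=&r" (err), "=&r" (*val)
	: "r" (addr), "i" (-14)			/* -EFAULT, hard-coded for the sketch */
	: "memory");

	return err;
}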
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
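Aside (not from this commit): the new atomic64_t API gives lock-free 64-bit counters through ldrexd/strexd exclusive pairs, so a statistics counter no longer needs a spinlock on SMP. A usage sketch with a hypothetical counter:

static atomic64_t rx_bytes = ATOMIC64_INIT(0);	/* hypothetical 64-bit counter */

static void account_rx(unsigned int len)
{
	atomic64_add(len, &rx_bytes);		/* one ldrexd/strexd retry loop */
}

static u64 rx_bytes_snapshot(void)
{
	return atomic64_read(&rx_bytes);	/* atomic 64-bit load via ldrexd */
}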
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd21b882..4656a24058d2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -42,7 +43,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
@@ -154,16 +156,16 @@
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
- *	flush_cache_kern_all()
+ *	flush_kern_all()
  *
  *		Unconditionally clean and invalidate the entire cache.
  *
- *	flush_cache_user_mm(mm)
+ *	flush_user_all()
  *
  *		Clean and invalidate all user space cache entries
  *		before a change of page tables.
  *
- *	flush_cache_user_range(start, end, flags)
+ *	flush_user_range(start, end, flags)
  *
  *		Clean and invalidate a range of cache entries in the
  *		specified address space before a change of page tables.
@@ -179,23 +181,22 @@
  *	- start  - virtual start address
  *	- end    - virtual end address
  *
- * DMA Cache Coherency
- * ===================
- *
- *	dma_inv_range(start, end)
- *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
+ *	coherent_user_range(start, end)
+ *
+ *		Ensure coherency between the Icache and the Dcache in the
+ *		region described by start, end.  If you have non-snooping
+ *		Harvard caches, you need to implement this function.
  *	- start  - virtual start address
  *	- end    - virtual end address
  *
- *	dma_clean_range(start, end)
+ *	flush_kern_dcache_area(kaddr, size)
  *
- *		Clean (write back) the specified virtual address range.
- *	- start  - virtual start address
- *	- end    - virtual end address
+ *		Ensure that the data held in page is written back.
+ *	- kaddr  - page address
+ *	- size   - region size
+ *
+ * DMA Cache Coherency
+ * ===================
  *
  *	dma_flush_range(start, end)
  *
@@ -211,17 +212,12 @@ struct cpu_cache_fns {
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
-};
-
 /*
@@ -236,7 +232,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -244,8 +240,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -255,14 +251,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -270,58 +266,23 @@ extern void __cpuc_flush_dcache_page(void *);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -331,15 +292,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +308,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -355,23 +316,17 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 	}
 }
 
-static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -408,21 +363,34 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  * about to change to user space.  This is the same method as used on SPARC64.
  * See update_mmu_cache for the user space part.
  */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 static inline void __flush_icache_all(void)
 {
 #ifdef CONFIG_ARM_ERRATA_411920
 	extern void v6_icache_inval_all(void);
 	v6_icache_inval_all();
+#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
+	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
+	    :
+	    : "r" (0));
 #else
 	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
 	    :
	    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
@@ -439,7 +407,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -456,13 +424,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
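Aside (not from this commit): the new flush_kernel_vmap_range()/invalidate_kernel_vmap_range() pair brackets I/O on buffers that the kernel also touches through a vmap() alias. A hedged sketch of the intended calling pattern; do_device_dma is a hypothetical helper standing in for whatever drives the transfer:

/* Hypothetical driver path for DMA to a vmap()-aliased buffer. */
static void io_via_vmap_alias(struct page **pages, int nr, void *vaddr, size_t len)
{
	flush_kernel_vmap_range(vaddr, len);	  /* write back the alias before DMA */
	do_device_dma(pages, nr, len);		  /* device reads/writes the pages */
	invalidate_kernel_vmap_range(vaddr, len); /* drop stale alias lines after DMA */
}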
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c627b39..b56c1389b6fa 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;
@@ -27,4 +28,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
 void clkdev_add(struct clk_lookup *cl);
 void clkdev_drop(struct clk_lookup *cl);
 
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
 #endif
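Aside (not from this commit): clkdev_add_table() lets a machine register its whole clock lookup table in one call at init time instead of looping over clkdev_add(). A sketch under assumed names; the table contents and clk instances are illustrative, supplied by the platform's clock code:

static struct clk_lookup board_lookups[] = {
	{ .dev_id = "uart0", .clk = &clk_uart },	/* hypothetical clocks */
	{ .dev_id = "mmc0",  .clk = &clk_mmc },
};

static void __init board_clk_init(void)
{
	clkdev_add_table(board_lookups, ARRAY_SIZE(board_lookups));
}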
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 634b2d7c612a..793968173bef 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,6 +11,7 @@
 #define __ASM_ARM_CPU_H
 
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa68a97..69ce0727edb5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,20 +15,15 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
-	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
 }
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return pfn_to_page(__bus_to_pfn(addr));
 }
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
@@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 	return __arch_page_to_dma(dev, page);
 }
 
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_page(dev, addr);
+}
+
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
 	return __arch_dma_to_virt(dev, addr);
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
  *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -88,6 +128,14 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
+#ifdef CONFIG_DMABOUNCE
+	if (dev->archdata.dmabounce) {
+		if (dma_mask >= ISA_DMA_THRESHOLD)
+			return 0;
+		else
+			return -EIO;
+	}
+#endif
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
@@ -257,9 +305,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
  */
 extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
 extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
 
 /*
@@ -302,8 +352,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
	return virt_to_dma(dev, cpu_addr);
 }
@@ -327,8 +376,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
	return page_to_dma(dev, page) + offset;
 }
@@ -350,9 +398,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,8 +418,10 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, dir);
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -398,7 +447,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -410,8 +462,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
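Aside (not from this commit): the ownership comment above is the heart of this rework. dma_map_*() hands a buffer to the device (caches cleaned first), dma_unmap_*() hands it back to the CPU (stale lines invalidated, which on speculating cores cannot safely be done at map time). A driver-side sketch; start_rx_dma and wait_rx_done are hypothetical helpers:

static void rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* device owns buf: the CPU must not touch it until unmap */
	start_rx_dma(dev, handle, len);
	wait_rx_done(dev);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	/* CPU owns buf again: safe to read the received data */
}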
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 7edf3536df24..ca51143f97f1 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -138,12 +138,12 @@ extern int get_dma_residue(unsigned int chan);
 #define NO_DMA	255
 #endif
 
+#endif /* CONFIG_ISA_DMA_API */
+
 #ifdef CONFIG_PCI
 extern int isa_dma_bridge_buggy;
 #else
 #define isa_dma_bridge_buggy	(0)
 #endif
 
-#endif /* CONFIG_ISA_DMA_API */
-
 #endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 6aac3f5bb2f3..51662feb9f1d 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -9,6 +9,8 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 
+struct task_struct;
+
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_freg_t[3];
 
@@ -98,10 +100,10 @@ extern int elf_check_arch(const struct elf32_hdr *);
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
+struct task_struct;
 int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define ELF_CORE_COPY_TASK_REGS dump_task_regs
 
-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
new file mode 100644
index 000000000000..3ceb85e43850
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-vic2.S
@@ -0,0 +1,57 @@
+/* arch/arm/include/asm/entry-macro-vic2.S
+ *
+ * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Low-level IRQ helper macros for a device with two VICs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+*/
+
+/* This should be included from <mach/entry-macro.S> with the necessary
+ * defines for virtual addresses and IRQ bases for the two vics.
+ *
+ * The code needs the following defined:
+ *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ
+ *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ
+ *	VA_VIC0		Virtual address of VIC0
+ *	VA_VIC1		Virtual address of VIC1
+ *
+ * Note, code assumes VIC0's virtual address is an ARM immediate constant
+ * away from VIC1.
+*/
+
+#include <asm/hardware/vic.h>
+
+	.macro	disable_fiq
+	.endm
+
+	.macro	get_irqnr_preamble, base, tmp
+	ldr	\base, =VA_VIC0
+	.endm
+
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+	@ check the vic0
+	mov	\irqnr, #IRQ_VIC0_BASE + 31
+	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
+	teq	\irqstat, #0
+
+	@ otherwise try vic1
+	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
+	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
+	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+	teqeq	\irqstat, #0
+
+	clzne	\irqstat, \irqstat
+	subne	\irqnr, \irqnr, \irqstat
+	.endm
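Aside (not from this commit): a sketch of the consumer side. A machine port's <mach/entry-macro.S> just supplies the four required defines and includes this file; every name below except the final include is an assumption standing in for platform-specific values:

/* hypothetical <mach/entry-macro.S> for a two-VIC SoC */
#include <mach/map.h>
#include <mach/irqs.h>

#define VA_VIC0		VA_VIC_BASE		/* VIC0 virtual base (assumed) */
#define VA_VIC1		(VA_VIC_BASE + 0x10000)	/* an ARM immediate away from VIC0 */
#define IRQ_VIC0_BASE	IRQ_VIC0_FIRST		/* first IRQ number of VIC0 (assumed) */
#define IRQ_VIC1_BASE	IRQ_VIC1_FIRST		/* first IRQ number of VIC1 (assumed) */

#include <asm/entry-macro-vic2.S>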
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index bfcc15929a7f..540a044153a5 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -21,14 +21,14 @@
 	"2:	strt	%0, [%2]\n"			\
 	"	mov	%0, #0\n"			\
 	"3:\n"						\
-	"	.section __ex_table,\"a\"\n"		\
+	"	.pushsection __ex_table,\"a\"\n"	\
 	"	.align	3\n"				\
 	"	.long	1b, 4f, 2b, 4f\n"		\
-	"	.previous\n"				\
-	"	.section .fixup,\"ax\"\n"		\
+	"	.popsection\n"				\
+	"	.pushsection .fixup,\"ax\"\n"		\
 	"4:	mov	%0, %4\n"			\
 	"	b	3b\n"				\
-	"	.previous"				\
+	"	.popsection"				\
 	: "=&r" (ret), "=&r" (oldval)			\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)	\
 	: "cc", "memory")
@@ -102,14 +102,14 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	it	eq	@ explicit IT needed for the 2b label\n"
 	"2:	streqt	%2, [%3]\n"
 	"3:\n"
-	"	.section __ex_table,\"a\"\n"
+	"	.pushsection __ex_table,\"a\"\n"
 	"	.align	3\n"
 	"	.long	1b, 4f, 2b, 4f\n"
-	"	.previous\n"
-	"	.section .fixup,\"ax\"\n"
+	"	.popsection\n"
+	"	.pushsection .fixup,\"ax\"\n"
 	"4:	mov	%0, %4\n"
 	"	b	3b\n"
-	"	.previous"
+	"	.popsection"
 	: "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
new file mode 100644
index 000000000000..538f17ca905b
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -0,0 +1,11 @@
+/*
+ * arch/arm/include/asm/hardware/cache-tauros2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+extern void __init tauros2_init(void);
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
new file mode 100644
index 000000000000..f82b25d4f73e
--- /dev/null
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -0,0 +1,165 @@
+/*
+ * linux/arch/arm/include/asm/hardware/coresight.h
+ *
+ * CoreSight components' registers
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_HARDWARE_CORESIGHT_H
+#define __ASM_HARDWARE_CORESIGHT_H
+
+#define TRACER_ACCESSED_BIT	0
+#define TRACER_RUNNING_BIT	1
+#define TRACER_CYCLE_ACC_BIT	2
+#define TRACER_ACCESSED		BIT(TRACER_ACCESSED_BIT)
+#define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
+#define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
+
+struct tracectx {
+	unsigned int	etb_bufsz;
+	void __iomem	*etb_regs;
+	void __iomem	*etm_regs;
+	unsigned long	flags;
+	int		ncmppairs;
+	int		etm_portsz;
+	struct device	*dev;
+	struct clk	*emu_clk;
+	struct mutex	mutex;
+};
+
+#define TRACER_TIMEOUT 10000
+
+#define etm_writel(t, v, x) \
+	(__raw_writel((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+
+/* CoreSight Management Registers */
+#define CSMR_LOCKACCESS	0xfb0
+#define CSMR_LOCKSTATUS	0xfb4
+#define CSMR_AUTHSTATUS	0xfb8
+#define CSMR_DEVID	0xfc8
+#define CSMR_DEVTYPE	0xfcc
+/* CoreSight Component Registers */
+#define CSCR_CLASS	0xff4
+
+#define CSCR_PRSR	0x314
+
+#define UNLOCK_MAGIC	0xc5acce55
+
+/* ETM control register, "ETM Architecture", 3.3.1 */
+#define ETMR_CTRL	0
+#define ETMCTRL_POWERDOWN	1
+#define ETMCTRL_PROGRAM		(1 << 10)
+#define ETMCTRL_PORTSEL		(1 << 11)
+#define ETMCTRL_DO_CONTEXTID	(3 << 14)
+#define ETMCTRL_PORTMASK1	(7 << 4)
+#define ETMCTRL_PORTMASK2	(1 << 21)
+#define ETMCTRL_PORTMASK	(ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
+#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
+#define ETMCTRL_DO_CPRT		(1 << 1)
+#define ETMCTRL_DATAMASK	(3 << 2)
+#define ETMCTRL_DATA_DO_DATA	(1 << 2)
+#define ETMCTRL_DATA_DO_ADDR	(1 << 3)
+#define ETMCTRL_DATA_DO_BOTH	(ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
+#define ETMCTRL_BRANCH_OUTPUT	(1 << 8)
+#define ETMCTRL_CYCLEACCURATE	(1 << 12)
+
+/* ETM configuration code register */
+#define ETMR_CONFCODE		(0x04)
+
+/* ETM trace start/stop resource control register */
+#define ETMR_TRACESSCTRL	(0x18)
+
+/* ETM trigger event register */
+#define ETMR_TRIGEVT		(0x08)
+
+/* address access type register bits, "ETM architecture",
+ * table 3-27 */
+/* - access type */
+#define ETMAAT_IFETCH		0
+#define ETMAAT_IEXEC		1
+#define ETMAAT_IEXECPASS	2
+#define ETMAAT_IEXECFAIL	3
+#define ETMAAT_DLOADSTORE	4
+#define ETMAAT_DLOAD		5
+#define ETMAAT_DSTORE		6
+/* - comparison access size */
+#define ETMAAT_JAVA		(0 << 3)
+#define ETMAAT_THUMB		(1 << 3)
+#define ETMAAT_ARM		(3 << 3)
+/* - data value comparison control */
+#define ETMAAT_NOVALCMP		(0 << 5)
+#define ETMAAT_VALMATCH		(1 << 5)
+#define ETMAAT_VALNOMATCH	(3 << 5)
+/* - exact match */
+#define ETMAAT_EXACTMATCH	(1 << 7)
+/* - context id comparator control */
+#define ETMAAT_IGNCONTEXTID	(0 << 8)
+#define ETMAAT_VALUE1		(1 << 8)
+#define ETMAAT_VALUE2		(2 << 8)
+#define ETMAAT_VALUE3		(3 << 8)
+/* - security level control */
+#define ETMAAT_IGNSECURITY	(0 << 10)
+#define ETMAAT_NSONLY		(1 << 10)
+#define ETMAAT_SONLY		(2 << 10)
+
+#define ETMR_COMP_VAL(x)	(0x40 + (x) * 4)
+#define ETMR_COMP_ACC_TYPE(x)	(0x80 + (x) * 4)
+
+/* ETM status register, "ETM Architecture", 3.3.2 */
+#define ETMR_STATUS		(0x10)
+#define ETMST_OVERFLOW		(1 << 0)
+#define ETMST_PROGBIT		(1 << 1)
+#define ETMST_STARTSTOP		(1 << 2)
+#define ETMST_TRIGGER		(1 << 3)
+
+#define etm_progbit(t)		(etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
+#define etm_started(t)		(etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
+#define etm_triggered(t)	(etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
+
+#define ETMR_TRACEENCTRL2	0x1c
+#define ETMR_TRACEENCTRL	0x24
+#define ETMTE_INCLEXCL		(1 << 24)
+#define ETMR_TRACEENEVT		0x20
+#define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
+				ETMCTRL_DATA_DO_ADDR | \
+				ETMCTRL_BRANCH_OUTPUT | \
+				ETMCTRL_DO_CONTEXTID)
+
+/* ETB registers, "CoreSight Components TRM", 9.3 */
+#define ETBR_DEPTH		0x04
+#define ETBR_STATUS		0x0c
+#define ETBR_READMEM		0x10
+#define ETBR_READADDR		0x14
+#define ETBR_WRITEADDR		0x18
+#define ETBR_TRIGGERCOUNT	0x1c
+#define ETBR_CTRL		0x20
+#define ETBR_FORMATTERCTRL	0x304
+#define ETBFF_ENFTC		1
+#define ETBFF_ENFCONT		(1 << 1)
+#define ETBFF_FONFLIN		(1 << 4)
+#define ETBFF_MANUAL_FLUSH	(1 << 6)
+#define ETBFF_TRIGIN		(1 << 8)
+#define ETBFF_TRIGEVT		(1 << 9)
+#define ETBFF_TRIGFL		(1 << 10)
+
+#define etb_writel(t, v, x) \
+	(__raw_writel((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+
+#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t) \
+	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etb_unlock(t) \
+	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#endif /* __ASM_HARDWARE_CORESIGHT_H */
+
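Aside (not from this commit): an illustrative sketch of how these accessors combine, modelled on the ETM/ETB tracing driver that consumes this header; 't' is a tracectx populated by that driver:

static void etb_drain(struct tracectx *t)
{
	int i, depth;

	etb_unlock(t);				/* write UNLOCK_MAGIC to CSMR_LOCKACCESS */
	depth = etb_readl(t, ETBR_DEPTH);
	etb_writel(t, 0, ETBR_READADDR);	/* read pointer back to word 0 */
	for (i = 0; i < depth; i++)
		pr_debug("etb word: %08x\n", etb_readl(t, ETBR_READMEM));
	etb_lock(t);
}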
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28b..9b28f1243bdc 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
 
-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }
 
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 8d60ad267e3a..5daea2961d48 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -234,7 +234,13 @@ extern int iop3xx_get_init_atu(void);
 void iop3xx_map_io(void);
 void iop_init_cp6_handler(void);
 void iop_init_time(unsigned long tickrate);
-unsigned long iop_gettimeoffset(void);
+
+static inline u32 read_tmr0(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
+	return val;
+}
 
 static inline void write_tmr0(u32 val)
 {
@@ -253,6 +259,11 @@ static inline u32 read_tcr0(void)
 	return val;
 }
 
+static inline void write_tcr0(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
+}
+
 static inline u32 read_tcr1(void)
 {
 	u32 val;
@@ -260,6 +271,11 @@ static inline u32 read_tcr1(void)
 	return val;
 }
 
+static inline void write_tcr1(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
+}
+
 static inline void write_trr0(u32 val)
 {
 	asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
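Aside (not from this commit): the accessor pairs replace the removed iop_gettimeoffset(), which lets the platform move to a clocksource-based timekeeping path. A hedged sketch of a clocksource read built on read_tcr0(), assuming TCR0 is a 32-bit down-counter as on IOP3xx:

static cycle_t iop_read_cycles(struct clocksource *cs)
{
	return 0xffffffffu - read_tcr0();	/* invert the down-counter */
}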
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 74b5fff7f575..6700c7fc7ebd 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,6 +75,18 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
 */
+#define IT8152_IRQ(x)	(IRQ_BOARD_END + (x))
+
+/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
+#define IT8152_LD_IRQ_COUNT	9
+#define IT8152_LP_IRQ_COUNT	16
+#define IT8152_PD_IRQ_COUNT	15
+
+/* Priorities: */
+#define IT8152_PD_IRQ(i)	IT8152_IRQ(i)
+#define IT8152_LP_IRQ(i)	(IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
+#define IT8152_LD_IRQ(i)	(IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
+
 /* frequently used interrupts */
 #define IT8152_PCISERR		IT8152_PD_IRQ(14)
 #define IT8152_H2PTADR		IT8152_PD_IRQ(13)
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 954b1be991b4..74e51d6bd93f 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -214,4 +214,8 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
 /* Frontlight control */
 void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
 
+struct locomo_platform_data {
+	int	irq_base;	/* IRQ base for cascaded on-chip IRQs */
+};
+
 #endif
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 5da2595759e5..92ed254c175b 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -578,4 +578,8 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int
 void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
 void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
 
+struct sa1111_platform_data {
+	int	irq_base;	/* base for cascaded on-chip IRQs */
+};
+
 #endif  /* _ASM_ARCH_SA1111 */
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
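Aside (not from this commit): a sketch of the intended use of the L1-VIPT kmap pair, modelled on the cache-flushing code that motivates it. It yields a temporary, correctly coloured kernel alias of a (possibly highmem) page, saving and restoring any pre-existing pte:

static void flush_one_page(struct page *page)
{
	pte_t saved_pte;
	void *addr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(addr, PAGE_SIZE);
	kunmap_high_l1_vipt(page, saved_pte);
}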
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..c980156f3263 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
69/* 69/*
70 * __arm_ioremap takes CPU physical address. 70 * __arm_ioremap takes CPU physical address.
71 * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page 71 * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
72 * The _caller variety takes a __builtin_return_address(0) value for
73 * /proc/vmallocinfo to use - and should only be used in non-inline functions.
72 */ 74 */
73extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); 75extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
74extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); 76 size_t, unsigned int, void *);
77extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
78 void *);
79
80extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
81extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
75extern void __iounmap(volatile void __iomem *addr); 82extern void __iounmap(volatile void __iomem *addr);
76 83
77/* 84/*
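A sketch of the intended _caller usage, assuming MT_DEVICE from <asm/mach/map.h> as the mapping type and a hypothetical board_ioremap() wrapper; the wrapper must stay non-inline so __builtin_return_address(0) records the real caller:

#include <linux/io.h>
#include <asm/mach/map.h>

void __iomem *board_ioremap(unsigned long phys, size_t size)
{
        /* the mapping is attributed to this wrapper's caller in
         * /proc/vmallocinfo */
        return __arm_ioremap_caller(phys, size, MT_DEVICE,
                                    __builtin_return_address(0));
}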
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a8b790..237282f7c762 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19struct irqaction; 19struct irqaction;
20struct pt_regs;
20extern void migrate_irqs(void); 21extern void migrate_irqs(void);
21 22
22extern void asm_do_IRQ(unsigned int, struct pt_regs *); 23extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
18 KM_IRQ1, 18 KM_IRQ1,
19 KM_SOFTIRQ0, 19 KM_SOFTIRQ0,
20 KM_SOFTIRQ1, 20 KM_SOFTIRQ1,
21 KM_L1_CACHE,
21 KM_L2_CACHE, 22 KM_L2_CACHE,
22 KM_TYPE_NR 23 KM_TYPE_NR
23}; 24};
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
new file mode 100644
index 000000000000..948178cc6ba8
--- /dev/null
+++ b/arch/arm/include/asm/mach-types.h
@@ -0,0 +1 @@
#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
26 */ 26 */
27#define do_bad_IRQ(irq,desc) \ 27#define do_bad_IRQ(irq,desc) \
28do { \ 28do { \
29 spin_lock(&desc->lock); \ 29 raw_spin_lock(&desc->lock); \
30 handle_bad_irq(irq, desc); \ 30 handle_bad_irq(irq, desc); \
31 spin_unlock(&desc->lock); \ 31 raw_spin_unlock(&desc->lock); \
32} while(0) 32} while(0)
33 33
34#endif 34#endif
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
46extern struct sys_timer *system_timer; 46extern struct sys_timer *system_timer;
47extern void timer_tick(void); 47extern void timer_tick(void);
48 48
49/*
50 * Kernel time keeping support.
51 */
52struct timespec;
53extern int (*set_rtc)(void);
54extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
55extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
56
57#endif 49#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cefedf062138..4312ee5e3d0b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
76 */ 76 */
77#define IOREMAP_MAX_ORDER 24 77#define IOREMAP_MAX_ORDER 24
78 78
79/*
80 * Size of DMA-consistent memory region. Must be multiple of 2M,
81 * between 2MB and 14MB inclusive.
82 */
83#ifndef CONSISTENT_DMA_SIZE
84#define CONSISTENT_DMA_SIZE SZ_2M
85#endif
86
87#define CONSISTENT_END (0xffe00000UL)
88#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
89
79#else /* CONFIG_MMU */ 90#else /* CONFIG_MMU */
80 91
81/* 92/*
@@ -93,11 +104,11 @@
93#endif 104#endif
94 105
95#ifndef PHYS_OFFSET 106#ifndef PHYS_OFFSET
96#define PHYS_OFFSET (CONFIG_DRAM_BASE) 107#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
97#endif 108#endif
98 109
99#ifndef END_MEM 110#ifndef END_MEM
100#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) 111#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
101#endif 112#endif
102 113
103#ifndef PAGE_OFFSET 114#ifndef PAGE_OFFSET
@@ -113,20 +124,14 @@
113#endif /* !CONFIG_MMU */ 124#endif /* !CONFIG_MMU */
114 125
115/* 126/*
116 * Size of DMA-consistent memory region. Must be multiple of 2M,
117 * between 2MB and 14MB inclusive.
118 */
119#ifndef CONSISTENT_DMA_SIZE
120#define CONSISTENT_DMA_SIZE SZ_2M
121#endif
122
123/*
124 * Physical vs virtual RAM address space conversion. These are 127 * Physical vs virtual RAM address space conversion. These are
125 * private definitions which should NOT be used outside memory.h 128 * private definitions which should NOT be used outside memory.h
126 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. 129 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
127 */ 130 */
131#ifndef __virt_to_phys
128#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) 132#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
129#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) 133#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
134#endif
130 135
131/* 136/*
132 * Convert a physical address to a Page Frame Number and back 137 * Convert a physical address to a Page Frame Number and back
@@ -134,6 +139,12 @@
134#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) 139#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
135#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 140#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
136 141
142/*
143 * Convert a page to/from a physical address
144 */
145#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
146#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
147
137#ifndef __ASSEMBLY__ 148#ifndef __ASSEMBLY__
138 149
139/* 150/*
@@ -194,7 +205,8 @@ static inline void *phys_to_virt(unsigned long x)
194#ifndef __virt_to_bus 205#ifndef __virt_to_bus
195#define __virt_to_bus __virt_to_phys 206#define __virt_to_bus __virt_to_phys
196#define __bus_to_virt __phys_to_virt 207#define __bus_to_virt __phys_to_virt
197#define __pfn_to_bus(x) ((x) << PAGE_SHIFT) 208#define __pfn_to_bus(x) __pfn_to_phys(x)
209#define __bus_to_pfn(x) __phys_to_pfn(x)
198#endif 210#endif
199 211
200static inline __deprecated unsigned long virt_to_bus(void *x) 212static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -293,11 +305,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
293#endif /* !CONFIG_DISCONTIGMEM */ 305#endif /* !CONFIG_DISCONTIGMEM */
294 306
295/* 307/*
296 * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
297 */
298#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
299
300/*
301 * Optional coherency support. Currently used only by selected 308 * Optional coherency support. Currently used only by selected
302 * Intel XSC3-based systems. 309 * Intel XSC3-based systems.
303 */ 310 */
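A worked example of the conversions above, assuming the common PAGE_OFFSET of 0xc0000000 and a PHYS_OFFSET of 0x80000000 (both values are platform-specific, and a platform may now override __virt_to_phys entirely):

/*
 * __virt_to_phys(0xc0100000) = 0xc0100000 - 0xc0000000 + 0x80000000
 *                            = 0x80100000
 * __phys_to_pfn(0x80100000)  = 0x80100              (PAGE_SHIFT == 12)
 * __pfn_to_bus(0x80100)      = __pfn_to_phys(0x80100) = 0x80100000,
 *                              since bus == phys when __virt_to_bus
 *                              is not overridden
 */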
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89f5ab1..41f99c573b93 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
1#include <asm-generic/mman.h> 1#include <asm-generic/mman.h>
2
3#define arch_mmap_check(addr, len, flags) \
4 (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
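Sketched from user space, with FIRST_USER_ADDRESS being PAGE_SIZE on MMU kernels, the new hook rejects fixed mappings over the NULL page:

#include <sys/mman.h>

/* a MAP_FIXED request below FIRST_USER_ADDRESS now fails up front */
void *p = mmap((void *)0, 4096, PROT_READ,
               MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
/* p == MAP_FAILED with errno == EINVAL */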
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
6typedef struct { 6typedef struct {
7#ifdef CONFIG_CPU_HAS_ASID 7#ifdef CONFIG_CPU_HAS_ASID
8 unsigned int id; 8 unsigned int id;
9 spinlock_t id_lock;
9#endif 10#endif
10 unsigned int kvm_seq; 11 unsigned int kvm_seq;
11} mm_context_t; 12} mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
43#define ASID_FIRST_VERSION (1 << ASID_BITS) 43#define ASID_FIRST_VERSION (1 << ASID_BITS)
44 44
45extern unsigned int cpu_last_asid; 45extern unsigned int cpu_last_asid;
46#ifdef CONFIG_SMP
47DECLARE_PER_CPU(struct mm_struct *, current_mm);
48#endif
46 49
47void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); 50void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
48void __new_context(struct mm_struct *mm); 51void __new_context(struct mm_struct *mm);
49 52
50static inline void check_context(struct mm_struct *mm) 53static inline void check_context(struct mm_struct *mm)
51{ 54{
55 /*
56 * This code is executed with interrupts enabled. Therefore,
57 * mm->context.id cannot be updated to the latest ASID version
 58 * on a different CPU (and the condition below not triggered)
59 * without first getting an IPI to reset the context. The
60 * alternative is to take a read_lock on mm->context.id_lock
61 * (after changing its type to rwlock_t).
62 */
52 if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) 63 if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
53 __new_context(mm); 64 __new_context(mm);
54 65
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
108 __flush_icache_all(); 119 __flush_icache_all();
109#endif 120#endif
110 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { 121 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
122#ifdef CONFIG_SMP
123 struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
124 *crt_mm = next;
125#endif
111 check_context(next); 126 check_context(next);
112 cpu_switch_mm(next->pgd, next); 127 cpu_switch_mm(next->pgd, next);
113 if (cache_is_vivt()) 128 if (cache_is_vivt())
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 000000000000..25f76bae57ab
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
1/*
2 * arch/arm/include/asm/outercache.h
3 *
4 * Copyright (C) 2010 ARM Ltd.
5 * Written by Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __ASM_OUTERCACHE_H
22#define __ASM_OUTERCACHE_H
23
24struct outer_cache_fns {
25 void (*inv_range)(unsigned long, unsigned long);
26 void (*clean_range)(unsigned long, unsigned long);
27 void (*flush_range)(unsigned long, unsigned long);
28#ifdef CONFIG_OUTER_CACHE_SYNC
29 void (*sync)(void);
30#endif
31};
32
33#ifdef CONFIG_OUTER_CACHE
34
35extern struct outer_cache_fns outer_cache;
36
37static inline void outer_inv_range(unsigned long start, unsigned long end)
38{
39 if (outer_cache.inv_range)
40 outer_cache.inv_range(start, end);
41}
42static inline void outer_clean_range(unsigned long start, unsigned long end)
43{
44 if (outer_cache.clean_range)
45 outer_cache.clean_range(start, end);
46}
47static inline void outer_flush_range(unsigned long start, unsigned long end)
48{
49 if (outer_cache.flush_range)
50 outer_cache.flush_range(start, end);
51}
52
53#else
54
55static inline void outer_inv_range(unsigned long start, unsigned long end)
56{ }
57static inline void outer_clean_range(unsigned long start, unsigned long end)
58{ }
59static inline void outer_flush_range(unsigned long start, unsigned long end)
60{ }
61
62#endif
63
64#ifdef CONFIG_OUTER_CACHE_SYNC
65static inline void outer_sync(void)
66{
67 if (outer_cache.sync)
68 outer_cache.sync();
69}
70#else
71static inline void outer_sync(void)
72{ }
73#endif
74
75#endif /* __ASM_OUTERCACHE_H */
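A sketch of how an outer-cache driver hooks in (the L2x0/PL310 driver does essentially this from its init path; the names below are illustrative):

static void my_l2_inv_range(unsigned long start, unsigned long end)
{
        /* issue invalidate-by-PA operations to the L2 controller */
}

static void my_l2_clean_range(unsigned long start, unsigned long end)
{
        /* clean-by-PA */
}

static void my_l2_flush_range(unsigned long start, unsigned long end)
{
        /* clean+invalidate-by-PA */
}

static void __init my_l2_init(void)
{
        outer_cache.inv_range   = my_l2_inv_range;
        outer_cache.clean_range = my_l2_clean_range;
        outer_cache.flush_range = my_l2_flush_range;
}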
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce30..a485ac3c8696 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
117#endif 117#endif
118 118
119struct page; 119struct page;
120struct vm_area_struct;
120 121
121struct cpu_user_fns { 122struct cpu_user_fns {
122 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); 123 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
123 void (*cpu_copy_user_highpage)(struct page *to, struct page *from, 124 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
124 unsigned long vaddr); 125 unsigned long vaddr, struct vm_area_struct *vma);
125}; 126};
126 127
127#ifdef MULTI_USER 128#ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
137 138
138extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); 139extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
139extern void __cpu_copy_user_highpage(struct page *to, struct page *from, 140extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
140 unsigned long vaddr); 141 unsigned long vaddr, struct vm_area_struct *vma);
141#endif 142#endif
142 143
143#define clear_user_highpage(page,vaddr) \ 144#define clear_user_highpage(page,vaddr) \
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
145 146
146#define __HAVE_ARCH_COPY_USER_HIGHPAGE 147#define __HAVE_ARCH_COPY_USER_HIGHPAGE
147#define copy_user_highpage(to,from,vaddr,vma) \ 148#define copy_user_highpage(to,from,vaddr,vma) \
148 __cpu_copy_user_highpage(to, from, vaddr) 149 __cpu_copy_user_highpage(to, from, vaddr, vma)
149 150
150#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 151#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
151extern void copy_page(void *to, const void *from); 152extern void copy_page(void *to, const void *from);
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 226cddd2fb65..47980118d0a5 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -30,17 +30,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
30 */ 30 */
31#define PCI_DMA_BUS_IS_PHYS (1) 31#define PCI_DMA_BUS_IS_PHYS (1)
32 32
33/*
34 * Whether pci_unmap_{single,page} is a nop depends upon the
35 * configuration.
36 */
37#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
38#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
39#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
40#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
41#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
42#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
43
44#ifdef CONFIG_PCI 33#ifdef CONFIG_PCI
45static inline void pci_dma_burst_advice(struct pci_dev *pdev, 34static inline void pci_dma_burst_advice(struct pci_dev *pdev,
46 enum pci_dma_burst_strategy *strat, 35 enum pci_dma_burst_strategy *strat,
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 000000000000..49e3049aba32
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,31 @@
1/*
2 * linux/arch/arm/include/asm/perf_event.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PERF_EVENT_H__
13#define __ARM_PERF_EVENT_H__
14
15/*
16 * NOP: on *most* (read: all supported) ARM platforms, the performance
17 * counter interrupts are regular interrupts and not an NMI. This
18 * means that when we receive the interrupt we can call
19 * perf_event_do_pending() that handles all of the work with
20 * interrupts enabled.
21 */
22static inline void
23set_perf_event_pending(void)
24{
25}
26
27/* ARM performance counters start from 1 (in the cp15 accesses) so use the
28 * same indexes here for consistency. */
29#define PERF_EVENT_INDEX_OFFSET 1
30
31#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939aa..ffc0e85775b4 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; }
67 */ 67 */
68#define pgprot_noncached(prot) __pgprot(0) 68#define pgprot_noncached(prot) __pgprot(0)
69#define pgprot_writecombine(prot) __pgprot(0) 69#define pgprot_writecombine(prot) __pgprot(0)
70#define pgprot_dmacoherent(prot) __pgprot(0)
70 71
71 72
72/* 73/*
@@ -86,8 +87,8 @@ extern unsigned int kobjsize(const void *objp);
86 * All 32bit addresses are effectively valid for vmalloc... 87 * All 32bit addresses are effectively valid for vmalloc...
87 * Sort of meaningless for non-VM targets. 88 * Sort of meaningless for non-VM targets.
88 */ 89 */
89#define VMALLOC_START 0 90#define VMALLOC_START 0UL
90#define VMALLOC_END 0xffffffff 91#define VMALLOC_END 0xffffffffUL
91 92
92#define FIRST_USER_ADDRESS (0) 93#define FIRST_USER_ADDRESS (0)
93 94
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 201ccaa11f61..11397687f42c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
304 304
305static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 305static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
306 306
307#define __pgprot_modify(prot,mask,bits) \
308 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
309
307/* 310/*
308 * Mark the prot value as uncacheable and unbufferable. 311 * Mark the prot value as uncacheable and unbufferable.
309 */ 312 */
310#define pgprot_noncached(prot) \ 313#define pgprot_noncached(prot) \
311 __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED) 314 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
312#define pgprot_writecombine(prot) \ 315#define pgprot_writecombine(prot) \
313 __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE) 316 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
317#if __LINUX_ARM_ARCH__ >= 7
318#define pgprot_dmacoherent(prot) \
319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
320#else
321#define pgprot_dmacoherent(prot) \
322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
323#endif
314 324
315#define pmd_none(pmd) (!pmd_val(pmd)) 325#define pmd_none(pmd) (!pmd_val(pmd))
316#define pmd_present(pmd) (pmd_val(pmd)) 326#define pmd_present(pmd) (pmd_val(pmd))
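A worked expansion of the new helper: the existing macros keep their old meaning, and the factored form lets pgprot_dmacoherent() clear two masks in one step:

/*
 * pgprot_noncached(prot)
 *   => __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 *   => __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
 *
 * pgprot_dmacoherent(prot) on ARMv7 additionally drops L_PTE_EXEC:
 *   => __pgprot((pgprot_val(prot) & ~(L_PTE_MT_MASK | L_PTE_EXEC))
 *               | L_PTE_MT_BUFFERABLE)
 */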
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
new file mode 100644
index 000000000000..2829b9f981a1
--- /dev/null
+++ b/arch/arm/include/asm/pmu.h
@@ -0,0 +1,75 @@
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
15#ifdef CONFIG_CPU_HAS_PMU
16
17struct pmu_irqs {
18 const int *irqs;
19 int num_irqs;
20};
21
22/**
23 * reserve_pmu() - reserve the hardware performance counters
24 *
25 * Reserve the hardware performance counters in the system for exclusive use.
26 * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
27 * encoded error on failure.
28 */
29extern const struct pmu_irqs *
30reserve_pmu(void);
31
32/**
33 * release_pmu() - Relinquish control of the performance counters
34 *
35 * Release the performance counters and allow someone else to use them.
36 * Callers must have disabled the counters and released IRQs before calling
37 * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
38 * a cookie.
39 */
40extern int
41release_pmu(const struct pmu_irqs *irqs);
42
43/**
44 * init_pmu() - Initialise the PMU.
45 *
46 * Initialise the system ready for PMU enabling. This should typically set the
47 * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
48 * the actual hardware initialisation.
49 */
50extern int
51init_pmu(void);
52
53#else /* CONFIG_CPU_HAS_PMU */
54
55static inline const struct pmu_irqs *
56reserve_pmu(void)
57{
58 return ERR_PTR(-ENODEV);
59}
60
61static inline int
62release_pmu(const struct pmu_irqs *irqs)
63{
64 return -ENODEV;
65}
66
67static inline int
68init_pmu(void)
69{
70 return -ENODEV;
71}
72
73#endif /* CONFIG_CPU_HAS_PMU */
74
75#endif /* __ARM_PMU_H__ */
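A minimal sketch of the call sequence the kerneldoc above describes (error handling trimmed; pmu_isr is an illustrative handler, not part of this API):

#include <linux/err.h>
#include <linux/interrupt.h>
#include <asm/pmu.h>

static irqreturn_t pmu_isr(int irq, void *dev);      /* illustrative */

static int pmu_claim_sketch(void)
{
        const struct pmu_irqs *pmu_irqs = reserve_pmu();
        int i, err;

        if (IS_ERR(pmu_irqs))
                return PTR_ERR(pmu_irqs);
        err = init_pmu();               /* set IRQ affinity, nothing more */
        for (i = 0; i < pmu_irqs->num_irqs && !err; ++i)
                err = request_irq(pmu_irqs->irqs[i], pmu_isr,
                                  IRQF_DISABLED, "pmu", NULL);
        /* teardown: disable the counters, free_irq() each IRQ, then: */
        /* release_pmu(pmu_irqs); */
        return err;
}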
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 3976412685f8..8fdae9bc9abb 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -24,206 +24,228 @@
24 * CPU_NAME - the prefix for CPU related functions 24 * CPU_NAME - the prefix for CPU related functions
25 */ 25 */
26 26
27#ifdef CONFIG_CPU_32 27#ifdef CONFIG_CPU_ARM610
28# ifdef CONFIG_CPU_ARM610 28# ifdef CPU_NAME
29# ifdef CPU_NAME 29# undef MULTI_CPU
30# undef MULTI_CPU 30# define MULTI_CPU
31# define MULTI_CPU 31# else
32# else 32# define CPU_NAME cpu_arm6
33# define CPU_NAME cpu_arm6
34# endif
35# endif 33# endif
36# ifdef CONFIG_CPU_ARM7TDMI 34#endif
37# ifdef CPU_NAME 35
38# undef MULTI_CPU 36#ifdef CONFIG_CPU_ARM7TDMI
39# define MULTI_CPU 37# ifdef CPU_NAME
40# else 38# undef MULTI_CPU
41# define CPU_NAME cpu_arm7tdmi 39# define MULTI_CPU
42# endif 40# else
41# define CPU_NAME cpu_arm7tdmi
43# endif 42# endif
44# ifdef CONFIG_CPU_ARM710 43#endif
45# ifdef CPU_NAME 44
46# undef MULTI_CPU 45#ifdef CONFIG_CPU_ARM710
47# define MULTI_CPU 46# ifdef CPU_NAME
48# else 47# undef MULTI_CPU
49# define CPU_NAME cpu_arm7 48# define MULTI_CPU
50# endif 49# else
50# define CPU_NAME cpu_arm7
51# endif 51# endif
52# ifdef CONFIG_CPU_ARM720T 52#endif
53# ifdef CPU_NAME 53
54# undef MULTI_CPU 54#ifdef CONFIG_CPU_ARM720T
55# define MULTI_CPU 55# ifdef CPU_NAME
56# else 56# undef MULTI_CPU
57# define CPU_NAME cpu_arm720 57# define MULTI_CPU
58# endif 58# else
59# define CPU_NAME cpu_arm720
59# endif 60# endif
60# ifdef CONFIG_CPU_ARM740T 61#endif
61# ifdef CPU_NAME 62
62# undef MULTI_CPU 63#ifdef CONFIG_CPU_ARM740T
63# define MULTI_CPU 64# ifdef CPU_NAME
64# else 65# undef MULTI_CPU
65# define CPU_NAME cpu_arm740 66# define MULTI_CPU
66# endif 67# else
68# define CPU_NAME cpu_arm740
67# endif 69# endif
68# ifdef CONFIG_CPU_ARM9TDMI 70#endif
69# ifdef CPU_NAME 71
70# undef MULTI_CPU 72#ifdef CONFIG_CPU_ARM9TDMI
71# define MULTI_CPU 73# ifdef CPU_NAME
72# else 74# undef MULTI_CPU
73# define CPU_NAME cpu_arm9tdmi 75# define MULTI_CPU
74# endif 76# else
77# define CPU_NAME cpu_arm9tdmi
75# endif 78# endif
76# ifdef CONFIG_CPU_ARM920T 79#endif
77# ifdef CPU_NAME 80
78# undef MULTI_CPU 81#ifdef CONFIG_CPU_ARM920T
79# define MULTI_CPU 82# ifdef CPU_NAME
80# else 83# undef MULTI_CPU
81# define CPU_NAME cpu_arm920 84# define MULTI_CPU
82# endif 85# else
86# define CPU_NAME cpu_arm920
83# endif 87# endif
84# ifdef CONFIG_CPU_ARM922T 88#endif
85# ifdef CPU_NAME 89
86# undef MULTI_CPU 90#ifdef CONFIG_CPU_ARM922T
87# define MULTI_CPU 91# ifdef CPU_NAME
88# else 92# undef MULTI_CPU
89# define CPU_NAME cpu_arm922 93# define MULTI_CPU
90# endif 94# else
95# define CPU_NAME cpu_arm922
91# endif 96# endif
92# ifdef CONFIG_CPU_FA526 97#endif
93# ifdef CPU_NAME 98
94# undef MULTI_CPU 99#ifdef CONFIG_CPU_FA526
95# define MULTI_CPU 100# ifdef CPU_NAME
96# else 101# undef MULTI_CPU
97# define CPU_NAME cpu_fa526 102# define MULTI_CPU
98# endif 103# else
104# define CPU_NAME cpu_fa526
99# endif 105# endif
100# ifdef CONFIG_CPU_ARM925T 106#endif
101# ifdef CPU_NAME 107
102# undef MULTI_CPU 108#ifdef CONFIG_CPU_ARM925T
103# define MULTI_CPU 109# ifdef CPU_NAME
104# else 110# undef MULTI_CPU
105# define CPU_NAME cpu_arm925 111# define MULTI_CPU
106# endif 112# else
113# define CPU_NAME cpu_arm925
107# endif 114# endif
108# ifdef CONFIG_CPU_ARM926T 115#endif
109# ifdef CPU_NAME 116
110# undef MULTI_CPU 117#ifdef CONFIG_CPU_ARM926T
111# define MULTI_CPU 118# ifdef CPU_NAME
112# else 119# undef MULTI_CPU
113# define CPU_NAME cpu_arm926 120# define MULTI_CPU
114# endif 121# else
122# define CPU_NAME cpu_arm926
115# endif 123# endif
116# ifdef CONFIG_CPU_ARM940T 124#endif
117# ifdef CPU_NAME 125
118# undef MULTI_CPU 126#ifdef CONFIG_CPU_ARM940T
119# define MULTI_CPU 127# ifdef CPU_NAME
120# else 128# undef MULTI_CPU
121# define CPU_NAME cpu_arm940 129# define MULTI_CPU
122# endif 130# else
131# define CPU_NAME cpu_arm940
123# endif 132# endif
124# ifdef CONFIG_CPU_ARM946E 133#endif
125# ifdef CPU_NAME 134
126# undef MULTI_CPU 135#ifdef CONFIG_CPU_ARM946E
127# define MULTI_CPU 136# ifdef CPU_NAME
128# else 137# undef MULTI_CPU
129# define CPU_NAME cpu_arm946 138# define MULTI_CPU
130# endif 139# else
140# define CPU_NAME cpu_arm946
131# endif 141# endif
132# ifdef CONFIG_CPU_SA110 142#endif
133# ifdef CPU_NAME 143
134# undef MULTI_CPU 144#ifdef CONFIG_CPU_SA110
135# define MULTI_CPU 145# ifdef CPU_NAME
136# else 146# undef MULTI_CPU
137# define CPU_NAME cpu_sa110 147# define MULTI_CPU
138# endif 148# else
149# define CPU_NAME cpu_sa110
139# endif 150# endif
140# ifdef CONFIG_CPU_SA1100 151#endif
141# ifdef CPU_NAME 152
142# undef MULTI_CPU 153#ifdef CONFIG_CPU_SA1100
143# define MULTI_CPU 154# ifdef CPU_NAME
144# else 155# undef MULTI_CPU
145# define CPU_NAME cpu_sa1100 156# define MULTI_CPU
146# endif 157# else
158# define CPU_NAME cpu_sa1100
147# endif 159# endif
148# ifdef CONFIG_CPU_ARM1020 160#endif
149# ifdef CPU_NAME 161
150# undef MULTI_CPU 162#ifdef CONFIG_CPU_ARM1020
151# define MULTI_CPU 163# ifdef CPU_NAME
152# else 164# undef MULTI_CPU
153# define CPU_NAME cpu_arm1020 165# define MULTI_CPU
154# endif 166# else
167# define CPU_NAME cpu_arm1020
155# endif 168# endif
156# ifdef CONFIG_CPU_ARM1020E 169#endif
157# ifdef CPU_NAME 170
158# undef MULTI_CPU 171#ifdef CONFIG_CPU_ARM1020E
159# define MULTI_CPU 172# ifdef CPU_NAME
160# else 173# undef MULTI_CPU
161# define CPU_NAME cpu_arm1020e 174# define MULTI_CPU
162# endif 175# else
176# define CPU_NAME cpu_arm1020e
163# endif 177# endif
164# ifdef CONFIG_CPU_ARM1022 178#endif
165# ifdef CPU_NAME 179
166# undef MULTI_CPU 180#ifdef CONFIG_CPU_ARM1022
167# define MULTI_CPU 181# ifdef CPU_NAME
168# else 182# undef MULTI_CPU
169# define CPU_NAME cpu_arm1022 183# define MULTI_CPU
170# endif 184# else
185# define CPU_NAME cpu_arm1022
171# endif 186# endif
172# ifdef CONFIG_CPU_ARM1026 187#endif
173# ifdef CPU_NAME 188
174# undef MULTI_CPU 189#ifdef CONFIG_CPU_ARM1026
175# define MULTI_CPU 190# ifdef CPU_NAME
176# else 191# undef MULTI_CPU
177# define CPU_NAME cpu_arm1026 192# define MULTI_CPU
178# endif 193# else
194# define CPU_NAME cpu_arm1026
179# endif 195# endif
180# ifdef CONFIG_CPU_XSCALE 196#endif
181# ifdef CPU_NAME 197
182# undef MULTI_CPU 198#ifdef CONFIG_CPU_XSCALE
183# define MULTI_CPU 199# ifdef CPU_NAME
184# else 200# undef MULTI_CPU
185# define CPU_NAME cpu_xscale 201# define MULTI_CPU
186# endif 202# else
203# define CPU_NAME cpu_xscale
187# endif 204# endif
188# ifdef CONFIG_CPU_XSC3 205#endif
189# ifdef CPU_NAME 206
190# undef MULTI_CPU 207#ifdef CONFIG_CPU_XSC3
191# define MULTI_CPU 208# ifdef CPU_NAME
192# else 209# undef MULTI_CPU
193# define CPU_NAME cpu_xsc3 210# define MULTI_CPU
194# endif 211# else
212# define CPU_NAME cpu_xsc3
195# endif 213# endif
196# ifdef CONFIG_CPU_MOHAWK 214#endif
197# ifdef CPU_NAME 215
198# undef MULTI_CPU 216#ifdef CONFIG_CPU_MOHAWK
199# define MULTI_CPU 217# ifdef CPU_NAME
200# else 218# undef MULTI_CPU
201# define CPU_NAME cpu_mohawk 219# define MULTI_CPU
202# endif 220# else
221# define CPU_NAME cpu_mohawk
203# endif 222# endif
204# ifdef CONFIG_CPU_FEROCEON 223#endif
205# ifdef CPU_NAME 224
206# undef MULTI_CPU 225#ifdef CONFIG_CPU_FEROCEON
207# define MULTI_CPU 226# ifdef CPU_NAME
208# else 227# undef MULTI_CPU
209# define CPU_NAME cpu_feroceon 228# define MULTI_CPU
210# endif 229# else
230# define CPU_NAME cpu_feroceon
211# endif 231# endif
212# ifdef CONFIG_CPU_V6 232#endif
213# ifdef CPU_NAME 233
214# undef MULTI_CPU 234#ifdef CONFIG_CPU_V6
215# define MULTI_CPU 235# ifdef CPU_NAME
216# else 236# undef MULTI_CPU
217# define CPU_NAME cpu_v6 237# define MULTI_CPU
218# endif 238# else
239# define CPU_NAME cpu_v6
219# endif 240# endif
220# ifdef CONFIG_CPU_V7 241#endif
221# ifdef CPU_NAME 242
222# undef MULTI_CPU 243#ifdef CONFIG_CPU_V7
223# define MULTI_CPU 244# ifdef CPU_NAME
224# else 245# undef MULTI_CPU
225# define CPU_NAME cpu_v7 246# define MULTI_CPU
226# endif 247# else
248# define CPU_NAME cpu_v7
227# endif 249# endif
228#endif 250#endif
229 251
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index bbecccda76d0..9dcb11e59026 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -97,9 +97,15 @@
97 * stack during a system call. Note that sizeof(struct pt_regs) 97 * stack during a system call. Note that sizeof(struct pt_regs)
98 * has to be a multiple of 8. 98 * has to be a multiple of 8.
99 */ 99 */
100#ifndef __KERNEL__
100struct pt_regs { 101struct pt_regs {
101 long uregs[18]; 102 long uregs[18];
102}; 103};
104#else /* __KERNEL__ */
105struct pt_regs {
106 unsigned long uregs[18];
107};
108#endif /* __KERNEL__ */
103 109
104#define ARM_cpsr uregs[16] 110#define ARM_cpsr uregs[16]
105#define ARM_pc uregs[15] 111#define ARM_pc uregs[15]
@@ -122,6 +128,8 @@ struct pt_regs {
122 128
123#ifdef __KERNEL__ 129#ifdef __KERNEL__
124 130
131#define arch_has_single_step() (1)
132
125#define user_mode(regs) \ 133#define user_mode(regs) \
126 (((regs)->ARM_cpsr & 0xf) == 0) 134 (((regs)->ARM_cpsr & 0xf) == 0)
127 135
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
223#define bank_phys_end(bank) ((bank)->start + (bank)->size) 223#define bank_phys_end(bank) ((bank)->start + (bank)->size)
224#define bank_phys_size(bank) (bank)->size 224#define bank_phys_size(bank) (bank)->size
225 225
226/*
227 * Early command line parameters.
228 */
229struct early_params {
230 const char *arg;
231 void (*fn)(char **p);
232};
233
234#define __early_param(name,fn) \
235static struct early_params __early_##fn __used \
236__attribute__((__section__(".early_param.init"))) = { name, fn }
237
238#endif /* __KERNEL__ */ 226#endif /* __KERNEL__ */
239 227
240#endif 228#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
13 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; 13 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
14} 14}
15 15
16static inline int cache_ops_need_broadcast(void)
17{
18 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
19}
20
16#endif 21#endif
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 7be0978b2625..634f357be6bb 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -1,6 +1,23 @@
1#ifndef __ASMARM_SMP_TWD_H 1#ifndef __ASMARM_SMP_TWD_H
2#define __ASMARM_SMP_TWD_H 2#define __ASMARM_SMP_TWD_H
3 3
4#define TWD_TIMER_LOAD 0x00
5#define TWD_TIMER_COUNTER 0x04
6#define TWD_TIMER_CONTROL 0x08
7#define TWD_TIMER_INTSTAT 0x0C
8
9#define TWD_WDOG_LOAD 0x20
10#define TWD_WDOG_COUNTER 0x24
11#define TWD_WDOG_CONTROL 0x28
12#define TWD_WDOG_INTSTAT 0x2C
13#define TWD_WDOG_RESETSTAT 0x30
14#define TWD_WDOG_DISABLE 0x34
15
16#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
17#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
18#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
19#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
20
4struct clock_event_device; 21struct clock_event_device;
5 22
6extern void __iomem *twd_base; 23extern void __iomem *twd_base;
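A sketch of programming the per-CPU timer with the newly exported offsets, assuming twd_base has been ioremap()ed by platform code and reload holds the period in timer ticks:

__raw_writel(reload, twd_base + TWD_TIMER_LOAD);
__raw_writel(TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_PERIODIC |
             TWD_TIMER_CONTROL_IT_ENABLE,
             twd_base + TWD_TIMER_CONTROL);
/* in the ISR, ack the event before returning: */
__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);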
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 92ac61d294fd..90ffd04b8e74 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -60,4 +60,6 @@
60#define SO_PROTOCOL 38 60#define SO_PROTOCOL 38
61#define SO_DOMAIN 39 61#define SO_DOMAIN 39
62 62
63#define SO_RXQ_OVFL 40
64
63#endif /* _ASM_SOCKET_H */ 65#endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..17eb355707dd 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@
5#error SMP not supported on pre-ARMv6 CPUs 5#error SMP not supported on pre-ARMv6 CPUs
6#endif 6#endif
7 7
8static inline void dsb_sev(void)
9{
10#if __LINUX_ARM_ARCH__ >= 7
11 __asm__ __volatile__ (
12 "dsb\n"
13 "sev"
14 );
15#elif defined(CONFIG_CPU_32v6K)
16 __asm__ __volatile__ (
17 "mcr p15, 0, %0, c7, c10, 4\n"
18 "sev"
19 : : "r" (0)
20 );
21#endif
22}
23
8/* 24/*
9 * ARMv6 Spin-locking. 25 * ARMv6 Spin-locking.
10 * 26 *
@@ -17,13 +33,13 @@
17 * Locked value: 1 33 * Locked value: 1
18 */ 34 */
19 35
20#define __raw_spin_is_locked(x) ((x)->lock != 0) 36#define arch_spin_is_locked(x) ((x)->lock != 0)
21#define __raw_spin_unlock_wait(lock) \ 37#define arch_spin_unlock_wait(lock) \
22 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 38 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
23 39
24#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 40#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
25 41
26static inline void __raw_spin_lock(raw_spinlock_t *lock) 42static inline void arch_spin_lock(arch_spinlock_t *lock)
27{ 43{
28 unsigned long tmp; 44 unsigned long tmp;
29 45
@@ -43,7 +59,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
43 smp_mb(); 59 smp_mb();
44} 60}
45 61
46static inline int __raw_spin_trylock(raw_spinlock_t *lock) 62static inline int arch_spin_trylock(arch_spinlock_t *lock)
47{ 63{
48 unsigned long tmp; 64 unsigned long tmp;
49 65
@@ -63,19 +79,17 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
63 } 79 }
64} 80}
65 81
66static inline void __raw_spin_unlock(raw_spinlock_t *lock) 82static inline void arch_spin_unlock(arch_spinlock_t *lock)
67{ 83{
68 smp_mb(); 84 smp_mb();
69 85
70 __asm__ __volatile__( 86 __asm__ __volatile__(
71" str %1, [%0]\n" 87" str %1, [%0]\n"
72#ifdef CONFIG_CPU_32v6K
73" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
74" sev"
75#endif
76 : 88 :
77 : "r" (&lock->lock), "r" (0) 89 : "r" (&lock->lock), "r" (0)
78 : "cc"); 90 : "cc");
91
92 dsb_sev();
79} 93}
80 94
81/* 95/*
@@ -86,7 +100,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
86 * just write zero since the lock is exclusively held. 100 * just write zero since the lock is exclusively held.
87 */ 101 */
88 102
89static inline void __raw_write_lock(raw_rwlock_t *rw) 103static inline void arch_write_lock(arch_rwlock_t *rw)
90{ 104{
91 unsigned long tmp; 105 unsigned long tmp;
92 106
@@ -106,7 +120,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
106 smp_mb(); 120 smp_mb();
107} 121}
108 122
109static inline int __raw_write_trylock(raw_rwlock_t *rw) 123static inline int arch_write_trylock(arch_rwlock_t *rw)
110{ 124{
111 unsigned long tmp; 125 unsigned long tmp;
112 126
@@ -126,23 +140,21 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
126 } 140 }
127} 141}
128 142
129static inline void __raw_write_unlock(raw_rwlock_t *rw) 143static inline void arch_write_unlock(arch_rwlock_t *rw)
130{ 144{
131 smp_mb(); 145 smp_mb();
132 146
133 __asm__ __volatile__( 147 __asm__ __volatile__(
134 "str %1, [%0]\n" 148 "str %1, [%0]\n"
135#ifdef CONFIG_CPU_32v6K
136" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
137" sev\n"
138#endif
139 : 149 :
140 : "r" (&rw->lock), "r" (0) 150 : "r" (&rw->lock), "r" (0)
141 : "cc"); 151 : "cc");
152
153 dsb_sev();
142} 154}
143 155
144/* write_can_lock - would write_trylock() succeed? */ 156/* write_can_lock - would write_trylock() succeed? */
145#define __raw_write_can_lock(x) ((x)->lock == 0) 157#define arch_write_can_lock(x) ((x)->lock == 0)
146 158
147/* 159/*
148 * Read locks are a bit more hairy: 160 * Read locks are a bit more hairy:
@@ -156,7 +168,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
156 * currently active. However, we know we won't have any write 168 * currently active. However, we know we won't have any write
157 * locks. 169 * locks.
158 */ 170 */
159static inline void __raw_read_lock(raw_rwlock_t *rw) 171static inline void arch_read_lock(arch_rwlock_t *rw)
160{ 172{
161 unsigned long tmp, tmp2; 173 unsigned long tmp, tmp2;
162 174
@@ -176,7 +188,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
176 smp_mb(); 188 smp_mb();
177} 189}
178 190
179static inline void __raw_read_unlock(raw_rwlock_t *rw) 191static inline void arch_read_unlock(arch_rwlock_t *rw)
180{ 192{
181 unsigned long tmp, tmp2; 193 unsigned long tmp, tmp2;
182 194
@@ -188,17 +200,15 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
188" strex %1, %0, [%2]\n" 200" strex %1, %0, [%2]\n"
189" teq %1, #0\n" 201" teq %1, #0\n"
190" bne 1b" 202" bne 1b"
191#ifdef CONFIG_CPU_32v6K
192"\n cmp %0, #0\n"
193" mcreq p15, 0, %0, c7, c10, 4\n"
194" seveq"
195#endif
196 : "=&r" (tmp), "=&r" (tmp2) 203 : "=&r" (tmp), "=&r" (tmp2)
197 : "r" (&rw->lock) 204 : "r" (&rw->lock)
198 : "cc"); 205 : "cc");
206
207 if (tmp == 0)
208 dsb_sev();
199} 209}
200 210
201static inline int __raw_read_trylock(raw_rwlock_t *rw) 211static inline int arch_read_trylock(arch_rwlock_t *rw)
202{ 212{
203 unsigned long tmp, tmp2 = 1; 213 unsigned long tmp, tmp2 = 1;
204 214
@@ -215,13 +225,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
215} 225}
216 226
217/* read_can_lock - would read_trylock() succeed? */ 227/* read_can_lock - would read_trylock() succeed? */
218#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) 228#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
219 229
220#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 230#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
221#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 231#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
222 232
223#define _raw_spin_relax(lock) cpu_relax() 233#define arch_spin_relax(lock) cpu_relax()
224#define _raw_read_relax(lock) cpu_relax() 234#define arch_read_relax(lock) cpu_relax()
225#define _raw_write_relax(lock) cpu_relax() 235#define arch_write_relax(lock) cpu_relax()
226 236
227#endif /* __ASM_SPINLOCK_H */ 237#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index ca2bf2f6d6ea..9997ad20eff1 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -22,6 +22,24 @@
22# define __SWAB_64_THRU_32__ 22# define __SWAB_64_THRU_32__
23#endif 23#endif
24 24
25#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
26
27static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
28{
29 __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
30 return x;
31}
32#define __arch_swab16 __arch_swab16
33
34static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
35{
36 __asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
37 return x;
38}
39#define __arch_swab32 __arch_swab32
40
41#else
42
25static inline __attribute_const__ __u32 __arch_swab32(__u32 x) 43static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
26{ 44{
27 __u32 t; 45 __u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
48 66
49#endif 67#endif
50 68
69#endif
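Worked values for the new single-instruction paths (pre-v6 kernels keep the shift/eor fallback under the #else):

/*
 * __arch_swab32(0x11223344) == 0x44332211    -- one "rev"
 * __arch_swab16(0x1122)     == 0x2211        -- one "rev16"
 */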
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index d65b2f5bf41f..4ace45ec3ef8 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
60#include <linux/linkage.h> 60#include <linux/linkage.h>
61#include <linux/irqflags.h> 61#include <linux/irqflags.h>
62 62
63#include <asm/outercache.h>
64
63#define __exception __attribute__((section(".exception.text"))) 65#define __exception __attribute__((section(".exception.text")))
64 66
65struct thread_info; 67struct thread_info;
@@ -73,8 +75,7 @@ extern unsigned int mem_fclk_21285;
73 75
74struct pt_regs; 76struct pt_regs;
75 77
76void die(const char *msg, struct pt_regs *regs, int err) 78void die(const char *msg, struct pt_regs *regs, int err);
77 __attribute__((noreturn));
78 79
79struct siginfo; 80struct siginfo;
80void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, 81void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
@@ -138,21 +139,28 @@ extern unsigned int user_debug;
138#define dmb() __asm__ __volatile__ ("" : : : "memory") 139#define dmb() __asm__ __volatile__ ("" : : : "memory")
139#endif 140#endif
140 141
141#ifndef CONFIG_SMP 142#ifdef CONFIG_ARCH_HAS_BARRIERS
143#include <mach/barriers.h>
144#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
145#define mb() do { dsb(); outer_sync(); } while (0)
146#define rmb() dmb()
147#define wmb() mb()
148#else
142#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 149#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
143#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 150#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
144#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 151#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
152#endif
153
154#ifndef CONFIG_SMP
145#define smp_mb() barrier() 155#define smp_mb() barrier()
146#define smp_rmb() barrier() 156#define smp_rmb() barrier()
147#define smp_wmb() barrier() 157#define smp_wmb() barrier()
148#else 158#else
149#define mb() dmb()
150#define rmb() dmb()
151#define wmb() dmb()
152#define smp_mb() dmb() 159#define smp_mb() dmb()
153#define smp_rmb() dmb() 160#define smp_rmb() dmb()
154#define smp_wmb() dmb() 161#define smp_wmb() dmb()
155#endif 162#endif
163
156#define read_barrier_depends() do { } while(0) 164#define read_barrier_depends() do { } while(0)
157#define smp_read_barrier_depends() do { } while(0) 165#define smp_read_barrier_depends() do { } while(0)
158 166
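The ordering this buys in practice, sketched for a descriptor/doorbell device on ARMv7 with a PL310-style outer cache (the device and register names are illustrative):

writel(desc_addr, dev_base + DESC_REG);   /* post the descriptor */
mb();   /* dsb() drains the CPU write buffer; outer_sync() issues the
         * L2 cache sync so the device sees the descriptor before... */
writel(1, dev_base + DOORBELL_REG);       /* ...the doorbell rings */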
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2dfb7d7a66e9..b74970ec02c4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
115extern void iwmmxt_task_release(struct thread_info *); 115extern void iwmmxt_task_release(struct thread_info *);
116extern void iwmmxt_task_switch(struct thread_info *); 116extern void iwmmxt_task_switch(struct thread_info *);
117 117
118extern void vfp_sync_state(struct thread_info *thread); 118extern void vfp_sync_hwstate(struct thread_info *);
119extern void vfp_flush_hwstate(struct thread_info *);
119 120
120#endif 121#endif
121 122
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index f27379d7f72a..c4391ba20350 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -41,7 +41,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
41 * These are the reason codes for the thread notifier. 41 * These are the reason codes for the thread notifier.
42 */ 42 */
43#define THREAD_NOTIFY_FLUSH 0 43#define THREAD_NOTIFY_FLUSH 0
44#define THREAD_NOTIFY_RELEASE 1 44#define THREAD_NOTIFY_EXIT 1
45#define THREAD_NOTIFY_SWITCH 2 45#define THREAD_NOTIFY_SWITCH 2
46 46
47#endif 47#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..bd863d8608cd 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -46,6 +46,9 @@
46#define TLB_V7_UIS_FULL (1 << 20) 46#define TLB_V7_UIS_FULL (1 << 20)
47#define TLB_V7_UIS_ASID (1 << 21) 47#define TLB_V7_UIS_ASID (1 << 21)
48 48
49/* Inner Shareable BTB operation (ARMv7 MP extensions) */
50#define TLB_V7_IS_BTB (1 << 22)
51
49#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ 52#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
50#define TLB_DCLEAN (1 << 30) 53#define TLB_DCLEAN (1 << 30)
51#define TLB_WB (1 << 31) 54#define TLB_WB (1 << 31)
@@ -183,7 +186,7 @@
183#endif 186#endif
184 187
185#ifdef CONFIG_SMP 188#ifdef CONFIG_SMP
186#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ 189#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
187 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) 190 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
188#else 191#else
189#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ 192#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
@@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void)
339 dsb(); 342 dsb();
340 isb(); 343 isb();
341 } 344 }
345 if (tlb_flag(TLB_V7_IS_BTB)) {
346 /* flush the branch target cache */
347 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
348 dsb();
349 isb();
350 }
342} 351}
343 352
344static inline void local_flush_tlb_mm(struct mm_struct *mm) 353static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
376 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); 385 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
377 dsb(); 386 dsb();
378 } 387 }
388 if (tlb_flag(TLB_V7_IS_BTB)) {
389 /* flush the branch target cache */
390 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
391 dsb();
392 isb();
393 }
379} 394}
380 395
381static inline void 396static inline void
@@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
416 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); 431 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
417 dsb(); 432 dsb();
418 } 433 }
434 if (tlb_flag(TLB_V7_IS_BTB)) {
435 /* flush the branch target cache */
436 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
437 dsb();
438 isb();
439 }
419} 440}
420 441
421static inline void local_flush_tlb_kernel_page(unsigned long kaddr) 442static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
454 dsb(); 475 dsb();
455 isb(); 476 isb();
456 } 477 }
478 if (tlb_flag(TLB_V7_IS_BTB)) {
479 /* flush the branch target cache */
480 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
481 dsb();
482 isb();
483 }
457} 484}
458 485
459/* 486/*
@@ -529,7 +556,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
529 * cache entries for the kernel's virtual memory range are written 556 * cache entries for the kernel's virtual memory range are written
530 * back to the page. 557 * back to the page.
531 */ 558 */
532extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); 559extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
560 pte_t *ptep);
533 561
534#endif 562#endif
535 563
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 1d6bd40a4322..33e4a48fe103 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -229,16 +229,16 @@ do { \
229 __asm__ __volatile__( \ 229 __asm__ __volatile__( \
230 "1: ldrbt %1,[%2]\n" \ 230 "1: ldrbt %1,[%2]\n" \
231 "2:\n" \ 231 "2:\n" \
232 " .section .fixup,\"ax\"\n" \ 232 " .pushsection .fixup,\"ax\"\n" \
233 " .align 2\n" \ 233 " .align 2\n" \
234 "3: mov %0, %3\n" \ 234 "3: mov %0, %3\n" \
235 " mov %1, #0\n" \ 235 " mov %1, #0\n" \
236 " b 2b\n" \ 236 " b 2b\n" \
237 " .previous\n" \ 237 " .popsection\n" \
238 " .section __ex_table,\"a\"\n" \ 238 " .pushsection __ex_table,\"a\"\n" \
239 " .align 3\n" \ 239 " .align 3\n" \
240 " .long 1b, 3b\n" \ 240 " .long 1b, 3b\n" \
241 " .previous" \ 241 " .popsection" \
242 : "+r" (err), "=&r" (x) \ 242 : "+r" (err), "=&r" (x) \
243 : "r" (addr), "i" (-EFAULT) \ 243 : "r" (addr), "i" (-EFAULT) \
244 : "cc") 244 : "cc")
@@ -265,16 +265,16 @@ do { \
265 __asm__ __volatile__( \ 265 __asm__ __volatile__( \
266 "1: ldrt %1,[%2]\n" \ 266 "1: ldrt %1,[%2]\n" \
267 "2:\n" \ 267 "2:\n" \
268 " .section .fixup,\"ax\"\n" \ 268 " .pushsection .fixup,\"ax\"\n" \
269 " .align 2\n" \ 269 " .align 2\n" \
270 "3: mov %0, %3\n" \ 270 "3: mov %0, %3\n" \
271 " mov %1, #0\n" \ 271 " mov %1, #0\n" \
272 " b 2b\n" \ 272 " b 2b\n" \
273 " .previous\n" \ 273 " .popsection\n" \
274 " .section __ex_table,\"a\"\n" \ 274 " .pushsection __ex_table,\"a\"\n" \
275 " .align 3\n" \ 275 " .align 3\n" \
276 " .long 1b, 3b\n" \ 276 " .long 1b, 3b\n" \
277 " .previous" \ 277 " .popsection" \
278 : "+r" (err), "=&r" (x) \ 278 : "+r" (err), "=&r" (x) \
279 : "r" (addr), "i" (-EFAULT) \ 279 : "r" (addr), "i" (-EFAULT) \
280 : "cc") 280 : "cc")
@@ -310,15 +310,15 @@ do { \
310 __asm__ __volatile__( \ 310 __asm__ __volatile__( \
311 "1: strbt %1,[%2]\n" \ 311 "1: strbt %1,[%2]\n" \
312 "2:\n" \ 312 "2:\n" \
313 " .section .fixup,\"ax\"\n" \ 313 " .pushsection .fixup,\"ax\"\n" \
314 " .align 2\n" \ 314 " .align 2\n" \
315 "3: mov %0, %3\n" \ 315 "3: mov %0, %3\n" \
316 " b 2b\n" \ 316 " b 2b\n" \
317 " .previous\n" \ 317 " .popsection\n" \
318 " .section __ex_table,\"a\"\n" \ 318 " .pushsection __ex_table,\"a\"\n" \
319 " .align 3\n" \ 319 " .align 3\n" \
320 " .long 1b, 3b\n" \ 320 " .long 1b, 3b\n" \
321 " .previous" \ 321 " .popsection" \
322 : "+r" (err) \ 322 : "+r" (err) \
323 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ 323 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
324 : "cc") 324 : "cc")
@@ -343,15 +343,15 @@ do { \
343 __asm__ __volatile__( \ 343 __asm__ __volatile__( \
344 "1: strt %1,[%2]\n" \ 344 "1: strt %1,[%2]\n" \
345 "2:\n" \ 345 "2:\n" \
346 " .section .fixup,\"ax\"\n" \ 346 " .pushsection .fixup,\"ax\"\n" \
347 " .align 2\n" \ 347 " .align 2\n" \
348 "3: mov %0, %3\n" \ 348 "3: mov %0, %3\n" \
349 " b 2b\n" \ 349 " b 2b\n" \
350 " .previous\n" \ 350 " .popsection\n" \
351 " .section __ex_table,\"a\"\n" \ 351 " .pushsection __ex_table,\"a\"\n" \
352 " .align 3\n" \ 352 " .align 3\n" \
353 " .long 1b, 3b\n" \ 353 " .long 1b, 3b\n" \
354 " .previous" \ 354 " .popsection" \
355 : "+r" (err) \ 355 : "+r" (err) \
356 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ 356 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
357 : "cc") 357 : "cc")
@@ -371,16 +371,16 @@ do { \
371 THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ 371 THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \
372 THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ 372 THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \
373 "3:\n" \ 373 "3:\n" \
374 " .section .fixup,\"ax\"\n" \ 374 " .pushsection .fixup,\"ax\"\n" \
375 " .align 2\n" \ 375 " .align 2\n" \
376 "4: mov %0, %3\n" \ 376 "4: mov %0, %3\n" \
377 " b 3b\n" \ 377 " b 3b\n" \
378 " .previous\n" \ 378 " .popsection\n" \
379 " .section __ex_table,\"a\"\n" \ 379 " .pushsection __ex_table,\"a\"\n" \
380 " .align 3\n" \ 380 " .align 3\n" \
381 " .long 1b, 4b\n" \ 381 " .long 1b, 4b\n" \
382 " .long 2b, 4b\n" \ 382 " .long 2b, 4b\n" \
383 " .previous" \ 383 " .popsection" \
384 : "+r" (err), "+r" (__pu_addr) \ 384 : "+r" (err), "+r" (__pu_addr) \
385 : "r" (x), "i" (-EFAULT) \ 385 : "r" (x), "i" (-EFAULT) \
386 : "cc") 386 : "cc")
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
59#endif /* CONFIG_IWMMXT */ 59#endif /* CONFIG_IWMMXT */
60 60
61#ifdef CONFIG_VFP 61#ifdef CONFIG_VFP
62#if __LINUX_ARM_ARCH__ < 6
63/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
64 * word after the registers, and a word of padding at the end for
65 * alignment. */
66#define VFP_MAGIC 0x56465001 62#define VFP_MAGIC 0x56465001
67#define VFP_STORAGE_SIZE 152
68#else
69#define VFP_MAGIC 0x56465002
70#define VFP_STORAGE_SIZE 144
71#endif
72 63
73struct vfp_sigframe 64struct vfp_sigframe
74{ 65{
75 unsigned long magic; 66 unsigned long magic;
76 unsigned long size; 67 unsigned long size;
77 union vfp_state storage; 68 struct user_vfp ufp;
78}; 69 struct user_vfp_exc ufp_exc;
70} __attribute__((__aligned__(8)));
71
72/*
73 * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
74 * 4 bytes padding.
75 */
76#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
77
79#endif /* CONFIG_VFP */ 78#endif /* CONFIG_VFP */
80 79
81/* 80/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
91#ifdef CONFIG_IWMMXT 90#ifdef CONFIG_IWMMXT
92 struct iwmmxt_sigframe iwmmxt; 91 struct iwmmxt_sigframe iwmmxt;
93#endif 92#endif
94#if 0 && defined CONFIG_VFP /* Not yet saved. */ 93#ifdef CONFIG_VFP
95 struct vfp_sigframe vfp; 94 struct vfp_sigframe vfp;
96#endif 95#endif
97 /* Something that isn't a valid magic number for any coprocessor. */ 96 /* Something that isn't a valid magic number for any coprocessor. */
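The arithmetic behind the VFP_STORAGE_SIZE comment, spelled out for 32-bit ARM:

/*
 * magic + size                                =   8 bytes
 * struct user_vfp: 32*8 + 4 = 260, padded to
 *   its 8-byte alignment                      = 264 bytes
 * struct user_vfp_exc: 3 * 4                  =  12 bytes
 * subtotal 284, rounded up by __aligned(8)    = 288 bytes
 *   (hence the 4 bytes of trailing padding)
 */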
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 4e506d09e5f9..dd2bf53000fe 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -391,6 +391,7 @@
391#define __NR_pwritev (__NR_SYSCALL_BASE+362) 391#define __NR_pwritev (__NR_SYSCALL_BASE+362)
392#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) 392#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) 393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
394#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
394 395
395/* 396/*
396 * The following SWIs are ARM private. 397 * The following SWIs are ARM private.
@@ -442,9 +443,12 @@
442#define __ARCH_WANT_SYS_SIGPROCMASK 443#define __ARCH_WANT_SYS_SIGPROCMASK
443#define __ARCH_WANT_SYS_RT_SIGACTION 444#define __ARCH_WANT_SYS_RT_SIGACTION
444#define __ARCH_WANT_SYS_RT_SIGSUSPEND 445#define __ARCH_WANT_SYS_RT_SIGSUSPEND
446#define __ARCH_WANT_SYS_OLD_MMAP
447#define __ARCH_WANT_SYS_OLD_SELECT
445 448
446#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) 449#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
447#define __ARCH_WANT_SYS_TIME 450#define __ARCH_WANT_SYS_TIME
451#define __ARCH_WANT_SYS_IPC
448#define __ARCH_WANT_SYS_OLDUMOUNT 452#define __ARCH_WANT_SYS_OLDUMOUNT
449#define __ARCH_WANT_SYS_ALARM 453#define __ARCH_WANT_SYS_ALARM
450#define __ARCH_WANT_SYS_UTIME 454#define __ARCH_WANT_SYS_UTIME
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
83 83
84/* 84/*
85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
86 * are ignored by the ptrace system call. 86 * are ignored by the ptrace system call and the signal handler.
87 */ 87 */
88struct user_vfp { 88struct user_vfp {
89 unsigned long long fpregs[32]; 89 unsigned long long fpregs[32];
90 unsigned long fpscr; 90 unsigned long fpscr;
91}; 91};
92 92
93/*
94 * VFP exception registers exposed to user space during signal delivery.
 95 * Fields not relevant to the current VFP architecture are ignored.
96 */
97struct user_vfp_exc {
98 unsigned long fpexc;
99 unsigned long fpinst;
100 unsigned long fpinst2;
101};
102
93#endif /* _ARM_USER_H */ 103#endif /* _ARM_USER_H */
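A user-space sketch of consuming this layout via ptrace, assuming the ARM-specific PTRACE_GETVFPREGS request (value taken from asm/ptrace.h) and mirroring struct user_vfp locally:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETVFPREGS
#define PTRACE_GETVFPREGS 27            /* ARM-specific, see asm/ptrace.h */
#endif

struct user_vfp {                       /* mirrors the layout above */
        unsigned long long fpregs[32];
        unsigned long fpscr;
};

static void dump_fpscr(pid_t pid)
{
        struct user_vfp vfp;

        if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &vfp) == 0)
                printf("fpscr = %08lx\n", vfp.fpscr);
}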