author    H. Peter Anvin <hpa@zytor.com>  2010-04-29 19:53:17 -0400
committer H. Peter Anvin <hpa@zytor.com>  2010-04-29 19:53:17 -0400
commit    d9c5841e22231e4e49fd0a1004164e6fce59b7a6 (patch)
tree      e1f589c46b3ff79bbe7b1b2469f6362f94576da6 /arch/arm/include/asm
parent    b701a47ba48b698976fb2fe05fb285b0edc1d26a (diff)
parent    5967ed87ade85a421ef814296c3c7f182b08c225 (diff)
Merge branch 'x86/asm' into x86/atomic
Merge reason:
    Conflict between LOCK_PREFIX_HERE and relative alternatives pointers

Resolved Conflicts:
    arch/x86/include/asm/alternative.h
    arch/x86/kernel/alternative.c

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/atomic.h               | 228
 arch/arm/include/asm/cacheflush.h           | 121
 arch/arm/include/asm/clkdev.h               |   4
 arch/arm/include/asm/cpu.h                  |   1
 arch/arm/include/asm/dma-mapping.h          |  87
 arch/arm/include/asm/dma.h                  |   4
 arch/arm/include/asm/elf.h                  |   1
 arch/arm/include/asm/entry-macro-vic2.S     |  57
 arch/arm/include/asm/hardware/iop3xx-adma.h |  12
 arch/arm/include/asm/hardware/it8152.h      |  12
 arch/arm/include/asm/hardware/locomo.h      |   4
 arch/arm/include/asm/hardware/sa1111.h      |   4
 arch/arm/include/asm/highmem.h              |  15
 arch/arm/include/asm/io.h                   |  11
 arch/arm/include/asm/irq.h                  |   1
 arch/arm/include/asm/kmap_types.h           |   1
 arch/arm/include/asm/mach/time.h            |   8
 arch/arm/include/asm/memory.h               |  23
 arch/arm/include/asm/mmu.h                  |   1
 arch/arm/include/asm/mmu_context.h          |  15
 arch/arm/include/asm/outercache.h           |  75
 arch/arm/include/asm/page.h                 |   7
 arch/arm/include/asm/pci.h                  |  11
 arch/arm/include/asm/perf_event.h           |  31
 arch/arm/include/asm/pgtable-nommu.h        |   5
 arch/arm/include/asm/pmu.h                  |  75
 arch/arm/include/asm/ptrace.h               |   8
 arch/arm/include/asm/setup.h                |  12
 arch/arm/include/asm/smp_plat.h             |   5
 arch/arm/include/asm/spinlock.h             |  36
 arch/arm/include/asm/system.h               |  19
 arch/arm/include/asm/thread_info.h          |   3
 arch/arm/include/asm/tlbflush.h             |   3
 arch/arm/include/asm/ucontext.h             |  23
 arch/arm/include/asm/unistd.h               |   4
 arch/arm/include/asm/user.h                 |  12
 36 files changed, 742 insertions(+), 197 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234..e8ddec2cb15 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
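[Usage note — not part of the patch] A minimal sketch of how the atomic64 API added above reads from caller code; the counter and function names are illustrative, not from this series:

        #include <linux/types.h>
        #include <asm/atomic.h>

        static atomic64_t bytes_done = ATOMIC64_INIT(0);

        static u64 account(u64 len)
        {
                /* one LDREXD/STREXD retry loop, with full barriers */
                u64 now = atomic64_add_return(len, &bytes_done);

                /* increments only while the counter is non-zero */
                if (!atomic64_inc_not_zero(&bytes_done))
                        return 0;
                return now;
        }

Each helper pairs an exclusive 64-bit load with an exclusive store, so the whole read-modify-write is atomic without disabling interrupts.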
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee..0d08d4170b6 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -42,7 +43,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
@@ -154,16 +156,16 @@
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
- *	flush_cache_kern_all()
+ *	flush_kern_all()
  *
  *		Unconditionally clean and invalidate the entire cache.
  *
- *	flush_cache_user_mm(mm)
+ *	flush_user_all()
  *
  *		Clean and invalidate all user space cache entries
  *		before a change of page tables.
 *
- *	flush_cache_user_range(start, end, flags)
+ *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
@@ -179,23 +181,22 @@
 *	- start  - virtual start address
 *	- end    - virtual end address
 *
- * DMA Cache Coherency
- * ===================
- *
- *	dma_inv_range(start, end)
- *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
+ *	coherent_user_range(start, end)
+ *
+ *		Ensure coherency between the Icache and the Dcache in the
+ *		region described by start, end.  If you have non-snooping
+ *		Harvard caches, you need to implement this function.
 *	- start  - virtual start address
 *	- end    - virtual end address
 *
- *	dma_clean_range(start, end)
+ *	flush_kern_dcache_area(kaddr, size)
 *
- *		Clean (write back) the specified virtual address range.
- *	- start  - virtual start address
- *	- end    - virtual end address
+ *		Ensure that the data held in page is written back.
+ *	- kaddr  - page address
+ *	- size   - region size
+ *
+ * DMA Cache Coherency
+ * ===================
 *
 *	dma_flush_range(start, end)
 *
@@ -213,15 +214,10 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
-	void (*dma_flush_range)(const void *, const void *);
-};
-
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
+	void (*dma_flush_range)(const void *, const void *);
 };
 
 /*
@@ -244,8 +240,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -270,58 +266,23 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -355,17 +316,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -373,15 +323,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -432,6 +377,16 @@ static inline void __flush_icache_all(void)
 		    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
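[Usage note — not part of the patch] flush_kernel_vmap_range()/invalidate_kernel_vmap_range() implement the generic contract for I/O done to pages the kernel also accesses through a vmap() alias. A hedged sketch, with illustrative names:

        #include <linux/vmalloc.h>
        #include <asm/cacheflush.h>

        static void io_via_vmap_alias(void *vmap_addr, int len)
        {
                /* About to DMA *from* data written through the alias:
                 * write dirty lines back so the device sees them. */
                flush_kernel_vmap_range(vmap_addr, len);

                /* ... device reads/writes the underlying pages ... */

                /* Before reading data the device *wrote*, discard any
                 * stale lines covering the alias. */
                invalidate_kernel_vmap_range(vmap_addr, len);
        }

On non-aliasing VIPT caches both calls compile away to nothing, which is why the helpers test the cache type first.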
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c627b3..b56c1389b6f 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;
@@ -27,4 +28,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
 void clkdev_add(struct clk_lookup *cl);
 void clkdev_drop(struct clk_lookup *cl);
 
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
 #endif
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 634b2d7c612..793968173be 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,6 +11,7 @@
 #define __ASM_ARM_CPU_H
 
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83f..69ce0727edb 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
  *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-				 size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -88,6 +128,14 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
+#ifdef CONFIG_DMABOUNCE
+	if (dev->archdata.dmabounce) {
+		if (dma_mask >= ISA_DMA_THRESHOLD)
+			return 0;
+		else
+			return -EIO;
+	}
+#endif
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
@@ -304,8 +352,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +376,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +398,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +418,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +447,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +462,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
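[Usage note — not part of the patch] The "buffer ownership" model in the new comment maps onto the streaming DMA API like this; a generic sketch with illustrative names:

        #include <linux/errno.h>
        #include <linux/dma-mapping.h>

        static int send_buffer(struct device *dev, void *buf, size_t len)
        {
                /* CPU -> device: caches are cleaned before the transfer */
                dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

                if (dma_mapping_error(dev, handle))
                        return -ENOMEM;

                /* ... hand 'handle' to the hardware, wait for completion ... */

                /* device -> CPU: any invalidation is delayed until here,
                 * which is what defeats speculative prefetches */
                dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }

Between map and unmap the CPU must not touch the buffer; that is the whole point of the ownership transitions.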
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 7edf3536df2..ca51143f97f 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -138,12 +138,12 @@ extern int get_dma_residue(unsigned int chan);
 #define NO_DMA	255
 #endif
 
+#endif /* CONFIG_ISA_DMA_API */
+
 #ifdef CONFIG_PCI
 extern int isa_dma_bridge_buggy;
 #else
 #define isa_dma_bridge_buggy	(0)
 #endif
 
-#endif /* CONFIG_ISA_DMA_API */
-
 #endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index a399bb5730f..bff056489cc 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -98,6 +98,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
+struct task_struct;
 int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define ELF_CORE_COPY_TASK_REGS dump_task_regs
 
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
new file mode 100644
index 00000000000..3ceb85e4385
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-vic2.S
@@ -0,0 +1,57 @@
+/* arch/arm/include/asm/entry-macro-vic2.S
+ *
+ * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Low-level IRQ helper macros for a device with two VICs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+*/
+
+/* This should be included from <mach/entry-macro.S> with the necessary
+ * defines for virtual addresses and IRQ bases for the two vics.
+ *
+ * The code needs the following defined:
+ *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ
+ *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ
+ *	VA_VIC0		Virtual address of VIC0
+ *	VA_VIC1		Virtual address of VIC1
+ *
+ * Note, code assumes VIC0's virtual address is an ARM immediate constant
+ * away from VIC1.
+*/
+
+#include <asm/hardware/vic.h>
+
+	.macro	disable_fiq
+	.endm
+
+	.macro	get_irqnr_preamble, base, tmp
+	ldr	\base, =VA_VIC0
+	.endm
+
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+	@ check the vic0
+	mov	\irqnr, #IRQ_VIC0_BASE + 31
+	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
+	teq	\irqstat, #0
+
+	@ otherwise try vic1
+	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
+	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
+	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+	teqeq	\irqstat, #0
+
+	clzne	\irqstat, \irqstat
+	subne	\irqnr, \irqnr, \irqstat
+	.endm
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28..9b28f1243bd 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
 
-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;
 
 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
 
-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }
 
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 74b5fff7f57..6700c7fc7eb 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,6 +75,18 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
+#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+
+/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
+#define IT8152_LD_IRQ_COUNT     9
+#define IT8152_LP_IRQ_COUNT     16
+#define IT8152_PD_IRQ_COUNT     15
+
+/* Priorities: */
+#define IT8152_PD_IRQ(i)  IT8152_IRQ(i)
+#define IT8152_LP_IRQ(i)  (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
+#define IT8152_LD_IRQ(i)  (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
+
 /* frequently used interrupts */
 #define IT8152_PCISERR		IT8152_PD_IRQ(14)
 #define IT8152_H2PTADR		IT8152_PD_IRQ(13)
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 954b1be991b..74e51d6bd93 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -214,4 +214,8 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
 /* Frontlight control */
 void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
 
+struct locomo_platform_data {
+	int	irq_base;	/* IRQ base for cascaded on-chip IRQs */
+};
+
 #endif
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 5da2595759e..92ed254c175 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -578,4 +578,8 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int
 void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
 void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
 
+struct sa1111_platform_data {
+	int	irq_base;	/* base for cascaded on-chip IRQs */
+};
+
 #endif  /* _ASM_ARCH_SA1111 */
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b..feb988a7ec3 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
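[Usage note — not part of the patch] The km_type-based atomic kmap API of this era is used as below; an illustrative sketch, not code from this series:

        #include <linux/highmem.h>
        #include <linux/string.h>

        static void zero_page_atomic(struct page *page)
        {
                /* maps into a fixed per-CPU slot; the caller must not sleep */
                void *vaddr = kmap_atomic(page, KM_USER0);

                memset(vaddr, 0, PAGE_SIZE);

                /* release with the same km_type that was used to map */
                kunmap_atomic(vaddr, KM_USER0);
        }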
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30c..c980156f326 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+	size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+	void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a8b79..237282f7c76 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 struct irqaction;
+struct pt_regs;
 extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a518..c4b2ea3fbe4 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd040..8bffc3ff3ac 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
 extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82a257..4312ee5e3d0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
  */
 #define IOREMAP_MAX_ORDER	24
 
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE	SZ_2M
+#endif
+
+#define CONSISTENT_END		(0xffe00000UL)
+#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #else /* CONFIG_MMU */
 
 /*
@@ -93,11 +104,11 @@
 #endif
 
 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET		(CONFIG_DRAM_BASE)
+#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
 #endif
 
 #ifndef END_MEM
-#define END_MEM			(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
@@ -113,14 +124,6 @@
 #endif /* !CONFIG_MMU */
 
 /*
- * Size of DMA-consistent memory region.  Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE	SZ_2M
-#endif
-
-/*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a..68870c77667 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329d..a0b3cac0547 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 00000000000..25f76bae57a
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	void (*sync)(void);
+#endif
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+	if (outer_cache.sync)
+		outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
+#endif	/* __ASM_OUTERCACHE_H */
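[Usage note — not part of the patch] An outer-cache driver (an L2 controller, say) fills in the global outer_cache at init time, after which the inline wrappers above dispatch to it; with no outer cache configured they compile to no-ops. A hedged sketch — the my_l2_* names are hypothetical and this only applies under CONFIG_OUTER_CACHE:

        #include <linux/init.h>
        #include <asm/outercache.h>

        /* hypothetical L2 controller maintenance hooks */
        static void my_l2_inv_range(unsigned long start, unsigned long end) { /* ... */ }
        static void my_l2_clean_range(unsigned long start, unsigned long end) { /* ... */ }
        static void my_l2_flush_range(unsigned long start, unsigned long end) { /* ... */ }

        static void __init my_l2_init(void)
        {
                outer_cache.inv_range = my_l2_inv_range;
                outer_cache.clean_range = my_l2_clean_range;
                outer_cache.flush_range = my_l2_flush_range;
        }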
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce3..a485ac3c869 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
 #endif
 
 struct page;
+struct vm_area_struct;
 
 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 };
 
 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr)		\
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
-	__cpu_copy_user_highpage(to, from, vaddr)
+	__cpu_copy_user_highpage(to, from, vaddr, vma)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 226cddd2fb6..47980118d0a 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -30,17 +30,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
-/*
- * Whether pci_unmap_{single,page} is a nop depends upon the
- * configuration.
- */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 00000000000..49e3049aba3
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/arm/include/asm/perf_event.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts enabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+/* ARM performance counters start from 1 (in the cp15 accesses) so use the
+ * same indexes here for consistency. */
+#define PERF_EVENT_INDEX_OFFSET 1
+
+#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939a..ffc0e85775b 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; }
  */
 #define pgprot_noncached(prot)	__pgprot(0)
 #define pgprot_writecombine(prot)	__pgprot(0)
+#define pgprot_dmacoherent(prot)	__pgprot(0)
 
 
 /*
@@ -86,8 +87,8 @@ extern unsigned int kobjsize(const void *objp);
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
-#define	VMALLOC_START	0
-#define	VMALLOC_END	0xffffffff
+#define	VMALLOC_START	0UL
+#define	VMALLOC_END	0xffffffffUL
 
 #define FIRST_USER_ADDRESS      (0)
 
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
new file mode 100644
index 00000000000..2829b9f981a
--- /dev/null
+++ b/arch/arm/include/asm/pmu.h
@@ -0,0 +1,75 @@
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
15#ifdef CONFIG_CPU_HAS_PMU
16
17struct pmu_irqs {
18 const int *irqs;
19 int num_irqs;
20};
21
22/**
23 * reserve_pmu() - reserve the hardware performance counters
24 *
25 * Reserve the hardware performance counters in the system for exclusive use.
26 * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
27 * encoded error on failure.
28 */
29extern const struct pmu_irqs *
30reserve_pmu(void);
31
32/**
33 * release_pmu() - Relinquish control of the performance counters
34 *
35 * Release the performance counters and allow someone else to use them.
36 * Callers must have disabled the counters and released IRQs before calling
37 * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
38 * a cookie.
39 */
40extern int
41release_pmu(const struct pmu_irqs *irqs);
42
43/**
44 * init_pmu() - Initialise the PMU.
45 *
46 * Initialise the system ready for PMU enabling. This should typically set the
47 * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
48 * the actual hardware initialisation.
49 */
50extern int
51init_pmu(void);
52
53#else /* CONFIG_CPU_HAS_PMU */
54
55static inline const struct pmu_irqs *
56reserve_pmu(void)
57{
58 return ERR_PTR(-ENODEV);
59}
60
61static inline int
62release_pmu(const struct pmu_irqs *irqs)
63{
64 return -ENODEV;
65}
66
67static inline int
68init_pmu(void)
69{
70 return -ENODEV;
71}
72
73#endif /* CONFIG_CPU_HAS_PMU */
74
75#endif /* __ARM_PMU_H__ */
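[Usage note — not part of the patch] Per the kerneldoc above, a subsystem such as perf or oprofile claims the counters roughly like this; a hedged sketch with illustrative names and the error handling trimmed:

        #include <linux/err.h>
        #include <asm/pmu.h>

        static const struct pmu_irqs *pmu_irqs;

        static int my_pmu_start(void)
        {
                pmu_irqs = reserve_pmu();       /* exclusive claim */
                if (IS_ERR(pmu_irqs))
                        return PTR_ERR(pmu_irqs);

                init_pmu();                     /* set IRQ affinity etc. */
                /* ... request_irq() on each pmu_irqs->irqs[i] ... */
                return 0;
        }

        static void my_pmu_stop(void)
        {
                /* ... disable counters, free the IRQs ... */
                release_pmu(pmu_irqs);          /* hand back the cookie */
        }

When CONFIG_CPU_HAS_PMU is unset, the stubs return -ENODEV and the caller simply fails to start.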
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index bbecccda76d..9dcb11e5902 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -97,9 +97,15 @@
  * stack during a system call.  Note that sizeof(struct pt_regs)
  * has to be a multiple of 8.
  */
+#ifndef __KERNEL__
 struct pt_regs {
 	long uregs[18];
 };
+#else /* __KERNEL__ */
+struct pt_regs {
+	unsigned long uregs[18];
+};
+#endif /* __KERNEL__ */
 
 #define ARM_cpsr	uregs[16]
 #define ARM_pc		uregs[15]
@@ -122,6 +128,8 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
+#define arch_has_single_step()	(1)
+
 #define user_mode(regs)	\
 	(((regs)->ARM_cpsr & 0xf) == 0)
 
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03..f392fb4437a 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-/*
- * Early command line parameters.
- */
-struct early_params {
-	const char *arg;
-	void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)					\
-static struct early_params __early_##fn __used			\
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e20084..e6215305544 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c91c64cab92..17eb355707d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+static inline void dsb_sev(void)
+{
+#if __LINUX_ARM_ARCH__ >= 7
+	__asm__ __volatile__ (
+		"dsb\n"
+		"sev"
+	);
+#elif defined(CONFIG_CPU_32v6K)
+	__asm__ __volatile__ (
+		"mcr p15, 0, %0, c7, c10, 4\n"
+		"sev"
+		: : "r" (0)
+	);
+#endif
+}
+
 /*
  * ARMv6 Spin-locking.
  *
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 	__asm__ __volatile__(
 "	str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev"
-#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }
 
 /*
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 	__asm__ __volatile__(
 	"str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev\n"
-#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-#ifdef CONFIG_CPU_32v6K
-"\n	cmp	%0, #0\n"
-"	mcreq	p15, 0, %0, c7, c10, 4\n"
-"	seveq"
-#endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
+
+	if (tmp == 0)
+		dsb_sev();
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 058e7e90881..4ace45ec3ef 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -73,8 +75,7 @@ extern unsigned int mem_fclk_21285;
 
 struct pt_regs;
 
-void die(const char *msg, struct pt_regs *regs, int err)
-		__attribute__((noreturn));
+void die(const char *msg, struct pt_regs *regs, int err);
 
 struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
@@ -138,10 +139,12 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -153,9 +156,9 @@ extern unsigned int user_debug;
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
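
Editor's note: mb() now pairs dsb() with outer_sync() so that writes parked in an outer cache's write buffer (e.g. a PL310 L2) are drained before a device is told to look at memory. A hedged sketch of why that matters (struct sketch_dev, SKETCH_DMA_GO and the register layout are invented for illustration):

struct sketch_dev {
	void __iomem	*regs;
	u32		*desc;		/* coherent descriptor memory */
};

#define SKETCH_DMA_GO	0x00		/* hypothetical doorbell offset */

static void sketch_start_dma(struct sketch_dev *dev, u32 buf_phys)
{
	dev->desc[0] = buf_phys;	/* publish the descriptor */
	mb();				/* dsb() + outer_sync(): drain outer write buffer */
	writel(1, dev->regs + SKETCH_DMA_GO);	/* device may now fetch it */
}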
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2dfb7d7a66e..b74970ec02c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);
 
-extern void vfp_sync_state(struct thread_info *thread);
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);
 
 #endif
 
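
Editor's note: the single vfp_sync_state() is split so callers can flush live VFP registers out to memory (sync) separately from invalidating the hardware copy after the saved state has been modified (flush). An illustrative caller shape, not code from this patch:

static int sketch_preserve_vfp(struct thread_info *thread)
{
	vfp_sync_hwstate(thread);	/* live VFP regs -> thread's saved state */
	/* copy the saved state out (e.g. to a signal frame) here */
	return 0;
}

static int sketch_restore_vfp(struct thread_info *thread)
{
	/* copy new state into the thread's saved state here */
	vfp_flush_hwstate(thread);	/* force reload from memory on next VFP use */
	return 0;
}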
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de35..e085e2c545e 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep);
 
 #endif
 
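
Editor's note: update_mmu_cache() now takes a pte_t * rather than a pte_t value, matching the cross-architecture prototype change, so the hook can re-read the entry in place. An illustrative generic-mm caller (sketch only; the wrapper name is invented):

static void sketch_set_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	set_pte_at(vma->vm_mm, addr, ptep, pteval);
	update_mmu_cache(vma, addr, ptep);	/* pointer, not value */
}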
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525..47f023aa849 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
 #define VFP_MAGIC		0x56465001
-#define VFP_STORAGE_SIZE	152
-#else
-#define VFP_MAGIC		0x56465002
-#define VFP_STORAGE_SIZE	144
-#endif
 
 struct vfp_sigframe
 {
 	unsigned long		magic;
 	unsigned long		size;
-	union vfp_state		storage;
-};
+	struct user_vfp		ufp;
+	struct user_vfp_exc	ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ * 4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE	sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@ struct aux_sigframe {
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
 	struct vfp_sigframe	vfp;
 #endif
 	/* Something that isn't a valid magic number for any coprocessor.  */
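
Editor's note: the layout comment is checkable: 8 (magic + size) + 264 (ufp: 32 eight-byte fpregs plus fpscr, padded to 8) + 12 (ufp_exc) = 284, which the 8-byte struct alignment rounds to 288. A hypothetical build-time assertion:

static inline void sketch_vfp_sigframe_check(void)
{
	BUILD_BUG_ON(VFP_STORAGE_SIZE != 288);	/* 8 + 264 + 12 + 4 padding */
}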
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 4e506d09e5f..dd2bf53000f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -391,6 +391,7 @@
 #define __NR_pwritev			(__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
 #define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
+#define __NR_recvmmsg			(__NR_SYSCALL_BASE+365)
 
 /*
  * The following SWIs are ARM private.
@@ -442,9 +443,12 @@
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_SYS_OLD_MMAP
+#define __ARCH_WANT_SYS_OLD_SELECT
 
 #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
 #define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_UTIME
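
Editor's note: __NR_recvmmsg lands at base+365; on EABI __NR_SYSCALL_BASE is 0, so until libc grows a wrapper the new syscall can be exercised directly. A userspace sketch (error handling elided; needs kernel headers new enough to define struct mmsghdr):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

#ifndef __NR_recvmmsg
#define __NR_recvmmsg 365		/* ARM EABI: __NR_SYSCALL_BASE + 365 */
#endif

static int sketch_recvmmsg(int fd, struct mmsghdr *vec, unsigned int vlen,
			   unsigned int flags, struct timespec *timeout)
{
	return syscall(__NR_recvmmsg, fd, vec, vlen, flags, timeout);
}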
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9d..05ac4b06876 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
  */
 struct user_vfp {
 	unsigned long long fpregs[32];
 	unsigned long fpscr;
 };
 
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+	unsigned long fpexc;
+	unsigned long fpinst;
+	unsigned long fpinst2;
+};
+
 #endif /* _ARM_USER_H */
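
Editor's note: the size arithmetic behind the ucontext.h comment can be reproduced from user space with mirror structs (illustrative only; real code should include the kernel's asm/user.h instead of redefining these):

#include <stdio.h>

struct sketch_user_vfp {		/* mirrors struct user_vfp */
	unsigned long long fpregs[32];
	unsigned long fpscr;
};

struct sketch_user_vfp_exc {		/* mirrors struct user_vfp_exc */
	unsigned long fpexc, fpinst, fpinst2;
};

int main(void)
{
	/* On ARM EABI: 264 (256 + 4, padded to 8) and 12 respectively */
	printf("user_vfp:     %zu bytes\n", sizeof(struct sketch_user_vfp));
	printf("user_vfp_exc: %zu bytes\n", sizeof(struct sketch_user_vfp_exc));
	return 0;
}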