path: root/arch/arm/include/asm
author    Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-04 09:59:47 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-04 09:59:47 -0500
commit    5cb2faa6ede7ada9cb2bffc832c4ce60f53d6834 (patch)
tree      7b72b66081d042a41dc822575503133364857ce2 /arch/arm/include/asm
parent    e0ee98513d1a2e24d2ddbdecf4216bcca29d1158 (diff)
parent    6060e8df517847bf445ebc61de7d4d9c7faae990 (diff)
Merge branch 'pending-misc' (early part) into devel
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/cacheflush.h  27
-rw-r--r--  arch/arm/include/asm/elf.h          3
-rw-r--r--  arch/arm/include/asm/kmap_types.h   6
-rw-r--r--  arch/arm/include/asm/tlbflush.h     3
-rw-r--r--  arch/arm/include/asm/unistd.h       9
5 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index fd03fb63a332..9fd6d3ab68c0 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		    unsigned long uaddr, void *kaddr,
 		    unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -410,13 +420,16 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  */
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 static inline void __flush_icache_all(void)
 {
+#ifdef CONFIG_ARM_ERRATA_411920
+	extern void v6_icache_inval_all(void);
+	v6_icache_inval_all();
+#else
 	asm("mcr p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
 	    :
 	    : "r" (0));
+#endif
 }
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
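
The cacheflush.h change above is the heart of this merge: the inline VIVT cache-flush implementations keep their bodies but are renamed vivt_*(), non-VIPT builds get the old names back via #defines, and VIPT builds declare out-of-line versions. A minimal sketch of how such an out-of-line variant might dispatch back to the renamed inline at runtime, assuming the cache_is_vivt() predicate from <asm/cachetype.h>; this is an illustration, not the actual arch/arm/mm/flush.c code:

	void flush_cache_mm(struct mm_struct *mm)
	{
		if (cache_is_vivt()) {
			/* VIVT hardware: reuse the inline body renamed above */
			vivt_flush_cache_mm(mm);
			return;
		}

		/* VIPT handling (e.g. flushing an aliasing D-cache) goes here */
	}

The point of the rename is exactly this: a VIPT kernel can still run on VIVT hardware, so the old inline bodies must stay reachable under a name that no longer collides with the out-of-line declarations.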
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c3b911ee9151..6aac3f5bb2f3 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -98,6 +98,9 @@ extern int elf_check_arch(const struct elf32_hdr *);
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
 
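
The two added elf.h lines hook ARM into the generic core-dump path: when an architecture defines ELF_CORE_COPY_TASK_REGS, the generic helper uses it to fill per-thread register notes instead of copying pt_regs directly, and ARM now routes it to an arch-side dump_task_regs(). Roughly, the generic side of that era reduces to the following; this is a paraphrased sketch of the include/linux/elfcore.h helper, not a verbatim copy:

	static inline int elf_core_copy_task_regs(struct task_struct *t,
						  elf_gregset_t *elfregs)
	{
	#ifdef ELF_CORE_COPY_TASK_REGS
		/* arch override: ARM now routes this to dump_task_regs() */
		return ELF_CORE_COPY_TASK_REGS(t, elfregs);
	#else
		/* fallback: copy the thread's saved user registers directly */
		elf_core_copy_regs(elfregs, task_pt_regs(t));
		return 0;
	#endif
	}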
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index d16ec97ec9a9..c019949a5189 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -22,4 +22,10 @@ enum km_type {
 	KM_TYPE_NR
 };
 
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI		(-1)
+#define KM_NMI_PTE	(-1)
+#define KM_IRQ_PTE	(-1)
+#endif
+
 #endif
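
These kmap_types.h stubs exist purely to keep CONFIG_DEBUG_HIGHMEM builds compiling: the generic debug checker compares the caller's kmap slot against KM_NMI, KM_NMI_PTE and KM_IRQ_PTE, which ARM's km_type enum never defines. Pinning them to -1 makes the comparisons compile, and since no real slot is negative they can never spuriously match. A rough sketch of the kind of check involved, paraphrasing the debug_kmap_atomic() logic in mm/highmem.c rather than quoting it:

	static void debug_kmap_atomic(enum km_type type)
	{
		/* in NMI context, only the NMI-reserved slots are legal */
		if (in_nmi())
			WARN_ON(type != KM_NMI && type != KM_NMI_PTE);

		/* ... similar slot/context consistency checks for IRQs ... */
	}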
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a45ab5dd8255..c2f1605de359 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
 		if (tlb_flag(TLB_V3_FULL))
 			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
 		if (tlb_flag(TLB_V4_U_FULL))
@@ -360,6 +360,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		if (tlb_flag(TLB_V4_I_FULL))
 			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
 	}
+	put_cpu();
 
 	if (tlb_flag(TLB_V6_U_ASID))
 		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
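
The tlbflush.h hunks fix a preemption race: smp_processor_id() is only stable while preemption is off, so a preemptible caller could migrate between testing the mm's cpumask and issuing the flush, acting on the wrong CPU. get_cpu() returns the current CPU id with preemption disabled, and put_cpu() re-enables it once the per-CPU work is done. The general pattern, as a standalone sketch where do_local_work() is a hypothetical stand-in for the mcr sequence above:

	static void flush_on_this_cpu(struct mm_struct *mm)
	{
		int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

		if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
			do_local_work(cpu);	/* hypothetical per-CPU flush */

		put_cpu();		/* preempt_enable(); migration is safe again */
	}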
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7020217fc49f..4e506d09e5f9 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -403,6 +403,15 @@
 #define __ARM_NR_set_tls		(__ARM_NR_BASE+5)
 
 /*
+ * *NOTE*: This is a ghost syscall private to the kernel.  Only the
+ * __kuser_cmpxchg code in entry-armv.S should be aware of its
+ * existence.  Don't ever use this from user code.
+ */
+#ifdef __KERNEL__
+#define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
+#endif
+
+/*
  * The following syscalls are obsolete and no longer available for EABI.
  */
 #if defined(__ARM_EABI__) && !defined(__KERNEL__)
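
The ghost syscall number reserves a slot for the kernel's own use; user space never invokes it directly. Instead, user code reaches the cmpxchg emulation through the __kuser_cmpxchg helper in the vector page, and on configurations with no fast cmpxchg the helper may fall back to this ghost syscall internally. A hedged usage sketch from the user side, assuming the 0xffff0fc0 entry point and its 0-on-success convention described in the comments in arch/arm/kernel/entry-armv.S:

	typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

	/* atomically increment *ctr via the helper; returns the new value */
	static int atomic_inc(volatile int *ctr)
	{
		int old, new;

		do {
			old = *ctr;
			new = old + 1;
		} while (__kuser_cmpxchg(old, new, ctr) != 0);	/* 0 on success */

		return new;
	}

Routing user code through the fixed vector-page address lets the kernel pick the best implementation per CPU (native ldrex/strex, an interruption-detection trick, or the ghost syscall) without recompiling user binaries.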