author     Russell King <rmk+kernel@arm.linux.org.uk>   2009-10-25 06:40:02 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-12-01 13:20:07 -0500
commit     2f0b192633f1fbf253b21c90938733491549edae
tree       a429eaf88e591ea9674075ac6389e9a751da9806   /arch/arm
parent     29e553631b2a0d4eebd23db630572e1027a9967a
ARM: Avoid duplicated implementation for VIVT cache flushing
We had two copies of the wrapper code for VIVT cache flushing - one in
asm/cacheflush.h and one in arch/arm/mm/flush.c. Reduce this down to
one common copy.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
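In outline, the patch keeps a single static-inline VIVT implementation in the header and dispatches to it either at compile time (VIVT-only builds) or at run time (VIPT-capable builds). A condensed sketch of the flush_cache_mm case, simplified from the patch below; the other three wrappers follow the same pattern:

/* One copy of the VIVT implementation lives in asm/cacheflush.h. */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

#ifndef CONFIG_CPU_CACHE_VIPT
/* VIVT-only kernels: the generic API maps straight onto the inline. */
#define flush_cache_mm(mm)	vivt_flush_cache_mm(mm)
#else
/* VIPT-capable kernels: the out-of-line wrapper in arch/arm/mm/flush.c
 * checks the cache type at run time and reuses the same inline:
 *
 *	void flush_cache_mm(struct mm_struct *mm)
 *	{
 *		if (cache_is_vivt()) {
 *			vivt_flush_cache_mm(mm);
 *			return;
 *		}
 *		...
 *	}
 */
extern void flush_cache_mm(struct mm_struct *mm);
#endif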
Diffstat (limited to 'arch/arm')
-rw-r--r--   arch/arm/include/asm/cacheflush.h   20
-rw-r--r--   arch/arm/mm/flush.c                 17
2 files changed, 19 insertions, 18 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd21b882..61ae25caaacb 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		    unsigned long uaddr, void *kaddr,
 		    unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..a480f161a4bb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -41,8 +41,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
@@ -59,9 +58,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
@@ -78,10 +75,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
@@ -94,10 +88,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 