diff options
author | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 02:13:36 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 02:13:36 -0400 |
commit | 26ff6c11ef38e08990c1e417c299246e6ab18ff7 (patch) | |
tree | ebd37fd0270b7c7dfe8474a046663db78fcdb1ab /include/asm-sh/cpu-sh4 | |
parent | 9359e757709a211040e4b0151eae69248e7c6eca (diff) |
sh: page table alloc cleanups and page fault optimizations.
Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.
The page fault handler is also optimized slightly, we no longer waste
cycles on IRQ disabling for flushing of the page from the ITLB, since
we're already under CLI protection by the initial exception handler.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh/cpu-sh4')
-rw-r--r-- | include/asm-sh/cpu-sh4/cacheflush.h | 29 |
1 file changed, 12 insertions, 17 deletions
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h index ea58c4c5944d..a95fc951aff6 100644 --- a/include/asm-sh/cpu-sh4/cacheflush.h +++ b/include/asm-sh/cpu-sh4/cacheflush.h | |||
@@ -16,30 +16,26 @@ | |||
16 | * caching; in which case they're only semi-broken), | 16 | * caching; in which case they're only semi-broken), |
17 | * so we need them. | 17 | * so we need them. |
18 | */ | 18 | */ |
19 | struct page; | 19 | void flush_cache_all(void); |
20 | struct mm_struct; | 20 | void flush_cache_mm(struct mm_struct *mm); |
21 | struct vm_area_struct; | 21 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, |
22 | 22 | unsigned long end); | |
23 | extern void flush_cache_all(void); | 23 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, |
24 | extern void flush_cache_mm(struct mm_struct *mm); | 24 | unsigned long pfn); |
25 | extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 25 | void flush_dcache_page(struct page *pg); |
26 | unsigned long end); | ||
27 | extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); | ||
28 | extern void flush_dcache_page(struct page *pg); | ||
29 | 26 | ||
30 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 27 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
31 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 28 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
32 | 29 | ||
33 | extern void flush_icache_range(unsigned long start, unsigned long end); | 30 | void flush_icache_range(unsigned long start, unsigned long end); |
34 | extern void flush_cache_sigtramp(unsigned long addr); | 31 | void flush_cache_sigtramp(unsigned long addr); |
35 | extern void flush_icache_user_range(struct vm_area_struct *vma, | 32 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, |
36 | struct page *page, unsigned long addr, | 33 | unsigned long addr, int len); |
37 | int len); | ||
38 | 34 | ||
39 | #define flush_icache_page(vma,pg) do { } while (0) | 35 | #define flush_icache_page(vma,pg) do { } while (0) |
40 | 36 | ||
41 | /* Initialization of P3 area for copy_user_page */ | 37 | /* Initialization of P3 area for copy_user_page */ |
42 | extern void p3_cache_init(void); | 38 | void p3_cache_init(void); |
43 | 39 | ||
44 | #define PG_mapped PG_arch_1 | 40 | #define PG_mapped PG_arch_1 |
45 | 41 | ||
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr, | |||
57 | } | 53 | } |
58 | #endif /* CONFIG_MMU */ | 54 | #endif /* CONFIG_MMU */ |
59 | #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ | 55 | #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ |
60 | |||