author     Paul Mundt <lethal@linux-sh.org>    2006-09-27 02:13:36 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2006-09-27 02:13:36 -0400
commit     26ff6c11ef38e08990c1e417c299246e6ab18ff7 (patch)
tree       ebd37fd0270b7c7dfe8474a046663db78fcdb1ab /include/asm-sh/cpu-sh4
parent     9359e757709a211040e4b0151eae69248e7c6eca (diff)
sh: page table alloc cleanups and page fault optimizations.
Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.

The page fault handler is also optimized slightly; we no longer waste
cycles on IRQ disabling for flushing of the page from the ITLB, since
we're already under CLI protection by the initial exception handler.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
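For context: the "generic folded PMD and PUD helpers" referred to above
are the <asm-generic/pgtable-nopmd.h> and <asm-generic/pgtable-nopud.h>
headers, which on a two-level port like sh collapse the unused middle
levels of the four-level page table API into the PGD at compile time.
Below is a minimal sketch (not part of the patch) of a walk written
against that API, assuming the calling convention of this kernel era;
the lookup_pte() helper name is hypothetical:

/*
 * Sketch only. With the nopud/nopmd headers included by the
 * architecture, pud_offset() and pmd_offset() reduce to pointer
 * casts, so a two-level port can be written against the generic
 * four-level API at no runtime cost.
 */
static inline pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* no-op when the PUD is folded */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);	/* no-op when the PMD is folded */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

The fault-path change follows similar reasoning: the SH exception entry
code has already disabled interrupts (CLI) by the time the handler runs,
so a local_irq_save()/local_irq_restore() pair around the ITLB flush
was redundant and can be dropped.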
Diffstat (limited to 'include/asm-sh/cpu-sh4')
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h | 29 ++++++++++++-----------------
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index ea58c4c5944d..a95fc951aff6 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,30 +16,26 @@
  * caching; in which case they're only semi-broken),
  * so we need them.
  */
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 
 #define flush_icache_page(vma,pg)	do { } while (0)
 
 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
 
 #define PG_mapped	PG_arch_1
 
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-