author     Linus Torvalds <torvalds@g5.osdl.org>  2006-09-27 11:49:07 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-27 11:49:07 -0400
commit     b98adfccdf5f8dd34ae56a2d5adbe2c030bd4674 (patch)
tree       1807a029520f550dd4f90c95ad0063bceb00d645 /include/asm-sh/cpu-sh3/cacheflush.h
parent     ba21fe71725f94792330ebc3034ef2b35a36276f (diff)
parent     33573c0e3243aaa38b6ad96942de85a1b713c2ff (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (108 commits)
  sh: Fix occasional flush_cache_4096() stack corruption.
  sh: Calculate shm alignment at runtime.
  sh: dma-mapping compile fixes.
  sh: Initial vsyscall page support.
  sh: Clean up PAGE_SIZE definition for assembly use.
  sh: Selective flush_cache_mm() flushing.
  sh: More intelligent entry_mask/way_size calculation.
  sh: Support for L2 cache on newer SH-4A CPUs.
  sh: Update kexec support for API changes.
  sh: Optimized readsl()/writesl() support.
  sh: Report movli.l/movco.l capabilities.
  sh: CPU flags in AT_HWCAP in ELF auxvt.
  sh: Add support for 4K stacks.
  sh: Enable /proc/kcore support.
  sh: stack debugging support.
  sh: select CONFIG_EMBEDDED.
  sh: machvec rework.
  sh: Solution Engine SH7343 board support.
  sh: SH7710VoIPGW board support.
  sh: Enable verbose BUG() support.
  ...
Diffstat (limited to 'include/asm-sh/cpu-sh3/cacheflush.h')
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h | 52
1 file changed, 16 insertions(+), 36 deletions(-)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c68f..03fde97a7fd0 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H
 
 /*
  * Cache flushing:
  *
  * - flush_cache_all() flushes entire cache
@@ -35,53 +35,33 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			       unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#endif
 
-#define p3_cache_init()				do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-#define HAVE_ARCH_UNMAPPED_AREA
+/* SH3 has unified cache so no special action needed here */
+#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#endif
+#define p3_cache_init()				do { } while (0)
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
-
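Note on the CACHE_ALIAS definition above: the in-file comment says a 32KB cache with 4KB pages "needs to check bit 12". The following is a minimal, self-contained C sketch of that arithmetic; it is not part of the patch, and cache_aliases() plus the sample addresses are made up for illustration, assuming the cache is indexed 8KB per way as the 0x00001000 value implies.

/*
 * Illustrative sketch (not from this patch): with 4KB pages the page
 * offset covers address bits [11:0], while an 8KB-per-way cache index
 * reaches up to bit 12.  Bit 12 therefore comes from the virtual page
 * number, so two virtual mappings of the same physical page can land in
 * different cache sets -- a cache alias.  CACHE_ALIAS is exactly bit 12.
 */
#include <stdio.h>

#define CACHE_ALIAS 0x00001000UL	/* bit 12, as in the header */

/* Hypothetical helper: do two virtual addresses of the same physical
 * page index different cache sets (i.e. alias)? */
static int cache_aliases(unsigned long va1, unsigned long va2)
{
	return ((va1 ^ va2) & CACHE_ALIAS) != 0;
}

int main(void)
{
	/* Bit 12 differs: both cached copies would need flushing. */
	printf("0x10001000 vs 0x10002000 alias: %d\n",
	       cache_aliases(0x10001000UL, 0x10002000UL));	/* -> 1 */
	/* Bit 12 equal: the two mappings share a cache set, no alias. */
	printf("0x10001000 vs 0x10003000 alias: %d\n",
	       cache_aliases(0x10001000UL, 0x10003000UL));	/* -> 0 */
	return 0;
}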