author		David Howells <dhowells@redhat.com>	2010-10-27 12:28:43 -0400
committer	David Howells <dhowells@redhat.com>	2010-10-27 12:28:43 -0400
commit		93c10d3d68c469c1addacbc541da5518f1de021d
tree		0bf4b073fbd5a9362551b39757137cbab2f448a8 /arch/mn10300/include/asm
parent		344af921e6f23ea82487d76918d2643fcc88c311
MN10300: Reorder asm/cacheflush.h to put primitives first
Reorder asm/cacheflush.h to put the arch primitives first, before the main
functions, so that the main functions can be implemented as inline asm rather
than as #defines when they are non-trivial.
Signed-off-by: David Howells <dhowells@redhat.com>
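To illustrate what the reordering enables (a minimal sketch, not part of this patch): with the mn10300_*cache* primitives declared at the top of the header, a later change could turn one of the main functions into a static inline that calls a primitive, rather than leaving it a #define. mn10300_icache_inv() is a real primitive declared in this header, but the particular implementation below is hypothetical.

/* Hypothetical follow-up enabled by the new ordering -- not in this patch.
 * Since mn10300_icache_inv() is declared earlier in the header, a main
 * function such as flush_icache_range() could become a real inline
 * instead of a #define.
 */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Illustration only: invalidate the whole icache; a real
	 * implementation would confine itself to [start, end).
	 */
	mn10300_icache_inv();
}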
Diffstat (limited to 'arch/mn10300/include/asm')
-rw-r--r--	arch/mn10300/include/asm/cacheflush.h	88
1 file changed, 44 insertions(+), 44 deletions(-)
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index b85be1d2fd32..0b5d00438374 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,49 +17,7 @@
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
- */
-#define flush_cache_all()			do {} while (0)
-#define flush_cache_mm(mm)			do {} while (0)
-#define flush_cache_dup_mm(mm)			do {} while (0)
-#define flush_cache_range(mm, start, end)	do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
-#define flush_cache_vmap(start, end)		do {} while (0)
-#define flush_cache_vunmap(start, end)		do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do {} while (0)
-#define flush_dcache_mmap_lock(mapping)		do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifdef CONFIG_MN10300_CACHE_ENABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)		do {} while (0)
-#define flush_icache_page(vma, pg)		do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-	flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {					\
-		memcpy(dst, src, len);		\
-		flush_icache_page(vma, page);	\
-	} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-/*
- * primitive routines
+ * Primitive routines
  */
 #ifdef CONFIG_MN10300_CACHE_ENABLED
 extern void mn10300_icache_inv(void);
@@ -106,7 +64,49 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #endif /* CONFIG_MN10300_CACHE_ENABLED */
 
 /*
- * internal debugging function
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()			do {} while (0)
+#define flush_cache_mm(mm)			do {} while (0)
+#define flush_cache_dup_mm(mm)			do {} while (0)
+#define flush_cache_range(mm, start, end)	do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
+#define flush_cache_vmap(start, end)		do {} while (0)
+#define flush_cache_vunmap(start, end)		do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)			do {} while (0)
+#define flush_dcache_mmap_lock(mapping)		do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
+
+#else
+
+#define flush_icache_range(start, end)		do {} while (0)
+#define flush_icache_page(vma, pg)		do {} while (0)
+
+#endif
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+	flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do {					\
+		memcpy(dst, src, len);		\
+		flush_icache_page(vma, page);	\
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+/*
+ * Internal debugging function
  */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);