author    David Howells <dhowells@redhat.com>  2009-04-10 09:19:03 -0400
committer David Howells <dhowells@redhat.com>  2009-04-10 09:19:03 -0400
commit    da7616610c8d2ec16a8ada44216e836e5fcbd08b (patch)
tree      ef3e8ccc7a01694c0ab0eeea387bc46a8807669e  /arch/mn10300/include/asm/cacheflush.h
parent    62b8e680e61d3f48f2a12ee248ca03ea8f376926 (diff)
Move arch headers from include/asm-mn10300/ to arch/mn10300/include/asm/.
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch/mn10300/include/asm/cacheflush.h')
-rw-r--r--  arch/mn10300/include/asm/cacheflush.h  116
1 file changed, 116 insertions, 0 deletions
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
new file mode 100644
index 000000000000..2db746a251f8
--- /dev/null
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -0,0 +1,116 @@
/* MN10300 Cache flushing
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#ifndef __ASSEMBLY__

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * virtually-indexed cache management (our cache is physically indexed)
 */
#define flush_cache_all() do {} while (0)
#define flush_cache_mm(mm) do {} while (0)
#define flush_cache_dup_mm(mm) do {} while (0)
#define flush_cache_range(mm, start, end) do {} while (0)
#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
#define flush_cache_vmap(start, end) do {} while (0)
#define flush_cache_vunmap(start, end) do {} while (0)
#define flush_dcache_page(page) do {} while (0)
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
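
/*
 * Note: the cache is physically indexed (as the comment above says), so
 * every virtual mapping of a page resolves to the same cache lines.  There
 * are therefore no virtual aliases for the kernel to clean up, and the
 * operations above can safely compile away to nothing.
 */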

/*
 * physically-indexed cache management
 */
#ifndef CONFIG_MN10300_CACHE_DISABLED

extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);

#else

#define flush_icache_range(start, end) do {} while (0)
#define flush_icache_page(vma, pg) do {} while (0)

#endif

#define flush_icache_user_range(vma, pg, adr, len) \
	flush_icache_range(adr, adr + len)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {				\
		memcpy(dst, src, len);	\
		flush_icache_page(vma, page); \
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

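/*
 * Illustrative sketch, not part of the original header: a debugger-style
 * write into another process's text page is expected to go through
 * copy_to_user_page() so that the instruction cache is brought into line
 * with the freshly written bytes.  The helper name is hypothetical.
 */
static inline void example_poke_text(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src, int len)
{
	/* copy the bytes, then flush the I-cache for the target page */
	copy_to_user_page(vma, page, vaddr, dst, src, len);
}
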
/*
 * primitive routines
 */
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern void mn10300_icache_inv(void);
extern void mn10300_dcache_inv(void);
extern void mn10300_dcache_inv_page(unsigned start);
extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
#ifdef CONFIG_MN10300_CACHE_WBACK
extern void mn10300_dcache_flush(void);
extern void mn10300_dcache_flush_page(unsigned start);
extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
extern void mn10300_dcache_flush_inv(void);
extern void mn10300_dcache_flush_inv_page(unsigned start);
extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#else
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_dcache_flush_inv() mn10300_dcache_inv()
#define mn10300_dcache_flush_inv_page(start) \
	mn10300_dcache_inv_page((start))
#define mn10300_dcache_flush_inv_range(start, end) \
	mn10300_dcache_inv_range((start), (end))
#define mn10300_dcache_flush_inv_range2(start, size) \
	mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
#define mn10300_icache_inv() do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
#define mn10300_dcache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_inv_page(start) do {} while (0)
#define mn10300_dcache_flush_inv() do {} while (0)
#define mn10300_dcache_flush_inv_range(start, end) do {} while (0)
#define mn10300_dcache_flush_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
#endif /* CONFIG_MN10300_CACHE_DISABLED */
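
/*
 * Summary of the naming convention used by the primitives above (inferred
 * from the definitions, not stated in the original header): "inv" discards
 * cache lines without writing them back, "flush" writes dirty lines back to
 * memory (only meaningful when the write-back cache is configured), and
 * "flush_inv" writes back and then discards.  "_page" acts on the page
 * containing the given address, "_range" takes start and end addresses, and
 * "_range2" takes a start address and a size.  Without
 * CONFIG_MN10300_CACHE_WBACK the plain flush operations become no-ops and
 * the flush_inv operations fall back to the corresponding invalidates; with
 * the cache disabled everything compiles away, as the #else branches show.
 */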

/*
 * internal debugging function
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_CACHEFLUSH_H */
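
Usage illustration, not part of the commit itself: code that writes
instructions into memory at run time is expected to call flush_icache_range()
over the affected region before executing them.  The buffer and length names
below are hypothetical.

	/* "code" has just been filled with code_len bytes of instructions */
	unsigned long start = (unsigned long) code;

	flush_icache_range(start, start + code_len);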