Diffstat (limited to 'arch/score/mm')
-rw-r--r--  arch/score/mm/cache.c | 125
1 file changed, 37 insertions(+), 88 deletions(-)
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index 1ebc67f18c6d..dbac9d9dfddd 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -32,34 +32,26 @@
 
 #include <asm/mmu_context.h>
 
-/* Cache operations. */
-void (*flush_cache_all)(void);
-void (*__flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
-void (*flush_cache_sigtramp)(unsigned long addr);
-void (*flush_data_cache_page)(unsigned long addr);
-EXPORT_SYMBOL(flush_data_cache_page);
-void (*flush_icache_all)(void);
-
-/*Score 7 cache operations*/
-static inline void s7___flush_cache_all(void);
-static void s7_flush_cache_mm(struct mm_struct *mm);
-static void s7_flush_cache_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-static void s7_flush_cache_page(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-static void s7_flush_icache_range(unsigned long start, unsigned long end);
-static void s7_flush_cache_sigtramp(unsigned long addr);
-static void s7_flush_data_cache_page(unsigned long addr);
-static void s7_flush_dcache_range(unsigned long start, unsigned long end);
+/*
+Just flush entire Dcache!!
+You must ensure the page doesn't include instructions, because
+the function will not flush the Icache.
+The addr must be cache aligned.
+*/
+static void flush_data_cache_page(unsigned long addr)
+{
+	unsigned int i;
+	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
+		__asm__ __volatile__(
+		"cache 0x0e, [%0, 0]\n"
+		"cache 0x1a, [%0, 0]\n"
+		"nop\n"
+		: : "r" (addr));
+		addr += L1_CACHE_BYTES;
+	}
+}
 
+/* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte)
 {
@@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
 		addr = (unsigned long) page_address(page);
 		if (exec)
-			s7_flush_data_cache_page(addr);
+			flush_data_cache_page(addr);
 		clear_bit(PG_arch_1, &page->flags);
 	}
 }
@@ -101,44 +93,22 @@ static inline void setup_protection_map(void)
 
 void __devinit cpu_cache_init(void)
 {
-	flush_cache_all = s7_flush_cache_all;
-	__flush_cache_all = s7___flush_cache_all;
-	flush_cache_mm = s7_flush_cache_mm;
-	flush_cache_range = s7_flush_cache_range;
-	flush_cache_page = s7_flush_cache_page;
-	flush_icache_range = s7_flush_icache_range;
-	flush_cache_sigtramp = s7_flush_cache_sigtramp;
-	flush_data_cache_page = s7_flush_data_cache_page;
-
 	setup_protection_map();
 }
 
-void s7_flush_icache_all(void)
+void flush_icache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_icache_all\n"
+	"la r8, flush_icache_all\n"
 	"cache 0x10, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	: : : "r8");
 }
 
-void s7_flush_dcache_all(void)
-{
-	__asm__ __volatile__(
-	"la r8, s7_flush_dcache_all\n"
-	"cache 0x1f, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
-	"cache 0x1a, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
-	: : : "r8");
-}
-
-void s7_flush_cache_all(void)
+void flush_dcache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_cache_all\n"
-	"cache 0x10, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
+	"la r8, flush_dcache_all\n"
 	"cache 0x1f, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	"cache 0x1a, [r8, 0]\n"
@@ -146,10 +116,10 @@ void s7_flush_cache_all(void)
 	: : : "r8");
 }
 
-void s7___flush_cache_all(void)
+void flush_cache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_cache_all\n"
+	"la r8, flush_cache_all\n"
 	"cache 0x10, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	"cache 0x1f, [r8, 0]\n"
@@ -159,11 +129,11 @@ void s7___flush_cache_all(void)
 	: : : "r8");
 }
 
-static void s7_flush_cache_mm(struct mm_struct *mm)
+void flush_cache_mm(struct mm_struct *mm)
 {
 	if (!(mm->context))
 		return;
-	s7_flush_cache_all();
+	flush_cache_all();
 }
 
 /*if we flush a range precisely , the processing may be very long.
@@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find
 a suitably efficient method for removing multiple page
 sized regions from the cache.
 */
-static void
-s7_flush_cache_range(struct vm_area_struct *vma,
+void flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma,
 		tmpend = (start | (PAGE_SIZE-1)) > end ?
 				end : (start | (PAGE_SIZE-1));
 
-		s7_flush_dcache_range(start, tmpend);
+		flush_dcache_range(start, tmpend);
 		if (exec)
-			s7_flush_icache_range(start, tmpend);
+			flush_icache_range(start, tmpend);
 		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
 	}
 }
 
-static void
-s7_flush_cache_page(struct vm_area_struct *vma,
+void flush_cache_page(struct vm_area_struct *vma,
 	unsigned long addr, unsigned long pfn)
 {
 	int exec = vma->vm_flags & VM_EXEC;
 	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
 
-	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
 
 	if (exec)
-		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
 }
 
-static void s7_flush_cache_sigtramp(unsigned long addr)
+void flush_cache_sigtramp(unsigned long addr)
 {
 	__asm__ __volatile__(
 	"cache 0x02, [%0, 0]\n"
@@ -248,30 +216,11 @@ static void s7_flush_cache_sigtramp(unsigned long addr)
 }
 
 /*
-Just flush entire Dcache!!
-You must ensure the page doesn't include instructions, because
-the function will not flush the Icache.
-The addr must be cache aligned.
-*/
-static void s7_flush_data_cache_page(unsigned long addr)
-{
-	unsigned int i;
-	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
-		__asm__ __volatile__(
-		"cache 0x0e, [%0, 0]\n"
-		"cache 0x1a, [%0, 0]\n"
-		"nop\n"
-		: : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/*
 1. WB and invalid a cache line of Dcache
 2. Drain Write Buffer
 the range must be smaller than PAGE_SIZE
 */
-static void s7_flush_dcache_range(unsigned long start, unsigned long end)
+void flush_dcache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 
@@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void s7_flush_icache_range(unsigned long start, unsigned long end)
+void flush_icache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 	start = start & ~(L1_CACHE_BYTES - 1);