Diffstat (limited to 'arch/score/mm'):

 arch/score/mm/cache.c | 125 +++++++++----------------------
 1 file changed, 37 insertions(+), 88 deletions(-)
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index 1ebc67f18c6d..dbac9d9dfddd 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -32,34 +32,26 @@
 
 #include <asm/mmu_context.h>
 
-/* Cache operations. */
-void (*flush_cache_all)(void);
-void (*__flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
-void (*flush_cache_sigtramp)(unsigned long addr);
-void (*flush_data_cache_page)(unsigned long addr);
-EXPORT_SYMBOL(flush_data_cache_page);
-void (*flush_icache_all)(void);
-
-/*Score 7 cache operations*/
-static inline void s7___flush_cache_all(void);
-static void s7_flush_cache_mm(struct mm_struct *mm);
-static void s7_flush_cache_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-static void s7_flush_cache_page(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-static void s7_flush_icache_range(unsigned long start, unsigned long end);
-static void s7_flush_cache_sigtramp(unsigned long addr);
-static void s7_flush_data_cache_page(unsigned long addr);
-static void s7_flush_dcache_range(unsigned long start, unsigned long end);
+/*
+Just flush entire Dcache!!
+You must ensure the page doesn't include instructions, because
+the function will not flush the Icache.
+The addr must be cache aligned.
+*/
+static void flush_data_cache_page(unsigned long addr)
+{
+	unsigned int i;
+	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
+		__asm__ __volatile__(
+			"cache 0x0e, [%0, 0]\n"
+			"cache 0x1a, [%0, 0]\n"
+			"nop\n"
+			: : "r" (addr));
+		addr += L1_CACHE_BYTES;
+	}
+}
 
+/* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte)
 {
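One thing worth noting in the new flush_data_cache_page(): the loop bound PAGE_SIZE / L1_CACHE_BYTES is a count of cache lines, while i advances by L1_CACHE_BYTES per pass, so the loop as written touches only a fraction of the page's lines. The walk the comment describes would advance one line per iteration, roughly as in this sketch (assuming, per the comment further down in this file, that cache op 0x0e writes back and invalidates one Dcache line and 0x1a drains the write buffer):

	static void flush_data_cache_page_sketch(unsigned long addr)
	{
		unsigned int i;

		/* one pass per cache line in the page */
		for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i++) {
			__asm__ __volatile__(
				"cache 0x0e, [%0, 0]\n"	/* writeback + invalidate one Dcache line (assumed) */
				"cache 0x1a, [%0, 0]\n"	/* drain the write buffer (assumed) */
				"nop\n"
				: : "r" (addr));
			addr += L1_CACHE_BYTES;
		}
	}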
@@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
 		addr = (unsigned long) page_address(page);
 		if (exec)
-			s7_flush_data_cache_page(addr);
+			flush_data_cache_page(addr);
 		clear_bit(PG_arch_1, &page->flags);
 	}
 }
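For background on the PG_arch_1 test above: it is an architecture-private page flag, and the usual Linux idiom (assumed to apply here) is that the dcache-flush hooks only set the bit, deferring the real flush to __update_cache() for when the page is actually mapped with execute permission. The marking side would look something like this hypothetical sketch:

	/* hypothetical marking side of the deferred-flush protocol */
	static void mark_dcache_dirty(struct page *page)
	{
		/* defer the flush: just record that this page needs one */
		set_bit(PG_arch_1, &page->flags);
	}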
@@ -101,44 +93,22 @@ static inline void setup_protection_map(void)
 
 void __devinit cpu_cache_init(void)
 {
-	flush_cache_all = s7_flush_cache_all;
-	__flush_cache_all = s7___flush_cache_all;
-	flush_cache_mm = s7_flush_cache_mm;
-	flush_cache_range = s7_flush_cache_range;
-	flush_cache_page = s7_flush_cache_page;
-	flush_icache_range = s7_flush_icache_range;
-	flush_cache_sigtramp = s7_flush_cache_sigtramp;
-	flush_data_cache_page = s7_flush_data_cache_page;
-
 	setup_protection_map();
 }
 
-void s7_flush_icache_all(void)
+void flush_icache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_icache_all\n"
+		"la r8, flush_icache_all\n"
 		"cache 0x10, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		: : : "r8");
 }
 
-void s7_flush_dcache_all(void)
-{
-	__asm__ __volatile__(
-		"la r8, s7_flush_dcache_all\n"
-		"cache 0x1f, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
-		"cache 0x1a, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
-		: : : "r8");
-}
-
-void s7_flush_cache_all(void)
+void flush_dcache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_cache_all\n"
-		"cache 0x10, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
+		"la r8, flush_dcache_all\n"
 		"cache 0x1f, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		"cache 0x1a, [r8, 0]\n"
@@ -146,10 +116,10 @@ void s7_flush_cache_all(void)
 		: : : "r8");
 }
 
-void s7___flush_cache_all(void)
+void flush_cache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_cache_all\n"
+		"la r8, flush_cache_all\n"
 		"cache 0x10, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		"cache 0x1f, [r8, 0]\n"
@@ -159,11 +129,11 @@ void s7___flush_cache_all(void)
 		: : : "r8");
 }
 
-static void s7_flush_cache_mm(struct mm_struct *mm)
+void flush_cache_mm(struct mm_struct *mm)
 {
 	if (!(mm->context))
 		return;
-	s7_flush_cache_all();
+	flush_cache_all();
 }
 
 /*if we flush a range precisely , the processing may be very long.
@@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find
 a suitably efficient method for removing multiple page
 sized regions from the cache.
 */
-static void
-s7_flush_cache_range(struct vm_area_struct *vma,
+void flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
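The body that follows (next hunk) clips each step of the range at a page boundary. Worked through with PAGE_SIZE = 4096 and made-up values start = 0x1234, end = 0x3456: the first pass flushes 0x1234..0x1fff (start | 0xfff), start then rounds up to 0x2000, the second pass covers 0x2000..0x2fff, and the last covers 0x3000..0x3456. As a sketch, with the loop head assumed since it lies outside the hunks shown:

	while (start <= end) {
		unsigned long tmpend = (start | (PAGE_SIZE - 1)) > end ?
				end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		/* round start up to the next page boundary */
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}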
@@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma,
 		tmpend = (start | (PAGE_SIZE-1)) > end ?
 				end : (start | (PAGE_SIZE-1));
 
-		s7_flush_dcache_range(start, tmpend);
+		flush_dcache_range(start, tmpend);
 		if (exec)
-			s7_flush_icache_range(start, tmpend);
+			flush_icache_range(start, tmpend);
 		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
 	}
 }
 
-static void
-s7_flush_cache_page(struct vm_area_struct *vma,
+void flush_cache_page(struct vm_area_struct *vma,
 	unsigned long addr, unsigned long pfn)
 {
 	int exec = vma->vm_flags & VM_EXEC;
 	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
 
-	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
 
 	if (exec)
-		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
 }
 
-static void s7_flush_cache_sigtramp(unsigned long addr)
+void flush_cache_sigtramp(unsigned long addr)
 {
 	__asm__ __volatile__(
 		"cache 0x02, [%0, 0]\n"
@@ -248,30 +216,11 @@ static void s7_flush_cache_sigtramp(unsigned long addr)
 }
 
 /*
-Just flush entire Dcache!!
-You must ensure the page doesn't include instructions, because
-the function will not flush the Icache.
-The addr must be cache aligned.
-*/
-static void s7_flush_data_cache_page(unsigned long addr)
-{
-	unsigned int i;
-	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
-		__asm__ __volatile__(
-			"cache 0x0e, [%0, 0]\n"
-			"cache 0x1a, [%0, 0]\n"
-			"nop\n"
-			: : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/*
 1. WB and invalid a cache line of Dcache
 2. Drain Write Buffer
 the range must be smaller than PAGE_SIZE
 */
-static void s7_flush_dcache_range(unsigned long start, unsigned long end)
+void flush_dcache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 
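The body of flush_dcache_range() is untouched by this patch and not shown; going by the comment above it and by the per-line pattern in flush_data_cache_page() earlier in the diff, it plausibly looks like the following sketch (an assumption, not the file's actual code):

	void flush_dcache_range_sketch(unsigned long start, unsigned long end)
	{
		int size, i;

		start = start & ~(L1_CACHE_BYTES - 1);
		end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
		size = end - start;
		/* writeback + invalidate, then drain, one cache line at a time */
		for (i = 0; i < size; i += L1_CACHE_BYTES) {
			__asm__ __volatile__(
				"cache 0x0e, [%0, 0]\n"
				"cache 0x1a, [%0, 0]\n"
				"nop\n"
				: : "r" (start));
			start += L1_CACHE_BYTES;
		}
	}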
@@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void s7_flush_icache_range(unsigned long start, unsigned long end)
+void flush_icache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 	start = start & ~(L1_CACHE_BYTES - 1);
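flush_icache_range() is what generic kernel code calls after writing instructions into memory (module loading, signal trampolines, breakpoints). A typical caller, with dst, src and len as placeholders:

	/* copy code somewhere executable, then sync the Icache */
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);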