Diffstat (limited to 'arch/sh/mm/pmb.c')
 -rw-r--r--   arch/sh/mm/pmb.c   586
 1 file changed, 440 insertions, 146 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a166035..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
3 | * | 3 | * |
4 | * Privileged Space Mapping Buffer (PMB) Support. | 4 | * Privileged Space Mapping Buffer (PMB) Support. |
5 | * | 5 | * |
6 | * Copyright (C) 2005, 2006, 2007 Paul Mundt | 6 | * Copyright (C) 2005 - 2010 Paul Mundt |
7 | * | 7 | * Copyright (C) 2010 Matt Fleming |
8 | * P1/P2 Section mapping definitions from map32.h, which was: | ||
9 | * | ||
10 | * Copyright 2003 (c) Lineo Solutions,Inc. | ||
11 | * | 8 | * |
12 | * This file is subject to the terms and conditions of the GNU General Public | 9 | * This file is subject to the terms and conditions of the GNU General Public |
13 | * License. See the file "COPYING" in the main directory of this archive | 10 | * License. See the file "COPYING" in the main directory of this archive |
@@ -24,47 +21,67 @@
24 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
25 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
26 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/io.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/rwlock.h> | ||
27 | #include <asm/sizes.h> | ||
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/page.h> | ||
30 | #include <asm/mmu.h> | 32 | #include <asm/mmu.h> |
31 | #include <asm/io.h> | ||
32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
33 | 34 | ||
34 | #define NR_PMB_ENTRIES 16 | 35 | struct pmb_entry; |
36 | |||
37 | struct pmb_entry { | ||
38 | unsigned long vpn; | ||
39 | unsigned long ppn; | ||
40 | unsigned long flags; | ||
41 | unsigned long size; | ||
35 | 42 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 43 | spinlock_t lock; |
44 | |||
45 | /* | ||
46 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or | ||
47 | * PMB_NO_ENTRY to search for a free one | ||
48 | */ | ||
49 | int entry; | ||
37 | 50 | ||
51 | /* Adjacent entry link for contiguous multi-entry mappings */ | ||
52 | struct pmb_entry *link; | ||
53 | }; | ||
54 | |||
55 | static void pmb_unmap_entry(struct pmb_entry *, int depth); | ||
56 | |||
57 | static DEFINE_RWLOCK(pmb_rwlock); | ||
38 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; | 58 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
39 | static unsigned long pmb_map; | 59 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); |
40 | 60 | ||
41 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 61 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) |
42 | { | 62 | { |
43 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
44 | } | 64 | } |
45 | 65 | ||
46 | static inline unsigned long mk_pmb_addr(unsigned int entry) | 66 | static __always_inline unsigned long mk_pmb_addr(unsigned int entry) |
47 | { | 67 | { |
48 | return mk_pmb_entry(entry) | PMB_ADDR; | 68 | return mk_pmb_entry(entry) | PMB_ADDR; |
49 | } | 69 | } |
50 | 70 | ||
51 | static inline unsigned long mk_pmb_data(unsigned int entry) | 71 | static __always_inline unsigned long mk_pmb_data(unsigned int entry) |
52 | { | 72 | { |
53 | return mk_pmb_entry(entry) | PMB_DATA; | 73 | return mk_pmb_entry(entry) | PMB_DATA; |
54 | } | 74 | } |
55 | 75 | ||
56 | static int pmb_alloc_entry(void) | 76 | static int pmb_alloc_entry(void) |
57 | { | 77 | { |
58 | unsigned int pos; | 78 | int pos; |
59 | |||
60 | repeat: | ||
61 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
62 | |||
63 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
64 | return -ENOSPC; | ||
65 | 79 | ||
66 | if (test_and_set_bit(pos, &pmb_map)) | 80 | pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); |
67 | goto repeat; | 81 | if (pos >= 0 && pos < NR_PMB_ENTRIES) |
82 | __set_bit(pos, pmb_map); | ||
83 | else | ||
84 | pos = -ENOSPC; | ||
68 | 85 | ||
69 | return pos; | 86 | return pos; |
70 | } | 87 | } |
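The new pmb_alloc_entry() above drops the old test_and_set_bit() retry loop in favour of a plain find_first_zero_bit()/__set_bit() pair; the non-atomic bit operations are safe because every caller now holds pmb_rwlock for writing. A standalone userspace model of that slot-allocation idea (names and sizes here are illustrative, not taken from the patch):

	#include <stdio.h>

	#define NR_SLOTS 16

	static unsigned int slot_map;	/* one bit per slot, 0 = free */

	/* Find the lowest clear bit, mark it used, return its index (or -1). */
	static int slot_alloc(void)
	{
		for (int pos = 0; pos < NR_SLOTS; pos++) {
			if (!(slot_map & (1u << pos))) {
				slot_map |= 1u << pos;	/* caller is assumed to hold the lock */
				return pos;
			}
		}
		return -1;	/* no free slots: the kernel code returns -ENOSPC */
	}

	static void slot_free(int pos)
	{
		slot_map &= ~(1u << pos);
	}

	int main(void)
	{
		int a = slot_alloc(), b = slot_alloc();

		printf("allocated slots %d and %d\n", a, b);		/* 0 and 1 */
		slot_free(a);
		printf("next allocation reuses slot %d\n", slot_alloc());	/* 0 again */
		return 0;
	}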
@@ -73,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
73 | unsigned long flags, int entry) | 90 | unsigned long flags, int entry) |
74 | { | 91 | { |
75 | struct pmb_entry *pmbe; | 92 | struct pmb_entry *pmbe; |
93 | unsigned long irqflags; | ||
94 | void *ret = NULL; | ||
76 | int pos; | 95 | int pos; |
77 | 96 | ||
97 | write_lock_irqsave(&pmb_rwlock, irqflags); | ||
98 | |||
78 | if (entry == PMB_NO_ENTRY) { | 99 | if (entry == PMB_NO_ENTRY) { |
79 | pos = pmb_alloc_entry(); | 100 | pos = pmb_alloc_entry(); |
80 | if (pos < 0) | 101 | if (unlikely(pos < 0)) { |
81 | return ERR_PTR(pos); | 102 | ret = ERR_PTR(pos); |
103 | goto out; | ||
104 | } | ||
82 | } else { | 105 | } else { |
83 | if (test_bit(entry, &pmb_map)) | 106 | if (__test_and_set_bit(entry, pmb_map)) { |
84 | return ERR_PTR(-ENOSPC); | 107 | ret = ERR_PTR(-ENOSPC); |
108 | goto out; | ||
109 | } | ||
110 | |||
85 | pos = entry; | 111 | pos = entry; |
86 | } | 112 | } |
87 | 113 | ||
114 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
115 | |||
88 | pmbe = &pmb_entry_list[pos]; | 116 | pmbe = &pmb_entry_list[pos]; |
89 | if (!pmbe) | 117 | |
90 | return ERR_PTR(-ENOMEM); | 118 | memset(pmbe, 0, sizeof(struct pmb_entry)); |
119 | |||
120 | spin_lock_init(&pmbe->lock); | ||
91 | 121 | ||
92 | pmbe->vpn = vpn; | 122 | pmbe->vpn = vpn; |
93 | pmbe->ppn = ppn; | 123 | pmbe->ppn = ppn; |
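On the locking side, pmb_alloc() above now takes pmb_rwlock for writing only long enough to reserve a slot, and each pmb_entry gains its own spinlock for later updates. A reduced userspace model of that two-level scheme, with pthreads standing in for the kernel primitives (all names below are illustrative, not from the patch):

	#include <pthread.h>
	#include <stdio.h>

	#define NR_SLOTS 16

	struct entry {
		pthread_mutex_t lock;		/* models the per-entry pmbe->lock */
		unsigned long vpn, ppn;
		int used;
	};

	/* Models pmb_rwlock: guards only the slot map, not the entries themselves. */
	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct entry entries[NR_SLOTS];

	static struct entry *entry_alloc(unsigned long vpn, unsigned long ppn)
	{
		struct entry *e = NULL;

		pthread_rwlock_wrlock(&map_lock);	/* short critical section: reserve a slot */
		for (int i = 0; i < NR_SLOTS; i++) {
			if (!entries[i].used) {
				entries[i].used = 1;
				e = &entries[i];
				break;
			}
		}
		pthread_rwlock_unlock(&map_lock);

		if (!e)
			return NULL;		/* pmb_alloc() returns ERR_PTR(-ENOSPC) here */

		/* The entry itself is initialised after the global lock is dropped. */
		pthread_mutex_init(&e->lock, NULL);
		e->vpn = vpn;
		e->ppn = ppn;
		return e;
	}

	int main(void)
	{
		struct entry *e = entry_alloc(0x80000000UL, 0x08000000UL);

		printf("allocated slot %ld\n", e ? (long)(e - entries) : -1L);
		return 0;
	}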
@@ -95,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
95 | pmbe->entry = pos; | 125 | pmbe->entry = pos; |
96 | 126 | ||
97 | return pmbe; | 127 | return pmbe; |
128 | |||
129 | out: | ||
130 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
131 | return ret; | ||
98 | } | 132 | } |
99 | 133 | ||
100 | static void pmb_free(struct pmb_entry *pmbe) | 134 | static void pmb_free(struct pmb_entry *pmbe) |
101 | { | 135 | { |
102 | int pos = pmbe->entry; | 136 | __clear_bit(pmbe->entry, pmb_map); |
103 | |||
104 | pmbe->vpn = 0; | ||
105 | pmbe->ppn = 0; | ||
106 | pmbe->flags = 0; | ||
107 | pmbe->entry = 0; | ||
108 | 137 | ||
109 | clear_bit(pos, &pmb_map); | 138 | pmbe->entry = PMB_NO_ENTRY; |
139 | pmbe->link = NULL; | ||
110 | } | 140 | } |
111 | 141 | ||
112 | /* | 142 | /* |
113 | * Must be in P2 for __set_pmb_entry() | 143 | * Ensure that the PMB entries match our cache configuration. |
144 | * | ||
145 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
146 | * invalid, so care must be taken to manually adjust cacheable | ||
147 | * translations. | ||
114 | */ | 148 | */ |
115 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 149 | static __always_inline unsigned long pmb_cache_flags(void) |
116 | unsigned long flags, int pos) | ||
117 | { | 150 | { |
118 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 151 | unsigned long flags = 0; |
119 | 152 | ||
120 | #ifdef CONFIG_CACHE_WRITETHROUGH | 153 | #if defined(CONFIG_CACHE_WRITETHROUGH) |
121 | /* | 154 | flags |= PMB_C | PMB_WT | PMB_UB; |
122 | * When we are in 32-bit address extended mode, CCR.CB becomes | 155 | #elif defined(CONFIG_CACHE_WRITEBACK) |
123 | * invalid, so care must be taken to manually adjust cacheable | 156 | flags |= PMB_C; |
124 | * translations. | ||
125 | */ | ||
126 | if (likely(flags & PMB_C)) | ||
127 | flags |= PMB_WT; | ||
128 | #endif | 157 | #endif |
129 | 158 | ||
130 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 159 | return flags; |
131 | } | 160 | } |
132 | 161 | ||
133 | static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 162 | /* |
163 | * Must be run uncached. | ||
164 | */ | ||
165 | static void __set_pmb_entry(struct pmb_entry *pmbe) | ||
134 | { | 166 | { |
135 | jump_to_uncached(); | 167 | writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); |
136 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); | 168 | writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, |
137 | back_to_cached(); | 169 | mk_pmb_data(pmbe->entry)); |
138 | } | 170 | } |
139 | 171 | ||
140 | static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 172 | static void __clear_pmb_entry(struct pmb_entry *pmbe) |
141 | { | 173 | { |
142 | unsigned int entry = pmbe->entry; | 174 | unsigned long addr, data; |
143 | unsigned long addr; | 175 | unsigned long addr_val, data_val; |
144 | 176 | ||
145 | if (unlikely(entry >= NR_PMB_ENTRIES)) | 177 | addr = mk_pmb_addr(pmbe->entry); |
146 | return; | 178 | data = mk_pmb_data(pmbe->entry); |
147 | 179 | ||
148 | jump_to_uncached(); | 180 | addr_val = __raw_readl(addr); |
181 | data_val = __raw_readl(data); | ||
149 | 182 | ||
150 | /* Clear V-bit */ | 183 | /* Clear V-bit */ |
151 | addr = mk_pmb_addr(entry); | 184 | writel_uncached(addr_val & ~PMB_V, addr); |
152 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 185 | writel_uncached(data_val & ~PMB_V, data); |
186 | } | ||
153 | 187 | ||
154 | addr = mk_pmb_data(entry); | 188 | static void set_pmb_entry(struct pmb_entry *pmbe) |
155 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 189 | { |
190 | unsigned long flags; | ||
156 | 191 | ||
157 | back_to_cached(); | 192 | spin_lock_irqsave(&pmbe->lock, flags); |
193 | __set_pmb_entry(pmbe); | ||
194 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
158 | } | 195 | } |
159 | 196 | ||
160 | |||
161 | static struct { | 197 | static struct { |
162 | unsigned long size; | 198 | unsigned long size; |
163 | int flag; | 199 | int flag; |
164 | } pmb_sizes[] = { | 200 | } pmb_sizes[] = { |
165 | { .size = 0x20000000, .flag = PMB_SZ_512M, }, | 201 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, |
166 | { .size = 0x08000000, .flag = PMB_SZ_128M, }, | 202 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, |
167 | { .size = 0x04000000, .flag = PMB_SZ_64M, }, | 203 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, |
168 | { .size = 0x01000000, .flag = PMB_SZ_16M, }, | 204 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, |
169 | }; | 205 | }; |
170 | 206 | ||
171 | long pmb_remap(unsigned long vaddr, unsigned long phys, | 207 | long pmb_remap(unsigned long vaddr, unsigned long phys, |
172 | unsigned long size, unsigned long flags) | 208 | unsigned long size, pgprot_t prot) |
173 | { | 209 | { |
174 | struct pmb_entry *pmbp, *pmbe; | 210 | struct pmb_entry *pmbp, *pmbe; |
175 | unsigned long wanted; | 211 | unsigned long wanted; |
176 | int pmb_flags, i; | 212 | int pmb_flags, i; |
177 | long err; | 213 | long err; |
214 | u64 flags; | ||
215 | |||
216 | flags = pgprot_val(prot); | ||
217 | |||
218 | pmb_flags = PMB_WT | PMB_UB; | ||
178 | 219 | ||
179 | /* Convert typical pgprot value to the PMB equivalent */ | 220 | /* Convert typical pgprot value to the PMB equivalent */ |
180 | if (flags & _PAGE_CACHABLE) { | 221 | if (flags & _PAGE_CACHABLE) { |
181 | if (flags & _PAGE_WT) | 222 | pmb_flags |= PMB_C; |
182 | pmb_flags = PMB_WT; | 223 | |
183 | else | 224 | if ((flags & _PAGE_WT) == 0) |
184 | pmb_flags = PMB_C; | 225 | pmb_flags &= ~(PMB_WT | PMB_UB); |
185 | } else | 226 | } |
186 | pmb_flags = PMB_WT | PMB_UB; | ||
187 | 227 | ||
188 | pmbp = NULL; | 228 | pmbp = NULL; |
189 | wanted = size; | 229 | wanted = size; |
190 | 230 | ||
191 | again: | 231 | again: |
192 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 232 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
233 | unsigned long flags; | ||
234 | |||
193 | if (size < pmb_sizes[i].size) | 235 | if (size < pmb_sizes[i].size) |
194 | continue; | 236 | continue; |
195 | 237 | ||
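The pgprot-to-PMB conversion in pmb_remap() above now starts from the uncached defaults (PMB_WT | PMB_UB) and only clears them for cacheable, write-back pages, inverting the old if/else. A standalone restatement of the resulting mapping (the bit values below are placeholders, not the real <cpu/mmu.h> definitions):

	#include <stdio.h>

	/* Illustrative bit values only; the real ones come from the SH headers. */
	#define _PAGE_CACHABLE	0x1
	#define _PAGE_WT	0x2
	#define PMB_C		0x4
	#define PMB_WT		0x8
	#define PMB_UB		0x10

	/* Mirrors the pmb_remap() logic: default to uncached, relax for cacheable pages. */
	static unsigned long pgprot_to_pmb(unsigned long pgprot)
	{
		unsigned long pmb = PMB_WT | PMB_UB;

		if (pgprot & _PAGE_CACHABLE) {
			pmb |= PMB_C;
			if (!(pgprot & _PAGE_WT))
				pmb &= ~(PMB_WT | PMB_UB);	/* write-back, buffered */
		}
		return pmb;
	}

	int main(void)
	{
		/* uncached              -> PMB_WT | PMB_UB          */
		/* cacheable, write-back -> PMB_C                     */
		/* cacheable, write-thru -> PMB_C | PMB_WT | PMB_UB   */
		printf("%#lx %#lx %#lx\n",
		       pgprot_to_pmb(0),
		       pgprot_to_pmb(_PAGE_CACHABLE),
		       pgprot_to_pmb(_PAGE_CACHABLE | _PAGE_WT));
		return 0;
	}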
@@ -200,18 +242,25 @@ again:
200 | goto out; | 242 | goto out; |
201 | } | 243 | } |
202 | 244 | ||
203 | set_pmb_entry(pmbe); | 245 | spin_lock_irqsave(&pmbe->lock, flags); |
246 | |||
247 | __set_pmb_entry(pmbe); | ||
204 | 248 | ||
205 | phys += pmb_sizes[i].size; | 249 | phys += pmb_sizes[i].size; |
206 | vaddr += pmb_sizes[i].size; | 250 | vaddr += pmb_sizes[i].size; |
207 | size -= pmb_sizes[i].size; | 251 | size -= pmb_sizes[i].size; |
208 | 252 | ||
253 | pmbe->size = pmb_sizes[i].size; | ||
254 | |||
209 | /* | 255 | /* |
210 | * Link adjacent entries that span multiple PMB entries | 256 | * Link adjacent entries that span multiple PMB entries |
211 | * for easier tear-down. | 257 | * for easier tear-down. |
212 | */ | 258 | */ |
213 | if (likely(pmbp)) | 259 | if (likely(pmbp)) { |
260 | spin_lock(&pmbp->lock); | ||
214 | pmbp->link = pmbe; | 261 | pmbp->link = pmbe; |
262 | spin_unlock(&pmbp->lock); | ||
263 | } | ||
215 | 264 | ||
216 | pmbp = pmbe; | 265 | pmbp = pmbe; |
217 | 266 | ||
@@ -221,16 +270,17 @@ again:
221 | * pmb_sizes[i].size again. | 270 | * pmb_sizes[i].size again. |
222 | */ | 271 | */ |
223 | i--; | 272 | i--; |
273 | |||
274 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
224 | } | 275 | } |
225 | 276 | ||
226 | if (size >= 0x1000000) | 277 | if (size >= SZ_16M) |
227 | goto again; | 278 | goto again; |
228 | 279 | ||
229 | return wanted - size; | 280 | return wanted - size; |
230 | 281 | ||
231 | out: | 282 | out: |
232 | if (pmbp) | 283 | pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); |
233 | __pmb_unmap(pmbp); | ||
234 | 284 | ||
235 | return err; | 285 | return err; |
236 | } | 286 | } |
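Taken together, the pmb_remap() loop above greedily covers the request with the largest PMB page size that still fits, repeating while at least SZ_16M remains, and returns the number of bytes actually mapped. A standalone model of that size-selection policy (the size table matches the patch; everything else is illustrative):

	#include <stdio.h>

	#define SZ_16M	0x01000000UL
	#define SZ_64M	0x04000000UL
	#define SZ_128M	0x08000000UL
	#define SZ_512M	0x20000000UL

	static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

	/* Returns how many bytes of 'size' can be covered, printing each entry used. */
	static unsigned long cover(unsigned long size)
	{
		unsigned long wanted = size;

		while (size >= SZ_16M) {
			for (int i = 0; i < 4; i++) {
				if (size < pmb_sizes[i])
					continue;
				printf("  entry: %lu MB\n", pmb_sizes[i] >> 20);
				size -= pmb_sizes[i];
				break;	/* try again with whatever remains */
			}
		}
		return wanted - size;
	}

	int main(void)
	{
		/* An 80 MB request becomes one 64 MB entry plus one 16 MB entry. */
		printf("mapped %lu MB\n", cover(80 * 1024 * 1024UL) >> 20);
		return 0;
	}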
@@ -240,24 +290,52 @@ void pmb_unmap(unsigned long addr)
240 | struct pmb_entry *pmbe = NULL; | 290 | struct pmb_entry *pmbe = NULL; |
241 | int i; | 291 | int i; |
242 | 292 | ||
293 | read_lock(&pmb_rwlock); | ||
294 | |||
243 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 295 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
244 | if (test_bit(i, &pmb_map)) { | 296 | if (test_bit(i, pmb_map)) { |
245 | pmbe = &pmb_entry_list[i]; | 297 | pmbe = &pmb_entry_list[i]; |
246 | if (pmbe->vpn == addr) | 298 | if (pmbe->vpn == addr) |
247 | break; | 299 | break; |
248 | } | 300 | } |
249 | } | 301 | } |
250 | 302 | ||
251 | if (unlikely(!pmbe)) | 303 | read_unlock(&pmb_rwlock); |
252 | return; | ||
253 | 304 | ||
254 | __pmb_unmap(pmbe); | 305 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); |
255 | } | 306 | } |
256 | 307 | ||
257 | static void __pmb_unmap(struct pmb_entry *pmbe) | 308 | static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) |
258 | { | 309 | { |
259 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); | 310 | return (b->vpn == (a->vpn + a->size)) && |
311 | (b->ppn == (a->ppn + a->size)) && | ||
312 | (b->flags == a->flags); | ||
313 | } | ||
260 | 314 | ||
315 | static bool pmb_size_valid(unsigned long size) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
320 | if (pmb_sizes[i].size == size) | ||
321 | return true; | ||
322 | |||
323 | return false; | ||
324 | } | ||
325 | |||
326 | static int pmb_size_to_flags(unsigned long size) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
331 | if (pmb_sizes[i].size == size) | ||
332 | return pmb_sizes[i].flag; | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
338 | { | ||
261 | do { | 339 | do { |
262 | struct pmb_entry *pmblink = pmbe; | 340 | struct pmb_entry *pmblink = pmbe; |
263 | 341 | ||
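The helpers added above only link hardware-adjacent entries: pmb_can_merge() requires the virtual and physical ranges to abut with identical flags, and pmb_size_valid() restricts coalescing to sizes the PMB can actually encode. A quick standalone check of those two predicates (the struct fields mirror the patch, the values are made up):

	#include <stdbool.h>
	#include <stdio.h>

	struct pmb_entry {
		unsigned long vpn, ppn, flags, size;
	};

	static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
	{
		return (b->vpn == (a->vpn + a->size)) &&
		       (b->ppn == (a->ppn + a->size)) &&
		       (b->flags == a->flags);
	}

	static bool pmb_size_valid(unsigned long size)
	{
		static const unsigned long sizes[] = {
			0x20000000, 0x08000000, 0x04000000, 0x01000000,	/* 512M, 128M, 64M, 16M */
		};

		for (int i = 0; i < 4; i++)
			if (sizes[i] == size)
				return true;
		return false;
	}

	int main(void)
	{
		struct pmb_entry a = { 0x80000000, 0x08000000, 0, 0x04000000 };	/* 64 MB */
		struct pmb_entry b = { 0x84000000, 0x0c000000, 0, 0x04000000 };	/* next 64 MB */

		printf("adjacent: %d\n", pmb_can_merge(&a, &b));	/* 1 */
		printf("128M valid: %d, 80M valid: %d\n",
		       pmb_size_valid(a.size + b.size), pmb_size_valid(0x05000000));	/* 1, 0 */
		return 0;
	}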
@@ -268,102 +346,312 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
268 | * this entry in pmb_alloc() (even if we haven't filled | 346 | * this entry in pmb_alloc() (even if we haven't filled |
269 | * it yet). | 347 | * it yet). |
270 | * | 348 | * |
271 | * Therefore, calling clear_pmb_entry() is safe as no | 349 | * Therefore, calling __clear_pmb_entry() is safe as no |
272 | * other mapping can be using that slot. | 350 | * other mapping can be using that slot. |
273 | */ | 351 | */ |
274 | clear_pmb_entry(pmbe); | 352 | __clear_pmb_entry(pmbe); |
275 | 353 | ||
276 | pmbe = pmblink->link; | 354 | pmbe = pmblink->link; |
277 | 355 | ||
278 | pmb_free(pmblink); | 356 | pmb_free(pmblink); |
279 | } while (pmbe); | 357 | } while (pmbe && --depth); |
358 | } | ||
359 | |||
360 | static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
361 | { | ||
362 | unsigned long flags; | ||
363 | |||
364 | if (unlikely(!pmbe)) | ||
365 | return; | ||
366 | |||
367 | write_lock_irqsave(&pmb_rwlock, flags); | ||
368 | __pmb_unmap_entry(pmbe, depth); | ||
369 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
370 | } | ||
371 | |||
372 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) | ||
373 | { | ||
374 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); | ||
280 | } | 375 | } |
281 | 376 | ||
282 | #ifdef CONFIG_PMB | 377 | static void __init pmb_notify(void) |
283 | int __uses_jump_to_uncached pmb_init(void) | ||
284 | { | 378 | { |
285 | unsigned int i; | 379 | int i; |
286 | long size, ret; | ||
287 | 380 | ||
288 | jump_to_uncached(); | 381 | pr_info("PMB: boot mappings:\n"); |
382 | |||
383 | read_lock(&pmb_rwlock); | ||
384 | |||
385 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
386 | struct pmb_entry *pmbe; | ||
387 | |||
388 | if (!test_bit(i, pmb_map)) | ||
389 | continue; | ||
390 | |||
391 | pmbe = &pmb_entry_list[i]; | ||
392 | |||
393 | pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", | ||
394 | pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, | ||
395 | pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); | ||
396 | } | ||
397 | |||
398 | read_unlock(&pmb_rwlock); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Sync our software copy of the PMB mappings with those in hardware. The | ||
403 | * mappings in the hardware PMB were either set up by the bootloader or | ||
404 | * very early on by the kernel. | ||
405 | */ | ||
406 | static void __init pmb_synchronize(void) | ||
407 | { | ||
408 | struct pmb_entry *pmbp = NULL; | ||
409 | int i, j; | ||
289 | 410 | ||
290 | /* | 411 | /* |
291 | * Insert PMB entries for the P1 and P2 areas so that, after | 412 | * Run through the initial boot mappings, log the established |
292 | * we've switched the MMU to 32-bit mode, the semantics of P1 | 413 | * ones, and blow away anything that falls outside of the valid |
293 | * and P2 are the same as in 29-bit mode, e.g. | 414 | * PPN range. Specifically, we only care about existing mappings |
415 | * that impact the cached/uncached sections. | ||
294 | * | 416 | * |
295 | * P1 - provides a cached window onto physical memory | 417 | * Note that touching these can be a bit of a minefield; the boot |
296 | * P2 - provides an uncached window onto physical memory | 418 | * loader can establish multi-page mappings with the same caching |
419 | * attributes, so we need to ensure that we aren't modifying a | ||
420 | * mapping that we're presently executing from, or may execute | ||
421 | * from in the case of straddling page boundaries. | ||
422 | * | ||
423 | * In the future we will have to tidy up after the boot loader by | ||
424 | * jumping between the cached and uncached mappings and tearing | ||
425 | * down alternating mappings while executing from the other. | ||
297 | */ | 426 | */ |
298 | size = __MEMORY_START + __MEMORY_SIZE; | 427 | for (i = 0; i < NR_PMB_ENTRIES; i++) { |
428 | unsigned long addr, data; | ||
429 | unsigned long addr_val, data_val; | ||
430 | unsigned long ppn, vpn, flags; | ||
431 | unsigned long irqflags; | ||
432 | unsigned int size; | ||
433 | struct pmb_entry *pmbe; | ||
299 | 434 | ||
300 | ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); | 435 | addr = mk_pmb_addr(i); |
301 | BUG_ON(ret != size); | 436 | data = mk_pmb_data(i); |
302 | 437 | ||
303 | ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); | 438 | addr_val = __raw_readl(addr); |
304 | BUG_ON(ret != size); | 439 | data_val = __raw_readl(data); |
305 | 440 | ||
306 | ctrl_outl(0, PMB_IRMCR); | 441 | /* |
442 | * Skip over any bogus entries | ||
443 | */ | ||
444 | if (!(data_val & PMB_V) || !(addr_val & PMB_V)) | ||
445 | continue; | ||
307 | 446 | ||
308 | /* PMB.SE and UB[7] */ | 447 | ppn = data_val & PMB_PFN_MASK; |
309 | ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); | 448 | vpn = addr_val & PMB_PFN_MASK; |
310 | 449 | ||
311 | /* Flush out the TLB */ | 450 | /* |
312 | i = ctrl_inl(MMUCR); | 451 | * Only preserve in-range mappings. |
313 | i |= MMUCR_TI; | 452 | */ |
314 | ctrl_outl(i, MMUCR); | 453 | if (!pmb_ppn_in_range(ppn)) { |
454 | /* | ||
455 | * Invalidate anything out of bounds. | ||
456 | */ | ||
457 | writel_uncached(addr_val & ~PMB_V, addr); | ||
458 | writel_uncached(data_val & ~PMB_V, data); | ||
459 | continue; | ||
460 | } | ||
315 | 461 | ||
316 | back_to_cached(); | 462 | /* |
463 | * Update the caching attributes if necessary | ||
464 | */ | ||
465 | if (data_val & PMB_C) { | ||
466 | data_val &= ~PMB_CACHE_MASK; | ||
467 | data_val |= pmb_cache_flags(); | ||
317 | 468 | ||
318 | return 0; | 469 | writel_uncached(data_val, data); |
470 | } | ||
471 | |||
472 | size = data_val & PMB_SZ_MASK; | ||
473 | flags = size | (data_val & PMB_CACHE_MASK); | ||
474 | |||
475 | pmbe = pmb_alloc(vpn, ppn, flags, i); | ||
476 | if (IS_ERR(pmbe)) { | ||
477 | WARN_ON_ONCE(1); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | spin_lock_irqsave(&pmbe->lock, irqflags); | ||
482 | |||
483 | for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) | ||
484 | if (pmb_sizes[j].flag == size) | ||
485 | pmbe->size = pmb_sizes[j].size; | ||
486 | |||
487 | if (pmbp) { | ||
488 | spin_lock(&pmbp->lock); | ||
489 | |||
490 | /* | ||
491 | * Compare the previous entry against the current one to | ||
492 | * see if the entries span a contiguous mapping. If so, | ||
493 | * setup the entry links accordingly. Compound mappings | ||
494 | * are later coalesced. | ||
495 | */ | ||
496 | if (pmb_can_merge(pmbp, pmbe)) | ||
497 | pmbp->link = pmbe; | ||
498 | |||
499 | spin_unlock(&pmbp->lock); | ||
500 | } | ||
501 | |||
502 | pmbp = pmbe; | ||
503 | |||
504 | spin_unlock_irqrestore(&pmbe->lock, irqflags); | ||
505 | } | ||
506 | } | ||
507 | |||
508 | static void __init pmb_merge(struct pmb_entry *head) | ||
509 | { | ||
510 | unsigned long span, newsize; | ||
511 | struct pmb_entry *tail; | ||
512 | int i = 1, depth = 0; | ||
513 | |||
514 | span = newsize = head->size; | ||
515 | |||
516 | tail = head->link; | ||
517 | while (tail) { | ||
518 | span += tail->size; | ||
519 | |||
520 | if (pmb_size_valid(span)) { | ||
521 | newsize = span; | ||
522 | depth = i; | ||
523 | } | ||
524 | |||
525 | /* This is the end of the line.. */ | ||
526 | if (!tail->link) | ||
527 | break; | ||
528 | |||
529 | tail = tail->link; | ||
530 | i++; | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * The merged page size must be valid. | ||
535 | */ | ||
536 | if (!pmb_size_valid(newsize)) | ||
537 | return; | ||
538 | |||
539 | head->flags &= ~PMB_SZ_MASK; | ||
540 | head->flags |= pmb_size_to_flags(newsize); | ||
541 | |||
542 | head->size = newsize; | ||
543 | |||
544 | __pmb_unmap_entry(head->link, depth); | ||
545 | __set_pmb_entry(head); | ||
319 | } | 546 | } |
320 | #else | 547 | |
321 | int __uses_jump_to_uncached pmb_init(void) | 548 | static void __init pmb_coalesce(void) |
322 | { | 549 | { |
550 | unsigned long flags; | ||
323 | int i; | 551 | int i; |
324 | unsigned long addr, data; | ||
325 | 552 | ||
326 | jump_to_uncached(); | 553 | write_lock_irqsave(&pmb_rwlock, flags); |
327 | 554 | ||
328 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | 555 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
329 | struct pmb_entry *pmbe; | 556 | struct pmb_entry *pmbe; |
330 | unsigned long vpn, ppn, flags; | ||
331 | 557 | ||
332 | addr = PMB_DATA + (i << PMB_E_SHIFT); | 558 | if (!test_bit(i, pmb_map)) |
333 | data = ctrl_inl(addr); | ||
334 | if (!(data & PMB_V)) | ||
335 | continue; | 559 | continue; |
336 | 560 | ||
337 | if (data & PMB_C) { | 561 | pmbe = &pmb_entry_list[i]; |
338 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
339 | data |= PMB_WT; | ||
340 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
341 | data &= ~PMB_WT; | ||
342 | #else | ||
343 | data &= ~(PMB_C | PMB_WT); | ||
344 | #endif | ||
345 | } | ||
346 | ctrl_outl(data, addr); | ||
347 | 562 | ||
348 | ppn = data & PMB_PFN_MASK; | 563 | /* |
564 | * We're only interested in compound mappings | ||
565 | */ | ||
566 | if (!pmbe->link) | ||
567 | continue; | ||
349 | 568 | ||
350 | flags = data & (PMB_C | PMB_WT | PMB_UB); | 569 | /* |
351 | flags |= data & PMB_SZ_MASK; | 570 | * Nothing to do if it already uses the largest possible |
571 | * page size. | ||
572 | */ | ||
573 | if (pmbe->size == SZ_512M) | ||
574 | continue; | ||
352 | 575 | ||
353 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | 576 | pmb_merge(pmbe); |
354 | data = ctrl_inl(addr); | 577 | } |
355 | 578 | ||
356 | vpn = data & PMB_PFN_MASK; | 579 | write_unlock_irqrestore(&pmb_rwlock, flags); |
580 | } | ||
357 | 581 | ||
358 | pmbe = pmb_alloc(vpn, ppn, flags, i); | 582 | #ifdef CONFIG_UNCACHED_MAPPING |
359 | WARN_ON(IS_ERR(pmbe)); | 583 | static void __init pmb_resize(void) |
584 | { | ||
585 | int i; | ||
586 | |||
587 | /* | ||
588 | * If the uncached mapping was constructed by the kernel, it will | ||
589 | * already be a reasonable size. | ||
590 | */ | ||
591 | if (uncached_size == SZ_16M) | ||
592 | return; | ||
593 | |||
594 | read_lock(&pmb_rwlock); | ||
595 | |||
596 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
597 | struct pmb_entry *pmbe; | ||
598 | unsigned long flags; | ||
599 | |||
600 | if (!test_bit(i, pmb_map)) | ||
601 | continue; | ||
602 | |||
603 | pmbe = &pmb_entry_list[i]; | ||
604 | |||
605 | if (pmbe->vpn != uncached_start) | ||
606 | continue; | ||
607 | |||
608 | /* | ||
609 | * Found it, now resize it. | ||
610 | */ | ||
611 | spin_lock_irqsave(&pmbe->lock, flags); | ||
612 | |||
613 | pmbe->size = SZ_16M; | ||
614 | pmbe->flags &= ~PMB_SZ_MASK; | ||
615 | pmbe->flags |= pmb_size_to_flags(pmbe->size); | ||
616 | |||
617 | uncached_resize(pmbe->size); | ||
618 | |||
619 | __set_pmb_entry(pmbe); | ||
620 | |||
621 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
360 | } | 622 | } |
361 | 623 | ||
362 | back_to_cached(); | 624 | read_lock(&pmb_rwlock); |
625 | } | ||
626 | #endif | ||
627 | |||
628 | void __init pmb_init(void) | ||
629 | { | ||
630 | /* Synchronize software state */ | ||
631 | pmb_synchronize(); | ||
363 | 632 | ||
364 | return 0; | 633 | /* Attempt to combine compound mappings */ |
634 | pmb_coalesce(); | ||
635 | |||
636 | #ifdef CONFIG_UNCACHED_MAPPING | ||
637 | /* Resize initial mappings, if necessary */ | ||
638 | pmb_resize(); | ||
639 | #endif | ||
640 | |||
641 | /* Log them */ | ||
642 | pmb_notify(); | ||
643 | |||
644 | writel_uncached(0, PMB_IRMCR); | ||
645 | |||
646 | /* Flush out the TLB */ | ||
647 | __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); | ||
648 | ctrl_barrier(); | ||
649 | } | ||
650 | |||
651 | bool __in_29bit_mode(void) | ||
652 | { | ||
653 | return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; | ||
365 | } | 654 | } |
366 | #endif /* CONFIG_PMB */ | ||
367 | 655 | ||
368 | static int pmb_seq_show(struct seq_file *file, void *iter) | 656 | static int pmb_seq_show(struct seq_file *file, void *iter) |
369 | { | 657 | { |
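The boot-time path introduced above runs pmb_synchronize() to import whatever the boot loader left in the PMB, pmb_coalesce() to fold chains of contiguous entries into larger pages via pmb_merge(), optionally pmb_resize() for the uncached mapping, and pmb_notify() to log the result before PMB_IRMCR is cleared and the TLB flushed. A standalone model of the span search inside pmb_merge(), applied to a hypothetical chain of 16 MB boot mappings (sizes in MB, not taken from the patch):

	#include <stdio.h>

	/* Model of pmb_merge(): walk the link chain and remember the longest
	 * prefix whose total size the PMB can encode. */
	static const unsigned long valid[] = { 512, 128, 64, 16 };

	static int size_valid(unsigned long mb)
	{
		for (int i = 0; i < 4; i++)
			if (valid[i] == mb)
				return 1;
		return 0;
	}

	int main(void)
	{
		/* Hypothetical boot mappings: a linked chain of five 16 MB entries. */
		unsigned long chain[] = { 16, 16, 16, 16, 16 };
		unsigned long span = chain[0], newsize = chain[0];
		int depth = 0;

		for (int i = 1; i < 5; i++) {
			span += chain[i];
			if (size_valid(span)) {
				newsize = span;	/* best encodable prefix so far */
				depth = i;
			}
		}

		/* The head plus depth=3 linked entries collapse into one 64 MB
		 * mapping; the fifth 16 MB entry is left alone. */
		printf("merged size: %lu MB, entries folded: %d\n", newsize, depth);
		return 0;
	}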
@@ -378,8 +666,8 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
378 | unsigned int size; | 666 | unsigned int size; |
379 | char *sz_str = NULL; | 667 | char *sz_str = NULL; |
380 | 668 | ||
381 | addr = ctrl_inl(mk_pmb_addr(i)); | 669 | addr = __raw_readl(mk_pmb_addr(i)); |
382 | data = ctrl_inl(mk_pmb_data(i)); | 670 | data = __raw_readl(mk_pmb_data(i)); |
383 | 671 | ||
384 | size = data & PMB_SZ_MASK; | 672 | size = data & PMB_SZ_MASK; |
385 | sz_str = (size == PMB_SZ_16M) ? " 16MB": | 673 | sz_str = (size == PMB_SZ_16M) ? " 16MB": |
@@ -437,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
437 | if (state.event == PM_EVENT_ON && | 725 | if (state.event == PM_EVENT_ON && |
438 | prev_state.event == PM_EVENT_FREEZE) { | 726 | prev_state.event == PM_EVENT_FREEZE) { |
439 | struct pmb_entry *pmbe; | 727 | struct pmb_entry *pmbe; |
728 | |||
729 | read_lock(&pmb_rwlock); | ||
730 | |||
440 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 731 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
441 | if (test_bit(i, &pmb_map)) { | 732 | if (test_bit(i, pmb_map)) { |
442 | pmbe = &pmb_entry_list[i]; | 733 | pmbe = &pmb_entry_list[i]; |
443 | set_pmb_entry(pmbe); | 734 | set_pmb_entry(pmbe); |
444 | } | 735 | } |
445 | } | 736 | } |
737 | |||
738 | read_unlock(&pmb_rwlock); | ||
446 | } | 739 | } |
740 | |||
447 | prev_state = state; | 741 | prev_state = state; |
742 | |||
448 | return 0; | 743 | return 0; |
449 | } | 744 | } |
450 | 745 | ||
@@ -462,6 +757,5 @@ static int __init pmb_sysdev_init(void)
462 | { | 757 | { |
463 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); | 758 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); |
464 | } | 759 | } |
465 | |||
466 | subsys_initcall(pmb_sysdev_init); | 760 | subsys_initcall(pmb_sysdev_init); |
467 | #endif | 761 | #endif |