author     Paul Mundt <lethal@linux-sh.org>    2010-02-16 04:39:30 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2010-02-16 04:39:30 -0500
commit     efd54ea315f645ef318708aab5714a5f1f432d03 (patch)
tree       d1958ba3a18418e1b49298b90fdba33f37da4c27 /arch/sh
parent     55cef91a5d553265f03fe159f9fcdfac36902248 (diff)
sh: Merge the legacy PMB mapping and entry synchronization code.
This merges the code for iterating over the legacy PMB mappings and the
code for synchronizing software state with the hardware mappings. There's
really no reason to do the same iteration twice, and this also buys us
the legacy entry logging facility for the dynamic PMB case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
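In outline, the merged pmb_synchronize_mappings() makes a single pass over the hardware entries, invalidating out-of-range slots and registering plus logging the rest. The following free-standing C sketch mirrors that control flow only; every helper, constant, and sample value here is a stand-in invented for illustration, not the kernel code itself:

#include <stdio.h>

#define PMB_ENTRY_MAX	16
#define PMB_V		0x00000100

/*
 * Stand-ins for the hardware accessors and pmb_alloc(); everything here,
 * including the sample entry, is invented for illustration.
 */
static unsigned long pmb_data[PMB_ENTRY_MAX] = { [0] = PMB_V | 0x8 };

static int  ppn_in_range(unsigned long data_val) { (void)data_val; return 1; }
static void invalidate_entry(int i)  { pmb_data[i] &= ~PMB_V; }
static int  register_entry(int i)    { (void)i; return 0; }
static void log_mapping(int i)       { printf("entry %d: logged\n", i); }

/* Single pass: invalidate out-of-range slots, register and log the rest. */
static int synchronize_mappings(void)
{
	unsigned int applied = 0;
	int i;

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long data_val = pmb_data[i];

		if (!(data_val & PMB_V))
			continue;		/* empty slot */

		if (!ppn_in_range(data_val)) {
			invalidate_entry(i);	/* out of bounds */
			continue;
		}

		if (register_entry(i))		/* sync software state */
			continue;

		log_mapping(i);			/* legacy-style logging */
		applied++;
	}

	return applied == 0;			/* nonzero if nothing kept */
}

int main(void)
{
	printf("no mappings kept = %d\n", synchronize_mappings());
	return 0;
}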
Diffstat (limited to 'arch/sh')
 -rw-r--r--   arch/sh/include/asm/mmu.h |   1
 -rw-r--r--   arch/sh/mm/pmb.c          | 162
 2 files changed, 70 insertions, 93 deletions
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index ca7d91e8aa72..2fcbedb55002 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -25,6 +25,7 @@
 #define PMB_C			0x00000008
 #define PMB_WT			0x00000001
 #define PMB_UB			0x00000200
+#define PMB_CACHE_MASK		(PMB_C | PMB_WT | PMB_UB)
 #define PMB_V			0x00000100
 
 #define PMB_NO_ENTRY		(-1)
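The new PMB_CACHE_MASK groups the three caching-related bits (PMB_C, PMB_WT, PMB_UB) so they can be lifted out of a raw PMB data-array value in one masking step, as the pmb.c changes below do when building the entry flags. A minimal stand-alone demonstration, reusing the constants from this hunk (the sample register value is invented):

#include <stdio.h>

/* Constants copied from the hunk above. */
#define PMB_C		0x00000008
#define PMB_WT		0x00000001
#define PMB_UB		0x00000200
#define PMB_CACHE_MASK	(PMB_C | PMB_WT | PMB_UB)
#define PMB_V		0x00000100

int main(void)
{
	/* Hypothetical raw PMB data-array value: valid, cached, unbuffered. */
	unsigned long data_val = PMB_V | PMB_C;

	if (data_val & PMB_V)
		printf("valid entry, caching bits = 0x%03lx\n",
		       data_val & PMB_CACHE_MASK);
	return 0;
}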
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index a06483076a41..f822f83418e4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -276,41 +276,57 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-#ifdef CONFIG_PMB_LEGACY
+static inline void
+pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
+{
+	unsigned int size;
+	const char *sz_str;
+
+	size = data_val & PMB_SZ_MASK;
+
+	sz_str = (size == PMB_SZ_16M) ? " 16MB":
+		 (size == PMB_SZ_64M) ? " 64MB":
+		 (size == PMB_SZ_128M) ? "128MB":
+					 "512MB";
+
+	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
+		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
+		(data_val & PMB_C) ? "" : "un");
+}
+
 static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 {
-	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
+	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_apply_legacy_mappings(void)
+static int pmb_synchronize_mappings(void)
 {
 	unsigned int applied = 0;
 	int i;
 
-	pr_info("PMB: Preserving legacy mappings:\n");
+	pr_info("PMB: boot mappings:\n");
 
 	/*
-	 * The following entries are setup by the bootloader.
+	 * Run through the initial boot mappings, log the established
+	 * ones, and blow away anything that falls outside of the valid
+	 * PPN range. Specifically, we only care about existing mappings
+	 * that impact the cached/uncached sections.
 	 *
-	 * Entry  VPN         PPN         V   SZ     C  UB
-	 * --------------------------------------------------------
-	 *   0    0xA0000000  0x00000000  1    64MB  0  0
-	 *   1    0xA4000000  0x04000000  1    16MB  0  0
-	 *   2    0xA6000000  0x08000000  1    16MB  0  0
-	 *   9    0x88000000  0x48000000  1   128MB  1  1
-	 *  10    0x90000000  0x50000000  1   128MB  1  1
-	 *  11    0x98000000  0x58000000  1   128MB  1  1
-	 *  13    0xA8000000  0x48000000  1   128MB  0  0
-	 *  14    0xB0000000  0x50000000  1   128MB  0  0
-	 *  15    0xB8000000  0x58000000  1   128MB  0  0
+	 * Note that touching these can be a bit of a minefield; the boot
+	 * loader can establish multi-page mappings with the same caching
+	 * attributes, so we need to ensure that we aren't modifying a
+	 * mapping that we're presently executing from, or may execute
+	 * from in the case of straddling page boundaries.
 	 *
-	 * The only entries that we need are the ones that map the kernel
-	 * at the cached and uncached addresses.
+	 * In the future we will have to tidy up after the boot loader by
+	 * jumping between the cached and uncached mappings and tearing
+	 * down alternating mappings while executing from the other.
 	 */
 	for (i = 0; i < PMB_ENTRY_MAX; i++) {
 		unsigned long addr, data;
 		unsigned long addr_val, data_val;
-		unsigned long ppn, vpn;
+		unsigned long ppn, vpn, flags;
+		struct pmb_entry *pmbe;
 
 		addr = mk_pmb_addr(i);
 		data = mk_pmb_data(i);
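The pmb_log_mapping() helper factored out above is what carries the legacy-style log lines over to the dynamic case. Its formatting can be exercised on its own; in this user-space rendition the PMB_SZ_* encodings are replaced by an enum with placeholder values, and the sample frame numbers echo the removed bootloader table under an assumed 4KiB PAGE_SHIFT:

#include <stdio.h>

/* Stand-ins for the hardware PMB_SZ_* encodings (placeholder values). */
enum pmb_size { SZ_16M, SZ_64M, SZ_128M, SZ_512M };

static void log_mapping(enum pmb_size size, unsigned long vpn_pfn,
			unsigned long ppn_pfn, int cached)
{
	const char *sz_str = (size == SZ_16M)  ? " 16MB" :
			     (size == SZ_64M)  ? " 64MB" :
			     (size == SZ_128M) ? "128MB" :
						 "512MB";

	/* Same format string as the kernel's pr_info() call above. */
	printf("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
	       vpn_pfn, ppn_pfn, sz_str, cached ? "" : "un");
}

int main(void)
{
	/* 0x88000000 and 0x48000000 shifted right by an assumed 12. */
	log_mapping(SZ_128M, 0x88000, 0x48000, 1);
	log_mapping(SZ_64M,  0xa0000, 0x00000, 0);
	return 0;
}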
@@ -330,106 +346,66 @@ static int pmb_apply_legacy_mappings(void)
 		/*
 		 * Only preserve in-range mappings.
 		 */
-		if (pmb_ppn_in_range(ppn)) {
-			unsigned int size;
-			char *sz_str = NULL;
-
-			size = data_val & PMB_SZ_MASK;
-
-			sz_str = (size == PMB_SZ_16M) ? " 16MB":
-				 (size == PMB_SZ_64M) ? " 64MB":
-				 (size == PMB_SZ_128M) ? "128MB":
-							 "512MB";
-
-			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-				(data_val & PMB_C) ? "" : "un");
-
-			applied++;
-		} else {
+		if (!pmb_ppn_in_range(ppn)) {
 			/*
 			 * Invalidate anything out of bounds.
 			 */
 			__raw_writel(addr_val & ~PMB_V, addr);
 			__raw_writel(data_val & ~PMB_V, data);
+			continue;
 		}
+
+		/*
+		 * Update the caching attributes if necessary
+		 */
+		if (data_val & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data_val |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data_val &= ~PMB_WT;
+#else
+			data_val &= ~(PMB_C | PMB_WT);
+#endif
+			__raw_writel(data_val, data);
+		}
+
+		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		if (IS_ERR(pmbe)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		pmb_log_mapping(data_val, vpn, ppn);
+
+		applied++;
 	}
 
 	return (applied == 0);
 }
-#else
-static inline int pmb_apply_legacy_mappings(void)
-{
-	return 1;
-}
-#endif
 
 int pmb_init(void)
 {
-	int i;
-	unsigned long addr, data;
-	unsigned long ret;
+	int ret;
 
 	jump_to_uncached();
 
 	/*
-	 * Attempt to apply the legacy boot mappings if configured. If
-	 * this is successful then we simply carry on with those and
-	 * don't bother establishing additional memory mappings. Dynamic
-	 * device mappings through pmb_remap() can still be bolted on
-	 * after this.
-	 */
-	ret = pmb_apply_legacy_mappings();
-	if (ret == 0) {
-		back_to_cached();
-		return 0;
-	}
-
-	/*
 	 * Sync our software copy of the PMB mappings with those in
 	 * hardware. The mappings in the hardware PMB were either set up
 	 * by the bootloader or very early on by the kernel.
 	 */
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
-		struct pmb_entry *pmbe;
-		unsigned long vpn, ppn, flags;
-
-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = __raw_readl(addr);
-		if (!(data & PMB_V))
-			continue;
-
-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		__raw_writel(data, addr);
-
-		ppn = data & PMB_PFN_MASK;
-
-		flags = data & (PMB_C | PMB_WT | PMB_UB);
-		flags |= data & PMB_SZ_MASK;
-
-		addr = PMB_ADDR + (i << PMB_E_SHIFT);
-		data = __raw_readl(addr);
-
-		vpn = data & PMB_PFN_MASK;
-
-		pmbe = pmb_alloc(vpn, ppn, flags, i);
-		WARN_ON(IS_ERR(pmbe));
+	ret = pmb_synchronize_mappings();
+	if (unlikely(ret == 0)) {
+		back_to_cached();
+		return 0;
 	}
 
 	__raw_writel(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
-	i = __raw_readl(MMUCR);
-	i |= MMUCR_TI;
-	__raw_writel(i, MMUCR);
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
 
 	back_to_cached();
 
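The closing hunk also collapses the three-line MMUCR read/modify/write into a single expression. The same pattern in free-standing form; the register here is simulated, and the MMUCR_TI bit value is an assumption for illustration, not the real SH-4 definition:

#include <stdint.h>
#include <stdio.h>

/* Simulated register plus readl/writel-style helpers. */
static volatile uint32_t fake_mmucr;

static inline uint32_t raw_readl(volatile uint32_t *reg)
{
	return *reg;
}

static inline void raw_writel(uint32_t v, volatile uint32_t *reg)
{
	*reg = v;
}

#define MMUCR_TI	0x00000004	/* assumed bit value, illustration only */

int main(void)
{
	/* Single-expression read-modify-write, as in the patched flush. */
	raw_writel(raw_readl(&fake_mmucr) | MMUCR_TI, &fake_mmucr);

	printf("MMUCR = 0x%08x\n", (unsigned int)fake_mmucr);
	return 0;
}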