author      Matt Fleming <matt@console-pimps.org>      2010-01-18 05:33:10 -0500
committer   Paul Mundt <lethal@linux-sh.org>           2010-01-18 05:33:10 -0500
commit      3d467676abf5f01f5ee99056273a58486968e252 (patch)
tree        4824c0f9a6dc7ade813a58f6ac4fa1e3780f035e /arch/sh/mm/pmb.c
parent      7dcaa8e8e67b2cfbe0097c9bb52e23aed5443b8b (diff)
sh: Setup early PMB mappings.
More and more boards that boot with the MMU in 32BIT mode by default are
going to start shipping. Previously we relied on the bootloader to set up
PMB mappings for use by the kernel, but we also need to cater for boards
whose bootloaders don't set them up.
If CONFIG_PMB_LEGACY is not enabled, we have full control over our PMB
mappings and can compress our address space. The distance between the
cached and uncached mappings of RAM is normally a fixed 512MB, but we can
compress that distance to the amount of RAM actually on the board.
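
As a rough illustration of that arithmetic (the cached base address, the
128MB RAM size, and the 512MB gap below are illustrative assumptions, not
values taken from this patch), a self-contained sketch:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values only: a P1-style cached base and 128MB of RAM. */
            unsigned long cached_base = 0x80000000UL;
            unsigned long memory_size = 0x08000000UL;       /* 128MB */

            /* Fixed layout: the uncached window always sits 512MB above the cached one. */
            unsigned long fixed_uncached = cached_base + 0x20000000UL;

            /* Compressed layout: the gap shrinks to the amount of RAM on the board. */
            unsigned long compressed_uncached = cached_base + memory_size;

            printf("fixed-gap uncached base:      0x%08lx\n", fixed_uncached);
            printf("compressed-gap uncached base: 0x%08lx\n", compressed_uncached);
            return 0;
    }

With 128MB of RAM the uncached mapping can start 128MB above the cached
one rather than a full 512MB away, reclaiming the rest of that window for
other mappings.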
pmb_init() now becomes much simpler. It no longer has to calculate any
mappings; it just has to synchronise the software PMB table with the
hardware.
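
A minimal, hedged sketch of that synchronisation idea follows (the register
values, the valid bit, and the table are made up so it runs in user space;
the real loop, using PMB_DATA/PMB_ADDR reads and pmb_alloc(), is in the
diff below):

    #include <stdio.h>

    #define PMB_ENTRY_MAX   16
    #define PMB_V           (1 << 8)        /* illustrative "valid" bit */

    /* Stand-in for the values the hardware PMB data registers would return. */
    static unsigned long fake_pmb_data[PMB_ENTRY_MAX] = {
            [0] = 0x48000000UL | PMB_V,     /* made-up valid entry */
            [1] = 0x00000000UL,             /* invalid slot */
    };

    int main(void)
    {
            int i, synced = 0;

            for (i = 0; i < PMB_ENTRY_MAX; i++) {
                    unsigned long data = fake_pmb_data[i];

                    /* Only mirror entries the hardware marks as valid. */
                    if (!(data & PMB_V))
                            continue;

                    /* The kernel would allocate a software PMB entry here. */
                    synced++;
            }

            printf("synced %d hardware PMB entries into the software table\n", synced);
            return 0;
    }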
Tested on SDK7786 and SH7785LCR.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r-- | arch/sh/mm/pmb.c | 156
1 files changed, 105 insertions, 51 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 8f7dbf183fb0..b796b6c021b4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
  * Copyright (C) 2005 - 2010  Paul Mundt
- *
- * P1/P2 Section mapping definitions from map32.h, which was:
- *
- * Copyright 2003 (c) Lineo Solutions,Inc.
+ * Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -280,46 +277,82 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 }
 
 #ifdef CONFIG_PMB_LEGACY
+static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
+}
+
 static int pmb_apply_legacy_mappings(void)
 {
-	int i;
-	unsigned long addr, data;
 	unsigned int applied = 0;
+	int i;
 
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
-		struct pmb_entry *pmbe;
-		unsigned long vpn, ppn, flags;
-
-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-		if (!(data & PMB_V))
-			continue;
+	pr_info("PMB: Preserving legacy mappings:\n");
 
-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		ctrl_outl(data, addr);
-
-		ppn = data & PMB_PFN_MASK;
+	/*
+	 * The following entries are setup by the bootloader.
+	 *
+	 * Entry     VPN         PPN         V   SZ     C   UB
+	 * --------------------------------------------------------
+	 *   0    0xA0000000  0x00000000   1   64MB    0   0
+	 *   1    0xA4000000  0x04000000   1   16MB    0   0
+	 *   2    0xA6000000  0x08000000   1   16MB    0   0
+	 *   9    0x88000000  0x48000000   1  128MB    1   1
+	 *  10    0x90000000  0x50000000   1  128MB    1   1
+	 *  11    0x98000000  0x58000000   1  128MB    1   1
+	 *  13    0xA8000000  0x48000000   1  128MB    0   0
+	 *  14    0xB0000000  0x50000000   1  128MB    0   0
+	 *  15    0xB8000000  0x58000000   1  128MB    0   0
+	 *
+	 * The only entries the we need are the ones that map the kernel
+	 * at the cached and uncached addresses.
+	 */
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		unsigned long addr, data;
+		unsigned long addr_val, data_val;
+		unsigned long ppn, vpn;
 
-		flags = data & (PMB_C | PMB_WT | PMB_UB);
-		flags |= data & PMB_SZ_MASK;
+		addr = mk_pmb_addr(i);
+		data = mk_pmb_data(i);
 
-		addr = PMB_ADDR + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
+		addr_val = __raw_readl(addr);
+		data_val = __raw_readl(data);
 
-		vpn = data & PMB_PFN_MASK;
+		/*
+		 * Skip over any bogus entries
+		 */
+		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
+			continue;
 
-		pmbe = pmb_alloc(vpn, ppn, flags, i);
-		WARN_ON(IS_ERR(pmbe));
+		ppn = data_val & PMB_PFN_MASK;
+		vpn = addr_val & PMB_PFN_MASK;
 
-		applied++;
+		/*
+		 * Only preserve in-range mappings.
+		 */
+		if (pmb_ppn_in_range(ppn)) {
+			unsigned int size;
+			char *sz_str = NULL;
+
+			size = data_val & PMB_SZ_MASK;
+
+			sz_str = (size == PMB_SZ_16M)  ? " 16MB":
+				 (size == PMB_SZ_64M)  ? " 64MB":
+				 (size == PMB_SZ_128M) ? "128MB":
+							 "512MB";
+
+			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
+				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
+				(data_val & PMB_C) ? "" : "un");
+
+			applied++;
+		} else {
+			/*
+			 * Invalidate anything out of bounds.
+			 */
+			__raw_writel(addr_val & ~PMB_V, addr);
+			__raw_writel(data_val & ~PMB_V, data);
+		}
 	}
 
 	return (applied == 0);
@@ -333,8 +366,9 @@ static inline int pmb_apply_legacy_mappings(void)
 
 int __uses_jump_to_uncached pmb_init(void)
 {
-	unsigned int i;
-	unsigned long size, ret;
+	int i;
+	unsigned long addr, data;
+	unsigned long ret;
 
 	jump_to_uncached();
 
@@ -352,25 +386,45 @@ int __uses_jump_to_uncached pmb_init(void)
 	}
 
 	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
-	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
+	 * Sync our software copy of the PMB mappings with those in
+	 * hardware. The mappings in the hardware PMB were either set up
+	 * by the bootloader or very early on by the kernel.
 	 */
-	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		struct pmb_entry *pmbe;
+		unsigned long vpn, ppn, flags;
 
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
 
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
 
-	ctrl_outl(0, PMB_IRMCR);
+		ppn = data & PMB_PFN_MASK;
+
+		flags = data & (PMB_C | PMB_WT | PMB_UB);
+		flags |= data & PMB_SZ_MASK;
 
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+		addr = PMB_ADDR + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+
+		vpn = data & PMB_PFN_MASK;
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		WARN_ON(IS_ERR(pmbe));
+	}
+
+	ctrl_outl(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
 	i = ctrl_inl(MMUCR);