about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2014-07-29 04:27:13 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2014-08-02 03:51:55 -0400
commit3bb70de692f70861f5c5729cd2b870d0104a7cc9 (patch)
treee805359f9252e9c51373e98c876e539f195d7b95
parentc716483c3db10b31bf9bf43c5f45f2c3117ca13a (diff)
ARM: add comments to the early page table remap code
Add further comments to the early page table remap code to explain what the code is doing and why, but more importantly to explain that the code is not architecturally compliant and is squarely in "UNPREDICTABLE" behaviour territory. Add a warning and tainting of the kernel too. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/mm/mmu.c  51
1 file changed, 46 insertions, 5 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..8348ed6b2efe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1406 return; 1406 return;
1407 1407
1408 /* remap kernel code and data */ 1408 /* remap kernel code and data */
1409 map_start = init_mm.start_code; 1409 map_start = init_mm.start_code & PMD_MASK;
1410 map_end = init_mm.brk; 1410 map_end = ALIGN(init_mm.brk, PMD_SIZE);
1411 1411
1412 /* get a handle on things... */ 1412 /* get a handle on things... */
1413 pgd0 = pgd_offset_k(0); 1413 pgd0 = pgd_offset_k(0);
@@ -1434,23 +1434,64 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1434 dsb(ishst); 1434 dsb(ishst);
1435 isb(); 1435 isb();
1436 1436
1437 /* remap level 1 table */ 1437 /*
1438 * FIXME: This code is not architecturally compliant: we modify
1439 * the mappings in-place, indeed while they are in use by this
1440 * very same code. This may lead to unpredictable behaviour of
1441 * the CPU.
1442 *
1443 * Even modifying the mappings in a separate page table does
1444 * not resolve this.
1445 *
1446 * The architecture strongly recommends that when a mapping is
1447 * changed, that it is changed by first going via an invalid
1448 * mapping and back to the new mapping. This is to ensure that
1449 * no TLB conflicts (caused by the TLB having more than one TLB
1450 * entry match a translation) can occur. However, doing that
1451 * here will result in unmapping the code we are running.
1452 */
1453 pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
1454 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1455
1456 /*
1457 * Remap level 1 table. This changes the physical addresses
1458 * used to refer to the level 2 page tables to the high
1459 * physical address alias, leaving everything else the same.
1460 */
1438 for (i = 0; i < PTRS_PER_PGD; pud0++, i++) { 1461 for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
1439 set_pud(pud0, 1462 set_pud(pud0,
1440 __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); 1463 __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
1441 pmd0 += PTRS_PER_PMD; 1464 pmd0 += PTRS_PER_PMD;
1442 } 1465 }
1443 1466
1444 /* remap pmds for kernel mapping */ 1467 /*
1445 phys = __pa(map_start) & PMD_MASK; 1468 * Remap the level 2 table, pointing the mappings at the high
1469 * physical address alias of these pages.
1470 */
1471 phys = __pa(map_start);
1446 do { 1472 do {
1447 *pmdk++ = __pmd(phys | pmdprot); 1473 *pmdk++ = __pmd(phys | pmdprot);
1448 phys += PMD_SIZE; 1474 phys += PMD_SIZE;
1449 } while (phys < map_end); 1475 } while (phys < map_end);
1450 1476
1477 /*
1478 * Ensure that the above updates are flushed out of the cache.
1479 * This is not strictly correct; on a system where the caches
1480 * are coherent with each other, but the MMU page table walks
1481 * may not be coherent, flush_cache_all() may be a no-op, and
1482 * this will fail.
1483 */
1451 flush_cache_all(); 1484 flush_cache_all();
1485
1486 /*
1487 * Re-write the TTBR values to point them at the high physical
1488 * alias of the page tables. We expect __va() will work on
1489 * cpu_get_pgd(), which returns the value of TTBR0.
1490 */
1452 cpu_switch_mm(pgd0, &init_mm); 1491 cpu_switch_mm(pgd0, &init_mm);
1453 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET); 1492 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
1493
1494 /* Finally flush any stale TLB values. */
1454 local_flush_bp_all(); 1495 local_flush_bp_all();
1455 local_flush_tlb_all(); 1496 local_flush_tlb_all();
1456} 1497}