author    David S. Miller <davem@sunset.davemloft.net>  2005-09-25 19:46:57 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2005-09-25 19:46:57 -0400
commit    56425306517ef28a9b480161cdb96d182172bc1d (patch)
tree      204cfbef0e5d86954f87b6b40d79d57f8157e5ea  /arch/sparc64/mm/init.c
parent    52f26deb7c67d5f34910660200b925c1a2b8df8c (diff)
[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.
The trick is that we do the kernel linear mapping TLB miss starting
with an instruction sequence like this:

	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

succeeded by an instruction sequence which performs a full page
table walk starting at swapper_pg_dir.

We first take over the trap table from the firmware.  Then, using
this constant PTE generation for the linear mapping area above, we
build the kernel page tables for the linear mapping.

After this is set up, we patch that branch above into a "nop", which
will cause TLB misses to fall through to the full page table walk.

With this, the page unmapping for CONFIG_DEBUG_PAGEALLOC is trivial.

Signed-off-by: David S. Miller <davem@davemloft.net>
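For readers unfamiliar with the sparc64 linear map, the "constant PTE
generation" mentioned above works because every linear-map virtual address
differs from its physical address by the fixed offset PAGE_OFFSET, so the
PTE can be computed rather than looked up.  A minimal C sketch of the idea
(illustrative only, not part of the patch; linear_map_pte() is a
hypothetical name, and the real fast path is the assembler sequence quoted
above):

	/* Sketch: synthesize a linear-map PTE with constant arithmetic. */
	static unsigned long linear_map_pte(unsigned long vaddr)
	{
		unsigned long paddr = vaddr - PAGE_OFFSET;	/* __pa() on the linear map */

		/* Same recipe kernel_map_range() uses below: paddr | protection bits. */
		return paddr | pgprot_val(PAGE_KERNEL);
	}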
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c  |  109
1 file changed, 106 insertions(+), 3 deletions(-)
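For context on how the kernel_map_pages() added below gets exercised: under
CONFIG_DEBUG_PAGEALLOC the generic page allocator unmaps pages from the
kernel linear mapping when they are freed and remaps them on allocation, so
a use-after-free dereference faults immediately instead of silently reading
stale memory.  A hedged sketch of that caller side (generic mm code, not
part of this diff; the helper names are hypothetical):

	static void debug_free_pages(struct page *page, int order)
	{
		/* enable == 0: kernel_map_range() installs __pgprot(0), clearing PTEs */
		kernel_map_pages(page, 1 << order, 0);
	}

	static void debug_alloc_pages(struct page *page, int order)
	{
		/* enable == 1: restore PAGE_KERNEL protections over the range */
		kernel_map_pages(page, 1 << order, 1);
	}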
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index cf747372f0c9..8d72f8a1268e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1332,15 +1332,114 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	return end_pfn;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+	unsigned long vstart = PAGE_OFFSET + pstart;
+	unsigned long vend = PAGE_OFFSET + pend;
+	unsigned long alloc_bytes = 0UL;
+
+	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+			    vstart, vend);
+		prom_halt();
+	}
+
+	while (vstart < vend) {
+		unsigned long this_end, paddr = __pa(vstart);
+		pgd_t *pgd = pgd_offset_k(vstart);
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, vstart);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, vstart);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, vstart);
+		this_end = (vstart + PMD_SIZE) & PMD_MASK;
+		if (this_end > vend)
+			this_end = vend;
+
+		while (vstart < this_end) {
+			pte_val(*pte) = (paddr | pgprot_val(prot));
+
+			vstart += PAGE_SIZE;
+			paddr += PAGE_SIZE;
+			pte++;
+		}
+	}
+
+	return alloc_bytes;
+}
+
+extern struct linux_mlist_p1275 *prom_ptot_ptr;
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+	struct linux_mlist_p1275 *p = prom_ptot_ptr;
+	unsigned long mem_alloced = 0UL;
+
+	while (p) {
+		unsigned long phys_start, phys_end;
+
+		phys_start = p->start_adr;
+		phys_end = phys_start + p->num_bytes;
+		mem_alloced += kernel_map_range(phys_start, phys_end,
+						PAGE_KERNEL);
+
+		p = p->theres_more;
+	}
+
+	printk("Allocated %ld bytes for kernel page tables.\n",
+	       mem_alloced);
+
+	kvmap_linear_patch[0] = 0x01000000; /* nop */
+	flushi(&kvmap_linear_patch[0]);
+
+	__flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+	kernel_map_range(phys_start, phys_end,
+			 (enable ? PAGE_KERNEL : __pgprot(0)));
+
+	/* we should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu.
+	 */
+	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+				 PAGE_OFFSET + phys_end);
+}
+#endif
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
 
 static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
-	extern pmd_t swapper_pmd_dir[1024];
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end;
 
@@ -1361,11 +1460,11 @@ void __init paging_init(void)
 	 */
 	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
 
-	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
 
 	/* Now can init the kernel/bad page tables. */
 	pud_set(pud_offset(&swapper_pg_dir[0], 0),
-		swapper_pmd_dir + (shift / sizeof(pgd_t)));
+		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
 
 	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 
@@ -1390,6 +1489,10 @@ void __init paging_init(void)
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	kernel_physical_mapping_init();
+#endif
+
 	{
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long zholes_size[MAX_NR_ZONES];