Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--	arch/sparc/mm/init_64.c	393
1 file changed, 168 insertions(+), 225 deletions(-)
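
This patch drops sparc64's kpte_linear_bitmap and valid-address bitmap and instead maps the kernel linear region directly with huge PUD/PMD page-table entries: kernel_map_range() gains a use_huge flag, and kernel_can_map_hugepud()/kernel_can_map_hugepmd() decide when one huge entry can cover a region. Below is a minimal standalone sketch of that alignment-and-size test, assuming the 8MB PMD geometry stated in the patch's own comment (the constants are illustrative, not taken from kernel headers):

/* Userspace sketch of the kernel_can_map_hugepmd() test from the patch.
 * PMD_SHIFT is an assumed value (8MB per PMD entry, per the patch comment);
 * the kernel derives the real geometry from its page-table headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SHIFT	23UL			/* assumed: 8MB per PMD entry */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1UL))

/* vstart must be PMD-aligned and at least one full PMD must fit before
 * vend; 'guard' lets the caller disable huge mappings entirely, as the
 * patch does under CONFIG_DEBUG_PAGEALLOC. */
static bool can_map_hugepmd(unsigned long vstart, unsigned long vend,
			    bool guard)
{
	return guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE;
}

int main(void)
{
	printf("%d\n", can_map_hugepmd(0, PMD_SIZE, true));		/* 1 */
	printf("%d\n", can_map_hugepmd(0x2000, 2 * PMD_SIZE, true));	/* 0: unaligned */
	printf("%d\n", can_map_hugepmd(0, PMD_SIZE - 1, true));		/* 0: too small */
	return 0;
}

The PUD-level test in the patch has the same shape, with PUD_MASK/PUD_SIZE substituted.
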
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 35fcc9cb960d..848440a33125 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -75,7 +75,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
  * 'cpu' properties, but we need to have this table setup before the
  * MDESC is initialized.
  */
-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
@@ -84,6 +83,7 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
  */
 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
 #endif
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
 static unsigned long cpu_pgsz_mask;
 
@@ -165,10 +165,6 @@ static void __init read_obp_memory(const char *property,
 	     cmp_p64, NULL);
 }
 
-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
-					sizeof(unsigned long)];
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-
 /* Kernel physical address base and size in bytes. */
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
@@ -1369,9 +1365,145 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
 static int pall_ents __initdata;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long max_phys_bits = 40;
+
+bool kern_addr_valid(unsigned long addr)
+{
+	unsigned long above = ((long)addr) >> max_phys_bits;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (above != 0 && above != -1UL)
+		return false;
+
+	if (addr >= (unsigned long) KERNBASE &&
+	    addr < (unsigned long)&_end)
+		return true;
+
+	if (addr >= PAGE_OFFSET) {
+		unsigned long pa = __pa(addr);
+
+		return pfn_valid(pa >> PAGE_SHIFT);
+	}
+
+	pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd))
+		return 0;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		return 0;
+
+	if (pud_large(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return 0;
+
+	if (pmd_large(*pmd))
+		return pfn_valid(pmd_pfn(*pmd));
+
+	pte = pte_offset_kernel(pmd, addr);
+	if (pte_none(*pte))
+		return 0;
+
+	return pfn_valid(pte_pfn(*pte));
+}
+EXPORT_SYMBOL(kern_addr_valid);
+
+static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
+					      unsigned long vend,
+					      pud_t *pud)
+{
+	const unsigned long mask16gb = (1UL << 34) - 1UL;
+	u64 pte_val = vstart;
+
+	/* Each PUD is 8GB */
+	if ((vstart & mask16gb) ||
+	    (vend - vstart <= mask16gb)) {
+		pte_val ^= kern_linear_pte_xor[2];
+		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
+
+		return vstart + PUD_SIZE;
+	}
+
+	pte_val ^= kern_linear_pte_xor[3];
+	pte_val |= _PAGE_PUD_HUGE;
+
+	vend = vstart + mask16gb + 1UL;
+	while (vstart < vend) {
+		pud_val(*pud) = pte_val;
+
+		pte_val += PUD_SIZE;
+		vstart += PUD_SIZE;
+		pud++;
+	}
+	return vstart;
+}
+
+static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
+		return true;
+
+	return false;
+}
+
+static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
+					      unsigned long vend,
+					      pmd_t *pmd)
+{
+	const unsigned long mask256mb = (1UL << 28) - 1UL;
+	const unsigned long mask2gb = (1UL << 31) - 1UL;
+	u64 pte_val = vstart;
+
+	/* Each PMD is 8MB */
+	if ((vstart & mask256mb) ||
+	    (vend - vstart <= mask256mb)) {
+		pte_val ^= kern_linear_pte_xor[0];
+		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
+
+		return vstart + PMD_SIZE;
+	}
+
+	if ((vstart & mask2gb) ||
+	    (vend - vstart <= mask2gb)) {
+		pte_val ^= kern_linear_pte_xor[1];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask256mb + 1UL;
+	} else {
+		pte_val ^= kern_linear_pte_xor[2];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask2gb + 1UL;
+	}
+
+	while (vstart < vend) {
+		pmd_val(*pmd) = pte_val;
+
+		pte_val += PMD_SIZE;
+		vstart += PMD_SIZE;
+		pmd++;
+	}
+
+	return vstart;
+}
+
+static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
+		return true;
+
+	return false;
+}
+
 static unsigned long __ref kernel_map_range(unsigned long pstart,
-					    unsigned long pend, pgprot_t prot)
+					    unsigned long pend, pgprot_t prot,
+					    bool use_huge)
 {
 	unsigned long vstart = PAGE_OFFSET + pstart;
 	unsigned long vend = PAGE_OFFSET + pend;
@@ -1401,15 +1533,23 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 		if (pud_none(*pud)) {
 			pmd_t *new;
 
+			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepud(vstart, vend, pud);
+				continue;
+			}
 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
 
 		pmd = pmd_offset(pud, vstart);
-		if (!pmd_present(*pmd)) {
+		if (pmd_none(*pmd)) {
 			pte_t *new;
 
+			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepmd(vstart, vend, pmd);
+				continue;
+			}
 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
@@ -1432,100 +1572,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	return alloc_bytes;
 }
 
-extern unsigned int kvmap_linear_patch[1];
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
-static void __init kpte_set_val(unsigned long index, unsigned long val)
-{
-	unsigned long *ptr = kpte_linear_bitmap;
-
-	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
-	ptr += (index / (BITS_PER_LONG / 2));
-
-	*ptr |= val;
-}
-
-static const unsigned long kpte_shift_min = 28;	/* 256MB */
-static const unsigned long kpte_shift_max = 34;	/* 16GB */
-static const unsigned long kpte_shift_incr = 3;
-
-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
-					   unsigned long shift)
+static void __init flush_all_kernel_tsbs(void)
 {
-	unsigned long size = (1UL << shift);
-	unsigned long mask = (size - 1UL);
-	unsigned long remains = end - start;
-	unsigned long val;
-
-	if (remains < size || (start & mask))
-		return start;
-
-	/* VAL maps:
-	 *
-	 *	shift 28 --> kern_linear_pte_xor index 1
-	 *	shift 31 --> kern_linear_pte_xor index 2
-	 *	shift 34 --> kern_linear_pte_xor index 3
-	 */
-	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
-
-	remains &= ~mask;
-	if (shift != kpte_shift_max)
-		remains = size;
-
-	while (remains) {
-		unsigned long index = start >> kpte_shift_min;
+	int i;
 
-		kpte_set_val(index, val);
+	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
+		struct tsb *ent = &swapper_tsb[i];
 
-		start += 1UL << kpte_shift_min;
-		remains -= 1UL << kpte_shift_min;
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 	}
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
+		struct tsb *ent = &swapper_4m_tsb[i];
 
-	return start;
-}
-
-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
-{
-	unsigned long smallest_size, smallest_mask;
-	unsigned long s;
-
-	smallest_size = (1UL << kpte_shift_min);
-	smallest_mask = (smallest_size - 1UL);
-
-	while (start < end) {
-		unsigned long orig_start = start;
-
-		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
-			start = kpte_mark_using_shift(start, end, s);
-
-			if (start != orig_start)
-				break;
-		}
-
-		if (start == orig_start)
-			start = (start + smallest_size) & ~smallest_mask;
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 	}
+#endif
 }
 
-static void __init init_kpte_bitmap(void)
-{
-	unsigned long i;
-
-	for (i = 0; i < pall_ents; i++) {
-		unsigned long phys_start, phys_end;
-
-		phys_start = pall[i].phys_addr;
-		phys_end = phys_start + pall[i].reg_size;
-
-		mark_kpte_bitmap(phys_start, phys_end);
-	}
-}
+extern unsigned int kvmap_linear_patch[1];
 
 static void __init kernel_physical_mapping_init(void)
 {
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	unsigned long i, mem_alloced = 0UL;
+	bool use_huge = true;
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	use_huge = false;
+#endif
 	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
 
@@ -1533,7 +1607,7 @@ static void __init kernel_physical_mapping_init(void)
 		phys_end = phys_start + pall[i].reg_size;
 
 		mem_alloced += kernel_map_range(phys_start, phys_end,
-						PAGE_KERNEL);
+						PAGE_KERNEL, use_huge);
 	}
 
 	printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1542,8 +1616,9 @@ static void __init kernel_physical_mapping_init(void)
 	kvmap_linear_patch[0] = 0x01000000; /* nop */
 	flushi(&kvmap_linear_patch[0]);
 
+	flush_all_kernel_tsbs();
+
 	__flush_tlb_all();
-#endif
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -1553,7 +1628,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
 
 	kernel_map_range(phys_start, phys_end,
-			 (enable ? PAGE_KERNEL : __pgprot(0)));
+			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
 
 	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
 			       PAGE_OFFSET + phys_end);
@@ -1581,62 +1656,11 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 unsigned long PAGE_OFFSET;
 EXPORT_SYMBOL(PAGE_OFFSET);
 
-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
-{
-	unsigned long final_shift;
-	unsigned int val = *insn;
-	unsigned int cnt;
-
-	/* We are patching in ilog2(max_supported_phys_address), and
-	 * we are doing so in a manner similar to a relocation addend.
-	 * That is, we are adding the shift value to whatever value
-	 * is in the shift instruction count field already.
-	 */
-	cnt = (val & 0x3f);
-	val &= ~0x3f;
-
-	/* If we are trying to shift >= 64 bits, clear the destination
-	 * register.  This can happen when phys_bits ends up being equal
-	 * to MAX_PHYS_ADDRESS_BITS.
-	 */
-	final_shift = (cnt + (64 - phys_bits));
-	if (final_shift >= 64) {
-		unsigned int rd = (val >> 25) & 0x1f;
-
-		val = 0x80100000 | (rd << 25);
-	} else {
-		val |= final_shift;
-	}
-	*insn = val;
-
-	__asm__ __volatile__("flush	%0"
-			     : /* no outputs */
-			     : "r" (insn));
-}
-
-static void __init page_offset_shift_patch(unsigned long phys_bits)
-{
-	extern unsigned int __page_offset_shift_patch;
-	extern unsigned int __page_offset_shift_patch_end;
-	unsigned int *p;
-
-	p = &__page_offset_shift_patch;
-	while (p < &__page_offset_shift_patch_end) {
-		unsigned int *insn = (unsigned int *)(unsigned long)*p;
-
-		page_offset_shift_patch_one(insn, phys_bits);
-
-		p++;
-	}
-}
-
 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
 
 static void __init setup_page_offset(void)
 {
-	unsigned long max_phys_bits = 40;
-
 	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 		/* Cheetah/Panther support a full 64-bit virtual
 		 * address, so we can use all that our page tables
@@ -1685,8 +1709,6 @@ static void __init setup_page_offset(void)
 
 	pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
 		PAGE_OFFSET, max_phys_bits);
-
-	page_offset_shift_patch(max_phys_bits);
 }
 
 static void __init tsb_phys_patch(void)
@@ -1731,7 +1753,6 @@ static void __init tsb_phys_patch(void)
 #define NUM_KTSB_DESCR	1
 #endif
 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
 /* The swapper TSBs are loaded with a base sequence of:
  *
@@ -2077,11 +2098,9 @@ void __init paging_init(void)
 
 	pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
 	pud_set(&swapper_pud_dir[0], pmd);
 
 	inherit_prom_mappings();
 
-	init_kpte_bitmap();
-
 	/* Ok, we can use our TLB miss and window trap handlers safely. */
 	setup_tba();
 
@@ -2188,70 +2207,6 @@ int page_in_phys_avail(unsigned long paddr)
 	return 0;
 }
 
-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
-static int pavail_rescan_ents __initdata;
-
-/* Certain OBP calls, such as fetching "available" properties, can
- * claim physical memory.  So, along with initializing the valid
- * address bitmap, what we do here is refetch the physical available
- * memory list again, and make sure it provides at least as much
- * memory as 'pavail' does.
- */
-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
-{
-	int i;
-
-	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
-
-	for (i = 0; i < pavail_ents; i++) {
-		unsigned long old_start, old_end;
-
-		old_start = pavail[i].phys_addr;
-		old_end = old_start + pavail[i].reg_size;
-		while (old_start < old_end) {
-			int n;
-
-			for (n = 0; n < pavail_rescan_ents; n++) {
-				unsigned long new_start, new_end;
-
-				new_start = pavail_rescan[n].phys_addr;
-				new_end = new_start +
-					pavail_rescan[n].reg_size;
-
-				if (new_start <= old_start &&
-				    new_end >= (old_start + PAGE_SIZE)) {
-					set_bit(old_start >> ILOG2_4MB, bitmap);
-					goto do_next_page;
-				}
-			}
-
-			prom_printf("mem_init: Lost memory in pavail\n");
-			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
-				    pavail[i].phys_addr,
-				    pavail[i].reg_size);
-			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
-				    pavail_rescan[i].phys_addr,
-				    pavail_rescan[i].reg_size);
-			prom_printf("mem_init: Cannot continue, aborting.\n");
-			prom_halt();
-
-		do_next_page:
-			old_start += PAGE_SIZE;
-		}
-	}
-}
-
-static void __init patch_tlb_miss_handler_bitmap(void)
-{
-	extern unsigned int valid_addr_bitmap_insn[];
-	extern unsigned int valid_addr_bitmap_patch[];
-
-	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
-	mb();
-	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
-	flushi(&valid_addr_bitmap_insn[0]);
-}
-
 static void __init register_page_bootmem_info(void)
 {
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -2264,18 +2219,6 @@ static void __init register_page_bootmem_info(void)
 }
 void __init mem_init(void)
 {
-	unsigned long addr, last;
-
-	addr = PAGE_OFFSET + kern_base;
-	last = PAGE_ALIGN(kern_size) + addr;
-	while (addr < last) {
-		set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
-		addr += PAGE_SIZE;
-	}
-
-	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
-	patch_tlb_miss_handler_bitmap();
-
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
 	register_page_bootmem_info();
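
With the bitmaps gone, the kern_addr_valid() added above first rejects addresses whose upper bits are not a pure sign extension past max_phys_bits, then walks the kernel page tables. A standalone sketch of just that canonical-address test, assuming the patch's default max_phys_bits of 40 (the kernel raises it later from CPU probing):

/* Sketch of kern_addr_valid()'s first check: the bits above max_phys_bits
 * must be all zero (low half of the VA space) or all one (sign-extended
 * top half).  Relies on arithmetic right shift of signed values, as the
 * kernel code itself does.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long max_phys_bits = 40;	/* assumed: patch default */

static bool addr_is_canonical(unsigned long addr)
{
	unsigned long above = ((long)addr) >> max_phys_bits;

	return above == 0 || above == -1UL;
}

int main(void)
{
	printf("%d\n", addr_is_canonical(0x0000001000000000UL));	/* 1 */
	printf("%d\n", addr_is_canonical(0xffffff8000000000UL));	/* 1 */
	printf("%d\n", addr_is_canonical(0x0400000000000000UL));	/* 0 */
	return 0;
}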