Diffstat (limited to 'arch/sparc64/mm/init.c')
 arch/sparc64/mm/init.c | 259 +++++++++++++++++++++++++------------------------
 1 file changed, 130 insertions(+), 129 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 9f6ca624892d..5db50524f20d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/kprobes.h>
 #include <linux/cache.h>
+#include <linux/sort.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
@@ -41,7 +42,72 @@
 
 extern void device_scan(void);
 
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS	32
+
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+	const struct linux_prom64_registers *x = a, *y = b;
+
+	if (x->phys_addr > y->phys_addr)
+		return 1;
+	if (x->phys_addr < y->phys_addr)
+		return -1;
+	return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+				   struct linux_prom64_registers *regs,
+				   int *num_ents)
+{
+	int node = prom_finddevice("/memory");
+	int prop_size = prom_getproplen(node, property);
+	int ents, ret, i;
+
+	ents = prop_size / sizeof(struct linux_prom64_registers);
+	if (ents > MAX_BANKS) {
+		prom_printf("The machine has more %s property entries than "
+			    "this kernel can support (%d).\n",
+			    property, MAX_BANKS);
+		prom_halt();
+	}
+
+	ret = prom_getproperty(node, property, (char *) regs, prop_size);
+	if (ret == -1) {
+		prom_printf("Couldn't get %s property from /memory.\n");
+		prom_halt();
+	}
+
+	*num_ents = ents;
+
+	/* Sanitize what we got from the firmware, by page aligning
+	 * everything.
+	 */
+	for (i = 0; i < ents; i++) {
+		unsigned long base, size;
+
+		base = regs[i].phys_addr;
+		size = regs[i].reg_size;
+
+		size &= PAGE_MASK;
+		if (base & ~PAGE_MASK) {
+			unsigned long new_base = PAGE_ALIGN(base);
+
+			size -= new_base - base;
+			if ((long) size < 0L)
+				size = 0UL;
+			base = new_base;
+		}
+		regs[i].phys_addr = base;
+		regs[i].reg_size = size;
+	}
+	sort(regs, ents, sizeof(struct linux_prom64_registers),
+	     cmp_p64, NULL);
+}
 
 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
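The new read_obp_memory() helper above replaces the old sp_banks[] probing: it pulls one /memory property ("available" or "reg") from OBP into a fixed array, page-aligns each entry, and sorts the banks by base address with the generic sort() from linux/sort.h (hence the new include). The sanitization rounds the size down to whole pages and the base up, shrinking the bank by whatever was cut off the front. A standalone userspace sketch of just that arithmetic, assuming the 8K sparc64 page size and made-up addresses:

	/* Illustration only, not kernel code: the page-alignment step of
	 * read_obp_memory(), with PAGE_SIZE assumed to be 8K. */
	#include <stdio.h>

	#define PAGE_SIZE	8192UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long base = 0x00402100UL;	/* unaligned base */
		unsigned long size = 0x00010080UL;	/* odd size */

		size &= PAGE_MASK;			/* round size down to pages */
		if (base & ~PAGE_MASK) {		/* round base up, shrink size */
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		printf("base=%#lx size=%#lx\n", base, size);
		return 0;
	}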
@@ -1206,14 +1272,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	int i;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-	prom_printf("bootmem_init: Scan sp_banks, ");
+	prom_printf("bootmem_init: Scan pavail, ");
 #endif
 
 	bytes_avail = 0UL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		end_of_phys_memory = sp_banks[i].base_addr +
-			sp_banks[i].num_bytes;
-		bytes_avail += sp_banks[i].num_bytes;
+	for (i = 0; i < pavail_ents; i++) {
+		end_of_phys_memory = pavail[i].phys_addr +
+			pavail[i].reg_size;
+		bytes_avail += pavail[i].reg_size;
 		if (cmdline_memory_size) {
 			if (bytes_avail > cmdline_memory_size) {
 				unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1221,12 +1287,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 				bytes_avail -= slack;
 				end_of_phys_memory -= slack;
 
-				sp_banks[i].num_bytes -= slack;
-				if (sp_banks[i].num_bytes == 0) {
-					sp_banks[i].base_addr = 0xdeadbeef;
+				pavail[i].reg_size -= slack;
+				if ((long)pavail[i].reg_size <= 0L) {
+					pavail[i].phys_addr = 0xdeadbeefUL;
+					pavail[i].reg_size = 0UL;
+					pavail_ents = i;
 				} else {
-					sp_banks[i+1].num_bytes = 0;
-					sp_banks[i+1].base_addr = 0xdeadbeef;
+					pavail[i+1].reg_size = 0Ul;
+					pavail[i+1].phys_addr = 0xdeadbeefUL;
+					pavail_ents = i + 1;
 				}
 				break;
 			}
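The mem= handling keeps its old shape but the pavail[] list is now terminated by count rather than by a zero-sized sentinel bank: sizes are accumulated in sorted order, the bank that pushes the running total past cmdline_memory_size is shrunk by the slack, and pavail_ents is cut off at that bank (or just before it if nothing is left). A simplified userspace illustration of the trimming, with invented bank sizes and a 768 MB limit standing in for mem=768M:

	/* Illustration only: trimming a sorted bank list to a mem= limit. */
	#include <stdio.h>

	struct bank { unsigned long base, size; };

	int main(void)
	{
		struct bank pavail[3] = {
			{ 0x00000000UL, 0x20000000UL },	/* 512 MB */
			{ 0x40000000UL, 0x20000000UL },	/* 512 MB */
			{ 0x80000000UL, 0x20000000UL },	/* 512 MB */
		};
		int pavail_ents = 3, i;
		unsigned long limit = 0x30000000UL;	/* 768 MB */
		unsigned long bytes_avail = 0UL;

		for (i = 0; i < pavail_ents; i++) {
			bytes_avail += pavail[i].size;
			if (bytes_avail > limit) {
				unsigned long slack = bytes_avail - limit;

				pavail[i].size -= slack;
				pavail_ents = pavail[i].size ? i + 1 : i;
				break;
			}
		}
		for (i = 0; i < pavail_ents; i++)
			printf("bank %d: base=%#lx size=%#lx\n",
			       i, pavail[i].base, pavail[i].size);
		return 0;
	}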
@@ -1280,12 +1349,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	/* Now register the available physical memory with the
 	 * allocator.
 	 */
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 #ifdef CONFIG_DEBUG_BOOTMEM
-		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
-			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+			    i, pavail[i].phys_addr, pavail[i].reg_size);
 #endif
-		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1334,7 +1403,7 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
 	unsigned long alloc_bytes = 0UL;
 
 	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
-		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
 			    vstart, vend);
 		prom_halt();
 	}
@@ -1381,23 +1450,24 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
 	return alloc_bytes;
 }
 
-extern struct linux_mlist_p1275 *prom_ptot_ptr;
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
 extern unsigned int kvmap_linear_patch[1];
 
 static void __init kernel_physical_mapping_init(void)
 {
-	struct linux_mlist_p1275 *p = prom_ptot_ptr;
-	unsigned long mem_alloced = 0UL;
+	unsigned long i, mem_alloced = 0UL;
+
+	read_obp_memory("reg", &pall[0], &pall_ents);
 
-	while (p) {
+	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
 
-		phys_start = p->start_adr;
-		phys_end = phys_start + p->num_bytes;
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
 		mem_alloced += kernel_map_range(phys_start, phys_end,
 						PAGE_KERNEL);
-
-		p = p->theres_more;
 	}
 
 	printk("Allocated %ld bytes for kernel page tables.\n",
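kernel_physical_mapping_init() now builds the linear kernel mapping from a second list, pall[], filled from the /memory node's "reg" property. In OpenFirmware terms "reg" describes every installed bank while "available" omits ranges the firmware has claimed, so the linear mapping built from pall[] covers memory that the bootmem allocator (seeded from pavail[]) never hands out. A toy userspace illustration of that distinction, with invented ranges:

	/* Illustration only: "reg" (pall) is a superset of "available" (pavail). */
	#include <stdio.h>

	struct bank { unsigned long base, size; };

	static unsigned long total(const struct bank *b, int n)
	{
		unsigned long sum = 0UL;
		int i;

		for (i = 0; i < n; i++)
			sum += b[i].size;
		return sum;
	}

	int main(void)
	{
		const struct bank pall[]   = { { 0x00000000UL, 0x40000000UL } };
		const struct bank pavail[] = { { 0x00400000UL, 0x3f000000UL } };

		printf("mapped %#lx bytes, allocable %#lx bytes\n",
		       total(pall, 1), total(pavail, 1));
		return 0;
	}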
@@ -1425,6 +1495,18 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 }
 #endif
 
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < pavail_ents; i++) {
+		if (pavail[i].reg_size >= size)
+			return pavail[i].phys_addr;
+	}
+
+	return ~0UL;
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
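find_ecache_flush_span() relies on read_obp_memory() having sorted pavail[] by base address: the first bank at least `size` bytes long is therefore the lowest-addressed available region big enough for the E-cache flush area, and ~0UL signals that no bank qualifies. A self-contained sketch of the same scan over an invented bank list:

	/* Illustration only: first-fit scan over a sorted bank list. */
	#include <stdio.h>

	struct bank { unsigned long base, size; };

	static unsigned long find_span(const struct bank *b, int n, unsigned long size)
	{
		int i;

		for (i = 0; i < n; i++)
			if (b[i].size >= size)
				return b[i].base;
		return ~0UL;
	}

	int main(void)
	{
		const struct bank pavail[] = {
			{ 0x00000000UL, 0x00400000UL },	/*  4 MB */
			{ 0x80000000UL, 0x04000000UL },	/* 64 MB */
		};

		/* needs 16 MB, so the 4 MB bank is skipped */
		printf("%#lx\n", find_span(pavail, 2, 0x1000000UL));
		return 0;
	}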
@@ -1435,7 +1517,19 @@ pgd_t swapper_pg_dir[2048];
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
-	unsigned long real_end;
+	unsigned long real_end, i;
+
+	/* Find available physical memory... */
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+
+	phys_base = 0xffffffffffffffffUL;
+	for (i = 0; i < pavail_ents; i++)
+		phys_base = min(phys_base, pavail[i].phys_addr);
+
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	set_bit(0, mmu_context_bmap);
 
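paging_init() now derives phys_base (the lowest available physical address), pfn_base, and the kernel's own physical placement directly from the OBP data: kern_base rounds the PROM boot-mapping physical address down to a 4 MB (1 << 22) boundary, and kern_size spans from KERNBASE to _end. A standalone illustration of that arithmetic, assuming 8K pages; the bank addresses and prom_boot_mapping_phys_low value are invented samples, not firmware data:

	/* Illustration only: phys_base / pfn_base / kern_base arithmetic. */
	#include <stdio.h>

	#define PAGE_SHIFT	13	/* 8K pages assumed */

	int main(void)
	{
		unsigned long pavail_base[] = {
			0x20000000UL, 0x08000000UL, 0x40000000UL
		};
		unsigned long phys_base = ~0UL;
		unsigned long prom_boot_mapping_phys_low = 0x08623000UL;
		unsigned long pfn_base, kern_base;
		int i;

		for (i = 0; i < 3; i++)		/* lowest available bank wins */
			if (pavail_base[i] < phys_base)
				phys_base = pavail_base[i];

		pfn_base = phys_base >> PAGE_SHIFT;
		kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;

		printf("phys_base=%#lx pfn_base=%#lx kern_base=%#lx\n",
		       phys_base, pfn_base, kern_base);
		return 0;
	}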
@@ -1507,128 +1601,35 @@ void __init paging_init(void)
 	device_scan();
 }
 
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence.  My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
-	int swapi = 0;
-	int i, mitr;
-	unsigned long tmpaddr, tmpsize;
-	unsigned long lowest;
-
-	for (i = 0; thislist[i].theres_more != 0; i++) {
-		lowest = thislist[i].start_adr;
-		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
-			if (thislist[mitr].start_adr < lowest) {
-				lowest = thislist[mitr].start_adr;
-				swapi = mitr;
-			}
-		if (lowest == thislist[i].start_adr)
-			continue;
-		tmpaddr = thislist[swapi].start_adr;
-		tmpsize = thislist[swapi].num_bytes;
-		for (mitr = swapi; mitr > i; mitr--) {
-			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-		}
-		thislist[i].start_adr = tmpaddr;
-		thislist[i].num_bytes = tmpsize;
-	}
-}
-
-void __init rescan_sp_banks(void)
-{
-	struct linux_prom64_registers memlist[64];
-	struct linux_mlist_p1275 avail[64], *mlist;
-	unsigned long bytes, base_paddr;
-	int num_regs, node = prom_finddevice("/memory");
-	int i;
-
-	num_regs = prom_getproperty(node, "available",
-				    (char *) memlist, sizeof(memlist));
-	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
-	for (i = 0; i < num_regs; i++) {
-		avail[i].start_adr = memlist[i].phys_addr;
-		avail[i].num_bytes = memlist[i].reg_size;
-		avail[i].theres_more = &avail[i + 1];
-	}
-	avail[i - 1].theres_more = NULL;
-	sort_memlist(avail);
-
-	mlist = &avail[0];
-	i = 0;
-	bytes = mlist->num_bytes;
-	base_paddr = mlist->start_adr;
-
-	sp_banks[0].base_addr = base_paddr;
-	sp_banks[0].num_bytes = bytes;
-
-	while (mlist->theres_more != NULL){
-		i++;
-		mlist = mlist->theres_more;
-		bytes = mlist->num_bytes;
-		if (i >= SPARC_PHYS_BANKS-1) {
-			printk ("The machine has more banks than "
-				"this kernel can support\n"
-				"Increase the SPARC_PHYS_BANKS "
-				"setting (currently %d)\n",
-				SPARC_PHYS_BANKS);
-			i = SPARC_PHYS_BANKS-1;
-			break;
-		}
-
-		sp_banks[i].base_addr = mlist->start_adr;
-		sp_banks[i].num_bytes = mlist->num_bytes;
-	}
-
-	i++;
-	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-	sp_banks[i].num_bytes = 0;
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++)
-		sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
 static void __init taint_real_pages(void)
 {
-	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
 	int i;
 
-	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
-		saved_sp_banks[i].base_addr =
-			sp_banks[i].base_addr;
-		saved_sp_banks[i].num_bytes =
-			sp_banks[i].num_bytes;
-	}
-
-	rescan_sp_banks();
+	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
 
-	/* Find changes discovered in the sp_bank rescan and
+	/* Find changes discovered in the physmem available rescan and
 	 * reserve the lost portions in the bootmem maps.
 	 */
-	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 		unsigned long old_start, old_end;
 
-		old_start = saved_sp_banks[i].base_addr;
+		old_start = pavail[i].phys_addr;
 		old_end = old_start +
-			saved_sp_banks[i].num_bytes;
+			pavail[i].reg_size;
-			for (n = 0; sp_banks[n].num_bytes; n++) {
+			for (n = 0; n < pavail_rescan_ents; n++) {
 				unsigned long new_start, new_end;
 
-				new_start = sp_banks[n].base_addr;
-				new_end = new_start + sp_banks[n].num_bytes;
+				new_start = pavail_rescan[n].phys_addr;
+				new_end = new_start +
+					pavail_rescan[n].reg_size;
 
 				if (new_start <= old_start &&
 				    new_end >= (old_start + PAGE_SIZE)) {
-					set_bit (old_start >> 22,
+					set_bit(old_start >> 22,
 						sparc64_valid_addr_bitmap);
 					goto do_next_page;
 				}
 			}
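taint_real_pages() still exists for the reason the deleted comment gave: OBP can allocate memory for itself during early boot (for example scratch space after prom_set_traptable()), so the "available" property is read a second time into pavail_rescan[] and only regions still present there are marked in sparc64_valid_addr_bitmap, indexed by old_start >> 22, i.e. one bit per 4 MB chunk. A standalone sketch of the containment test applied to each page, with invented ranges:

	/* Illustration only: is this page still covered by the rescanned list? */
	#include <stdio.h>

	struct bank { unsigned long base, size; };

	static int still_available(unsigned long page, unsigned long page_size,
				   const struct bank *rescan, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			unsigned long new_start = rescan[i].base;
			unsigned long new_end = new_start + rescan[i].size;

			if (new_start <= page && new_end >= page + page_size)
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		const struct bank rescan[] = { { 0x2000UL, 0x4000UL } };

		printf("%d %d\n",
		       still_available(0x2000UL, 0x2000UL, rescan, 1),	/* 1 */
		       still_available(0x8000UL, 0x2000UL, rescan, 1));	/* 0 */
		return 0;
	}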