Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c  90
1 file changed, 49 insertions, 41 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 6e5b01d779d2..3010227fe243 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/kprobes.h>
 #include <linux/cache.h>
 #include <linux/sort.h>
+#include <linux/percpu.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
@@ -43,8 +44,8 @@
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
 #include <asm/prom.h>
-
-extern void device_scan(void);
+#include <asm/sstate.h>
+#include <asm/mdesc.h>
 
 #define MAX_PHYS_ADDRESS (1UL << 42UL)
 #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
-/* A special kernel TSB for 4MB and 256MB linear mappings. */
-struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+/* A special kernel TSB for 4MB and 256MB linear mappings.
+ * Space is allocated for this right after the trap table
+ * in arch/sparc64/kernel/head.S
+ */
+extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
 #endif
 
 #define MAX_BANKS 32
@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page)
 }
 
 #define PG_dcache_dirty PG_arch_1
-#define PG_dcache_cpu_shift 24UL
-#define PG_dcache_cpu_mask (256UL - 1UL)
-
-#if NR_CPUS > 256
-#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
-#endif
+#define PG_dcache_cpu_shift 32UL
+#define PG_dcache_cpu_mask \
+        ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
 
 #define dcache_dirty_cpu(page) \
         (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
                                        unsigned long pte,
                                        unsigned long mmu)
 {
-        register unsigned long func asm("%o5");
-        register unsigned long arg0 asm("%o0");
-        register unsigned long arg1 asm("%o1");
-        register unsigned long arg2 asm("%o2");
-        register unsigned long arg3 asm("%o3");
-
-        func = HV_FAST_MMU_MAP_PERM_ADDR;
-        arg0 = vaddr;
-        arg1 = 0;
-        arg2 = pte;
-        arg3 = mmu;
-        __asm__ __volatile__("ta 0x80"
-                             : "=&r" (func), "=&r" (arg0),
-                               "=&r" (arg1), "=&r" (arg2),
-                               "=&r" (arg3)
-                             : "0" (func), "1" (arg0), "2" (arg1),
-                               "3" (arg2), "4" (arg3));
-        if (arg0 != 0) {
+        unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
+
+        if (ret != 0) {
                 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
-                            "errors with %lx\n", vaddr, 0, pte, mmu, arg0);
+                            "errors with %lx\n", vaddr, 0, pte, mmu, ret);
                 prom_halt();
         }
 }
@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
 
 void __cpuinit sun4v_ktsb_register(void)
 {
-        register unsigned long func asm("%o5");
-        register unsigned long arg0 asm("%o0");
-        register unsigned long arg1 asm("%o1");
-        unsigned long pa;
+        unsigned long pa, ret;
 
         pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
 
-        func = HV_FAST_MMU_TSB_CTX0;
-        arg0 = NUM_KTSB_DESCR;
-        arg1 = pa;
-        __asm__ __volatile__("ta %6"
-                             : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
-                             : "0" (func), "1" (arg0), "2" (arg1),
-                               "i" (HV_FAST_TRAP));
+        ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
+        if (ret != 0) {
+                prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
+                            "errors with %lx\n", pa, ret);
+                prom_halt();
+        }
 }
 
 /* paging_init() sets up the page tables */
@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void)
 extern void cheetah_ecache_flush_init(void);
 extern void sun4v_patch_tlb_handlers(void);
 
+extern void cpu_probe(void);
+extern void central_probe(void);
+
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
@@ -1345,9 +1330,24 @@ void __init paging_init(void)
         unsigned long end_pfn, pages_avail, shift, phys_base;
         unsigned long real_end, i;
 
+        /* These build time checkes make sure that the dcache_dirty_cpu()
+         * page->flags usage will work.
+         *
+         * When a page gets marked as dcache-dirty, we store the
+         * cpu number starting at bit 32 in the page->flags.  Also,
+         * functions like clear_dcache_dirty_cpu use the cpu mask
+         * in 13-bit signed-immediate instruction fields.
+         */
+        BUILD_BUG_ON(FLAGS_RESERVED != 32);
+        BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
+                     ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
+        BUILD_BUG_ON(NR_CPUS > 4096);
+
         kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
         kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+        sstate_booting();
+
         /* Invalidate both kernel TSBs.  */
         memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
 #ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1416,8 +1416,13 @@ void __init paging_init(void)
 
         kernel_physical_mapping_init();
 
+        real_setup_per_cpu_areas();
+
         prom_build_devicetree();
 
+        if (tlb_type == hypervisor)
+                sun4v_mdesc_init();
+
         {
                 unsigned long zones_size[MAX_NR_ZONES];
                 unsigned long zholes_size[MAX_NR_ZONES];
@@ -1434,7 +1439,10 @@ void __init paging_init(void)
                             zholes_size);
         }
 
-        device_scan();
+        prom_printf("Booting Linux...\n");
+
+        central_probe();
+        cpu_probe();
 }
 
 static void __init taint_real_pages(void)