author		David S. Miller <davem@sunset.davemloft.net>	2007-05-29 05:22:14 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-05-29 05:52:15 -0400
commit		7db35f31cbb8ca1dbaba03d74b7db79ace084358 (patch)
tree		98dcbdb70d613ba6ddcf7c8bec03d79748342109 /arch/sparc64/mm
parent		2d9e2763c22a4ce41c3cc5f35366a51f1eba38dc (diff)
[SPARC64]: Fill holes in hypervisor APIs and fix KTSB registry.
Several interfaces were missing and others misnumbered or improperly documented.

Also, make sure to check the return value when registering the kernel TSBs with the hypervisor.  This helped to find the 4MB kernel TSB alignment bug fixed in a previous changeset.

Signed-off-by: David S. Miller <davem@davemloft.net>
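For context, every open-coded hypercall removed by this patch follows the same sun4v "fast trap" calling convention: the API function number is loaded into %o5, up to four arguments go in %o0-%o3, the "ta HV_FAST_TRAP" instruction (trap 0x80) enters the hypervisor, and the status word comes back in %o0 (HV_EOK, i.e. 0, on success). The sketch below restates that pattern as a generic C helper, modelled directly on the inline assembly removed from init.c; the helper name hv_fast_trap4 is purely illustrative, and the real sun4v_mmu_map_perm_addr() and sun4v_mmu_tsb_ctx0() entry points used in the diff are small assembly stubs outside the arch/sparc64/mm diffstat shown here.

/*
 * Illustrative only: a minimal C sketch of the sun4v fast-trap calling
 * convention that the removed inline assembly open-codes.  The helper
 * name is hypothetical; HV_FAST_TRAP and the HV_FAST_* function numbers
 * come from asm/hypervisor.h.
 */
static inline unsigned long hv_fast_trap4(unsigned long func_num,
					   unsigned long a0, unsigned long a1,
					   unsigned long a2, unsigned long a3)
{
	register unsigned long func asm("%o5") = func_num;
	register unsigned long arg0 asm("%o0") = a0;
	register unsigned long arg1 asm("%o1") = a1;
	register unsigned long arg2 asm("%o2") = a2;
	register unsigned long arg3 asm("%o3") = a3;

	/* Trap into the hypervisor; the argument registers are clobbered. */
	__asm__ __volatile__("ta	%5"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2),
			       "=&r" (arg3)
			     : "i" (HV_FAST_TRAP),
			       "0" (func), "1" (arg0), "2" (arg1),
			       "3" (arg2), "4" (arg3));

	/* %o0 carries the status word: HV_EOK (0) on success. */
	return arg0;
}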
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--	arch/sparc64/mm/init.c	41
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 97af4311f787..3010227fe243 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -558,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
 						     unsigned long pte,
 						     unsigned long mmu)
 {
-	register unsigned long func asm("%o5");
-	register unsigned long arg0 asm("%o0");
-	register unsigned long arg1 asm("%o1");
-	register unsigned long arg2 asm("%o2");
-	register unsigned long arg3 asm("%o3");
-
-	func = HV_FAST_MMU_MAP_PERM_ADDR;
-	arg0 = vaddr;
-	arg1 = 0;
-	arg2 = pte;
-	arg3 = mmu;
-	__asm__ __volatile__("ta	0x80"
-			     : "=&r" (func), "=&r" (arg0),
-			       "=&r" (arg1), "=&r" (arg2),
-			       "=&r" (arg3)
-			     : "0" (func), "1" (arg0), "2" (arg1),
-			       "3" (arg2), "4" (arg3));
-	if (arg0 != 0) {
+	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
+
+	if (ret != 0) {
 		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
-			    "errors with %lx\n", vaddr, 0, pte, mmu, arg0);
+			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
 		prom_halt();
 	}
 }
@@ -1314,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
 
 void __cpuinit sun4v_ktsb_register(void)
 {
-	register unsigned long func asm("%o5");
-	register unsigned long arg0 asm("%o0");
-	register unsigned long arg1 asm("%o1");
-	unsigned long pa;
+	unsigned long pa, ret;
 
 	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
 
-	func = HV_FAST_MMU_TSB_CTX0;
-	arg0 = NUM_KTSB_DESCR;
-	arg1 = pa;
-	__asm__ __volatile__("ta	%6"
-			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
-			     : "0" (func), "1" (arg0), "2" (arg1),
-			       "i" (HV_FAST_TRAP));
+	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
+	if (ret != 0) {
+		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
+			    "errors with %lx\n", pa, ret);
+		prom_halt();
+	}
 }
 
 /* paging_init() sets up the page tables */