author    David S. Miller <davem@sunset.davemloft.net>    2006-02-10 01:57:21 -0500
committer David S. Miller <davem@sunset.davemloft.net>    2006-03-20 04:12:14 -0500
commit    164c220fa3947abbada65329d168f421b461a2a7 (patch)
tree      1a10418ccf896f1f9209c2206bedf87915b63bfd /arch/sparc64
parent    dedacf623283cb24933ec9f7d5bf539f19173cd4 (diff)
[SPARC64]: Fix hypervisor call arg passing.
Function goes in %o5, args go in %o0 --> %o5.

Signed-off-by: David S. Miller <davem@davemloft.net>
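A minimal sketch (not part of the patch) of the calling pattern this change enforces when a sun4v hypervisor fast trap is issued from C, following the style used in the files below: the API function number is bound to %o5, the arguments to %o0 onward, and the trap is taken with "ta HV_FAST_TRAP". The helper name hv_example_call and its two-argument shape are illustrative assumptions; HV_FAST_MMU_MAP_PERM_ADDR and HV_FAST_TRAP are the kernel's existing hypervisor constants.

	/* Illustrative only: issue a sun4v fast trap with the function
	 * number in %o5 and two arguments in %o0/%o1, mirroring the
	 * pattern in the patched C files.  %o0 is read back on the
	 * assumption that it carries the return status after the trap.
	 */
	static unsigned long hv_example_call(unsigned long a0, unsigned long a1)
	{
		register unsigned long func asm("%o5");
		register unsigned long arg0 asm("%o0");
		register unsigned long arg1 asm("%o1");

		func = HV_FAST_MMU_MAP_PERM_ADDR;	/* any fast-trap API number */
		arg0 = a0;
		arg1 = a1;
		__asm__ __volatile__("ta %6"
				     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
				     : "0" (func), "1" (arg0), "2" (arg1),
				     "i" (HV_FAST_TRAP));
		return arg0;
	}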
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/irq.c         |  8
-rw-r--r--  arch/sparc64/kernel/smp.c         | 16
-rw-r--r--  arch/sparc64/kernel/trampoline.S  | 56
-rw-r--r--  arch/sparc64/kernel/tsb.S         |  6
-rw-r--r--  arch/sparc64/mm/init.c            | 20
5 files changed, 53 insertions, 53 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 1f6455503f24..c5dd6daf127f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -863,10 +863,10 @@ void init_irqwork_curcpu(void)
 
 static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
 {
-	register unsigned long func __asm__("%o0");
-	register unsigned long arg0 __asm__("%o1");
-	register unsigned long arg1 __asm__("%o2");
-	register unsigned long arg2 __asm__("%o3");
+	register unsigned long func __asm__("%o5");
+	register unsigned long arg0 __asm__("%o0");
+	register unsigned long arg1 __asm__("%o1");
+	register unsigned long arg2 __asm__("%o2");
 	unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
 	if (!page) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c10a3a8639e8..f553264588d6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 	retries = 0;
 	cnt = init_cpu_list(cpu_list, mask);
 	do {
-		register unsigned long func __asm__("%o0");
-		register unsigned long arg0 __asm__("%o1");
-		register unsigned long arg1 __asm__("%o2");
-		register unsigned long arg2 __asm__("%o3");
+		register unsigned long func __asm__("%o5");
+		register unsigned long arg0 __asm__("%o0");
+		register unsigned long arg1 __asm__("%o1");
+		register unsigned long arg2 __asm__("%o2");
 
 		func = HV_FAST_CPU_MONDO_SEND;
 		arg0 = cnt;
@@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 		int retries = 0;
 
 		do {
-			register unsigned long func __asm__("%o0");
-			register unsigned long arg0 __asm__("%o1");
-			register unsigned long arg1 __asm__("%o2");
-			register unsigned long arg2 __asm__("%o3");
+			register unsigned long func __asm__("%o5");
+			register unsigned long arg0 __asm__("%o0");
+			register unsigned long arg1 __asm__("%o1");
+			register unsigned long arg2 __asm__("%o2");
 
 			cpu_list[0] = i;
 			func = HV_FAST_CPU_MONDO_SEND;
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index ffa8b79632cf..c476f5b321fb 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -265,20 +265,20 @@ do_unlock:
 	 nop
 
 niagara_lock_tlb:
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
-	sethi	%hi(KERNBASE), %o1
-	clr	%o2
-	sethi	%hi(kern_locked_tte_data), %o3
-	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
-	mov	HV_MMU_IMMU, %o4
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sethi	%hi(KERNBASE), %o0
+	clr	%o1
+	sethi	%hi(kern_locked_tte_data), %o2
+	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	mov	HV_MMU_IMMU, %o3
 	ta	HV_FAST_TRAP
 
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
-	sethi	%hi(KERNBASE), %o1
-	clr	%o2
-	sethi	%hi(kern_locked_tte_data), %o3
-	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
-	mov	HV_MMU_DMMU, %o4
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sethi	%hi(KERNBASE), %o0
+	clr	%o1
+	sethi	%hi(kern_locked_tte_data), %o2
+	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	mov	HV_MMU_DMMU, %o3
 	ta	HV_FAST_TRAP
 
 	sethi	%hi(bigkernel), %g2
@@ -286,24 +286,24 @@ niagara_lock_tlb:
 	brz,pt	%g2, after_lock_tlb
 	 nop
 
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
-	sethi	%hi(KERNBASE + 0x400000), %o1
-	clr	%o2
-	sethi	%hi(kern_locked_tte_data), %o3
-	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
-	sethi	%hi(0x400000), %o4
-	add	%o3, %o4, %o3
-	mov	HV_MMU_IMMU, %o4
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sethi	%hi(KERNBASE + 0x400000), %o0
+	clr	%o1
+	sethi	%hi(kern_locked_tte_data), %o2
+	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	sethi	%hi(0x400000), %o3
+	add	%o2, %o3, %o2
+	mov	HV_MMU_IMMU, %o3
 	ta	HV_FAST_TRAP
 
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
-	sethi	%hi(KERNBASE + 0x400000), %o1
-	clr	%o2
-	sethi	%hi(kern_locked_tte_data), %o3
-	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
-	sethi	%hi(0x400000), %o4
-	add	%o3, %o4, %o3
-	mov	HV_MMU_DMMU, %o4
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
+	sethi	%hi(KERNBASE + 0x400000), %o0
+	clr	%o1
+	sethi	%hi(kern_locked_tte_data), %o2
+	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	sethi	%hi(0x400000), %o3
+	add	%o2, %o3, %o2
+	mov	HV_MMU_DMMU, %o3
 	ta	HV_FAST_TRAP
 
 after_lock_tlb:
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index a53ec6fb7697..8a9351258af8 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -266,9 +266,9 @@ __tsb_context_switch:
 	mov	SCRATCHPAD_UTSBREG2, %g1
 	stxa	%g2, [%g1] ASI_SCRATCHPAD
 
-	mov	HV_FAST_MMU_TSB_CTXNON0, %o0
-	mov	1, %o1
-	mov	%o4, %o2
+	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
+	mov	1, %o0
+	mov	%o4, %o1
 	ta	HV_FAST_TRAP
 
 	ba,pt	%xcc, 9f
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index e602b857071a..7faba33202a9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
 				       unsigned long pte,
 				       unsigned long mmu)
 {
-	register unsigned long func asm("%o0");
-	register unsigned long arg0 asm("%o1");
-	register unsigned long arg1 asm("%o2");
-	register unsigned long arg2 asm("%o3");
-	register unsigned long arg3 asm("%o4");
+	register unsigned long func asm("%o5");
+	register unsigned long arg0 asm("%o0");
+	register unsigned long arg1 asm("%o1");
+	register unsigned long arg2 asm("%o2");
+	register unsigned long arg3 asm("%o3");
 
 	func = HV_FAST_MMU_MAP_PERM_ADDR;
 	arg0 = vaddr;
@@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void)
 /* Register this cpu's fault status area with the hypervisor. */
 void __cpuinit sun4v_register_fault_status(void)
 {
+	register unsigned long func asm("%o5");
 	register unsigned long arg0 asm("%o0");
-	register unsigned long arg1 asm("%o1");
 	int cpu = hard_smp_processor_id();
 	struct trap_per_cpu *tb = &trap_block[cpu];
 	unsigned long pa;
 
 	pa = kern_base + ((unsigned long) tb - KERNBASE);
-	arg0 = HV_FAST_MMU_FAULT_AREA_CONF;
-	arg1 = pa;
+	func = HV_FAST_MMU_FAULT_AREA_CONF;
+	arg0 = pa;
 	__asm__ __volatile__("ta %4"
-			     : "=&r" (arg0), "=&r" (arg1)
-			     : "0" (arg0), "1" (arg1),
+			     : "=&r" (func), "=&r" (arg0)
+			     : "0" (func), "1" (arg0),
 			     "i" (HV_FAST_TRAP));
 }
 
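For reference, a consolidated sketch of sun4v_register_fault_status() as it reads after this patch, reassembled from the context and '+' lines of the final hunk above (surrounding declarations and the HV_* constants are as in the kernel sources):

	/* Post-patch form: the API number travels in %o5 and the
	 * fault-status-area physical address in %o0.
	 */
	void __cpuinit sun4v_register_fault_status(void)
	{
		register unsigned long func asm("%o5");
		register unsigned long arg0 asm("%o0");
		int cpu = hard_smp_processor_id();
		struct trap_per_cpu *tb = &trap_block[cpu];
		unsigned long pa;

		pa = kern_base + ((unsigned long) tb - KERNBASE);
		func = HV_FAST_MMU_FAULT_AREA_CONF;
		arg0 = pa;
		__asm__ __volatile__("ta %4"
				     : "=&r" (func), "=&r" (arg0)
				     : "0" (func), "1" (arg0),
				     "i" (HV_FAST_TRAP));
	}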