 arch/sparc64/mm/ultra.S | 224 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 214 insertions(+), 10 deletions(-)
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 269ed57b3e9d..cac58d66fca9 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -15,6 +15,7 @@
 #include <asm/head.h>
 #include <asm/thread_info.h>
 #include <asm/cacheflush.h>
+#include <asm/hypervisor.h>
 
 /* Basically, most of the Spitfire vs. Cheetah madness
  * has to do with the fact that Cheetah does not support
@@ -29,7 +30,8 @@
 	.text
 	.align	32
 	.globl	__flush_tlb_mm
-__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
+__flush_tlb_mm: /* 18 insns */
+	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 	ldxa	[%o1] ASI_DMMU, %g2
 	cmp	%g2, %o0
 	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
@@ -52,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 
 	.align	32
 	.globl	__flush_tlb_pending
-__flush_tlb_pending:
+__flush_tlb_pending:	/* 26 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	rdpr	%pstate, %g7
 	sllx	%o1, 3, %o1
@@ -84,7 +86,8 @@ __flush_tlb_pending:
 
 	.align	32
 	.globl	__flush_tlb_kernel_range
-__flush_tlb_kernel_range:	/* %o0=start, %o1=end */
+__flush_tlb_kernel_range:	/* 14 insns */
+	/* %o0=start, %o1=end */
 	cmp	%o0, %o1
 	be,pn	%xcc, 2f
 	 sethi	%hi(PAGE_SIZE), %o4
@@ -100,6 +103,7 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */
 	flush	%o3
 	retl
 	 nop
+	 nop
 
 __spitfire_flush_tlb_mm_slow:
 	rdpr	%pstate, %g1
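
The trailing nop added above is not dead code: it pads __flush_tlb_kernel_range
to 14 instructions, the size of the hypervisor replacement introduced below, so
the replacement can be patched over it in place.  A minimal sketch of the
invariant that the new "N insns" comments encode (patch_check() is illustrative,
not a kernel function):

	/* Sketch only: a boot-time-patchable routine must be at least as
	 * long as any replacement copied over it, since tlb_patch_one()
	 * (below) overwrites it one instruction word at a time.
	 */
	static void patch_check(unsigned int generic_insns,
				unsigned int replacement_insns)
	{
		if (replacement_insns > generic_insns)
			panic("TLB flush replacement too large");
	}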
@@ -252,7 +256,63 @@ __cheetah_flush_dcache_page: /* 11 insns */
 	 nop
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
-cheetah_patch_one:
+	/* Hypervisor specific versions, patched at boot time. */
+__hypervisor_flush_tlb_mm: /* 8 insns */
+	mov	%o0, %o2	/* ARG2: mmu context */
+	mov	0, %o0		/* ARG0: CPU lists unimplemented */
+	mov	0, %o1		/* ARG1: CPU lists unimplemented */
+	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov	HV_FAST_MMU_DEMAP_CTX, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+
+__hypervisor_flush_tlb_pending: /* 15 insns */
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	sllx	%o1, 3, %g1
+	mov	%o2, %g2
+	mov	%o0, %g3
+1:	sub	%g1, (1 << 3), %g1
+	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
+	mov	%g3, %o1		/* ARG1: mmu context */
+	mov	HV_MMU_DMMU, %o2
+	andcc	%o0, 1, %g0
+	movne	%icc, HV_MMU_ALL, %o2	/* ARG2: flags */
+	andn	%o0, 1, %o0
+	ta	HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pt	%g1, 1b
+	 nop
+	retl
+	 nop
+
+__hypervisor_flush_tlb_kernel_range: /* 14 insns */
+	/* %o0=start, %o1=end */
+	cmp	%o0, %o1
+	be,pn	%xcc, 2f
+	 sethi	%hi(PAGE_SIZE), %g3
+	mov	%o0, %g1
+	sub	%o1, %g1, %g2
+	sub	%g2, %g3, %g2
+1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
+	mov	0, %o1		/* ARG1: mmu context */
+	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
+	ta	HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pt	%g2, 1b
+	 sub	%g2, %g3, %g2
+2:	retl
+	 nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	/* XXX Niagara and friends have an 8K cache, so no aliasing is
+	 * XXX possible, but nothing explicit in the Hypervisor API
+	 * XXX guarantees this.
+	 */
+__hypervisor_flush_dcache_page:	/* 2 insns */
+	retl
+	 nop
+#endif
+
+tlb_patch_one:
 1:	lduw	[%o1], %g1
 	stw	%g1, [%o0]
 	flush	%o0
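
In rough C terms, the three hypervisor TLB primitives added above behave as
follows.  hv_fast_trap() and hv_mmu_unmap_addr() are illustrative stand-ins
for the "ta HV_FAST_TRAP" and "ta HV_MMU_UNMAP_ADDR_TRAP" instructions, not
real kernel helpers:

	/* Flush an entire context: a single MMU_DEMAP_CTX fast trap. */
	static void hv_flush_tlb_mm(unsigned long ctx)
	{
		hv_fast_trap(HV_FAST_MMU_DEMAP_CTX,
			     0,           /* ARG0: CPU lists unimplemented */
			     0,           /* ARG1: CPU lists unimplemented */
			     ctx,         /* ARG2: mmu context */
			     HV_MMU_ALL); /* ARG3: flags */
	}

	/* Flush pending vaddrs: bit 0 of each entry requests an I-TLB
	 * flush as well, and is masked off before the trap.
	 */
	static void hv_flush_tlb_pending(unsigned long ctx, unsigned long nr,
					 unsigned long *vaddrs)
	{
		while (nr--) {
			unsigned long v = vaddrs[nr];
			unsigned long flags = (v & 1) ? HV_MMU_ALL
						      : HV_MMU_DMMU;

			hv_mmu_unmap_addr(v & ~1UL, ctx, flags);
		}
	}

	/* Flush a kernel range: one unmap trap per page, walking from
	 * the last page of the span down to offset zero, in context 0.
	 */
	static void hv_flush_tlb_kernel_range(unsigned long start,
					      unsigned long end)
	{
		long off;

		for (off = end - start - PAGE_SIZE; off >= 0; off -= PAGE_SIZE)
			hv_mmu_unmap_addr(start + off, 0, HV_MMU_ALL);
	}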
@@ -271,14 +331,14 @@ cheetah_patch_cachetlbops:
 	or	%o0, %lo(__flush_tlb_mm), %o0
 	sethi	%hi(__cheetah_flush_tlb_mm), %o1
 	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
-	call	cheetah_patch_one
+	call	tlb_patch_one
 	 mov	19, %o2
 
 	sethi	%hi(__flush_tlb_pending), %o0
 	or	%o0, %lo(__flush_tlb_pending), %o0
 	sethi	%hi(__cheetah_flush_tlb_pending), %o1
 	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
-	call	cheetah_patch_one
+	call	tlb_patch_one
 	 mov	27, %o2
 
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -286,7 +346,7 @@ cheetah_patch_cachetlbops:
 	or	%o0, %lo(__flush_dcache_page), %o0
 	sethi	%hi(__cheetah_flush_dcache_page), %o1
 	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
-	call	cheetah_patch_one
+	call	tlb_patch_one
 	 mov	11, %o2
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
@@ -309,7 +369,7 @@ cheetah_patch_cachetlbops:
 	 */
 	.align	32
 	.globl	xcall_flush_tlb_mm
-xcall_flush_tlb_mm:
+xcall_flush_tlb_mm:	/* 18 insns */
 	mov	PRIMARY_CONTEXT, %g2
 	ldxa	[%g2] ASI_DMMU, %g3
 	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -321,9 +381,16 @@ xcall_flush_tlb_mm:
 	stxa	%g0, [%g4] ASI_IMMU_DEMAP
 	stxa	%g3, [%g2] ASI_DMMU
 	retry
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
 
 	.globl	xcall_flush_tlb_pending
-xcall_flush_tlb_pending:
+xcall_flush_tlb_pending:	/* 20 insns */
 	/* %g5=context, %g1=nr, %g7=vaddrs[] */
 	sllx	%g1, 3, %g1
 	mov	PRIMARY_CONTEXT, %g4
@@ -348,7 +415,7 @@ xcall_flush_tlb_pending:
 	retry
 
 	.globl	xcall_flush_tlb_kernel_range
-xcall_flush_tlb_kernel_range:
+xcall_flush_tlb_kernel_range:	/* 22 insns */
 	sethi	%hi(PAGE_SIZE - 1), %g2
 	or	%g2, %lo(PAGE_SIZE - 1), %g2
 	andn	%g1, %g2, %g1
@@ -365,6 +432,12 @@ xcall_flush_tlb_kernel_range:
 	retry
 	nop
 	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
 
 	/* This runs in a very controlled environment, so we do
 	 * not need to worry about BH races etc.
@@ -458,6 +531,76 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
 	nop
 	nop
 
+	.globl	__hypervisor_xcall_flush_tlb_mm
+__hypervisor_xcall_flush_tlb_mm: /* 18 insns */
+	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
+	mov	%o0, %g2
+	mov	%o1, %g3
+	mov	%o2, %g4
+	mov	%o3, %g1
+	mov	%o5, %g7
+	clr	%o0		/* ARG0: CPU lists unimplemented */
+	clr	%o1		/* ARG1: CPU lists unimplemented */
+	mov	%g5, %o2	/* ARG2: mmu context */
+	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov	HV_FAST_MMU_DEMAP_CTX, %o5
+	ta	HV_FAST_TRAP
+	mov	%g2, %o0
+	mov	%g3, %o1
+	mov	%g4, %o2
+	mov	%g1, %o3
+	mov	%g7, %o5
+	membar	#Sync
+	retry
+
+	.globl	__hypervisor_xcall_flush_tlb_pending
+__hypervisor_xcall_flush_tlb_pending: /* 18 insns */
+	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */
+	sllx	%g1, 3, %g1
+	mov	%o0, %g2
+	mov	%o1, %g3
+	mov	%o2, %g4
+1:	sub	%g1, (1 << 3), %g1
+	ldx	[%g7 + %g1], %o0	/* ARG0: virtual address */
+	mov	%g5, %o1		/* ARG1: mmu context */
+	mov	HV_MMU_DMMU, %o2
+	andcc	%o0, 1, %g0
+	movne	%icc, HV_MMU_ALL, %o2	/* ARG2: flags */
+	ta	HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pt	%g1, 1b
+	 nop
+	mov	%g2, %o0
+	mov	%g3, %o1
+	mov	%g4, %o2
+	membar	#Sync
+	retry
+
+	.globl	__hypervisor_xcall_flush_tlb_kernel_range
+__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */
+	/* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */
+	sethi	%hi(PAGE_SIZE - 1), %g2
+	or	%g2, %lo(PAGE_SIZE - 1), %g2
+	andn	%g1, %g2, %g1
+	andn	%g7, %g2, %g7
+	sub	%g7, %g1, %g3
+	add	%g2, 1, %g2
+	sub	%g3, %g2, %g3
+	mov	%o0, %g2
+	mov	%o1, %g4
+	mov	%o2, %g5
+1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
+	mov	0, %o1		/* ARG1: mmu context */
+	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
+	ta	HV_MMU_UNMAP_ADDR_TRAP
+	sethi	%hi(PAGE_SIZE), %o2
+	brnz,pt	%g3, 1b
+	 sub	%g3, %o2, %g3
+	mov	%g2, %o0
+	mov	%g4, %o1
+	mov	%g5, %o2
+	membar	#Sync
+	retry
+
 	/* These just get rescheduled to PIL vectors. */
 	.globl	xcall_call_function
 xcall_call_function:
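
The cross-call variants above run in trap context, where only the global
registers are free, yet the sun4v hypercall ABI consumes %o0-%o5; hence the
mov sequences staging the interrupted code's %o registers into %g scratch
around the trap, and the membar #Sync ordering the demap before retry.  A
rough C rendering of __hypervisor_xcall_flush_tlb_mm, where save_outs(),
restore_outs() and membar_sync() are illustrative stand-ins for the
surrounding mov and membar instructions:

	static void hv_xcall_flush_tlb_mm(unsigned long ctx)
	{
		unsigned long saved[5];

		save_outs(saved);	/* mov %o0-%o3, %o5 into %g scratch */
		hv_fast_trap(HV_FAST_MMU_DEMAP_CTX,
			     0, 0,	/* ARG0/ARG1: CPU lists unimplemented */
			     ctx,	/* ARG2: mmu context */
			     HV_MMU_ALL);
		restore_outs(saved);	/* put the interrupted state back */
		membar_sync();		/* complete the flush before retry */
	}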
@@ -475,3 +618,64 @@ xcall_capture:
 	retry
 
 #endif /* CONFIG_SMP */
+
+
+	.globl	hypervisor_patch_cachetlbops
+hypervisor_patch_cachetlbops:
+	save	%sp, -128, %sp
+
+	sethi	%hi(__flush_tlb_mm), %o0
+	or	%o0, %lo(__flush_tlb_mm), %o0
+	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
+	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
+	call	tlb_patch_one
+	 mov	8, %o2
+
+	sethi	%hi(__flush_tlb_pending), %o0
+	or	%o0, %lo(__flush_tlb_pending), %o0
+	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
+	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
+	call	tlb_patch_one
+	 mov	15, %o2
+
+	sethi	%hi(__flush_tlb_kernel_range), %o0
+	or	%o0, %lo(__flush_tlb_kernel_range), %o0
+	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
+	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
+	call	tlb_patch_one
+	 mov	14, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	sethi	%hi(__flush_dcache_page), %o0
+	or	%o0, %lo(__flush_dcache_page), %o0
+	sethi	%hi(__hypervisor_flush_dcache_page), %o1
+	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
+	call	tlb_patch_one
+	 mov	2, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+	sethi	%hi(xcall_flush_tlb_mm), %o0
+	or	%o0, %lo(xcall_flush_tlb_mm), %o0
+	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
+	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
+	call	tlb_patch_one
+	 mov	18, %o2
+
+	sethi	%hi(xcall_flush_tlb_pending), %o0
+	or	%o0, %lo(xcall_flush_tlb_pending), %o0
+	sethi	%hi(__hypervisor_xcall_flush_tlb_pending), %o1
+	or	%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+	call	tlb_patch_one
+	 mov	18, %o2
+
+	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
+	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
+	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+	call	tlb_patch_one
+	 mov	22, %o2
+#endif /* CONFIG_SMP */
+
+	ret
+	 restore
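
hypervisor_patch_cachetlbops pairs each generic routine with its hypervisor
version and an instruction count matching the "N insns" comments above.  In
rough C terms, tlb_patch_one copies that many 32-bit instruction words and
flushes each one so the new code is fetched; flushi() stands in for the SPARC
flush instruction:

	static void tlb_patch_one(u32 *dst, const u32 *src, unsigned int insns)
	{
		while (insns--) {
			*dst = *src++;
			flushi(dst++);	/* make the I-cache see the new word */
		}
	}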