about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-02-26 22:31:49 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:14:05 -0500
commit2a3a5f5ddbefde498e87f10924d4bf741c5bf37f (patch)
tree851003d4ff0b1619766d4fab883868f2b413ed62 /arch
parent6cc80cfab8b2ce1919ad5862a43f6b7bcf163c80 (diff)
[SPARC64]: Bulletproof hypervisor TLB flushing.
Check TLB flush hypervisor calls for errors and report them. Pass HV_MMU_ALL always for now, we can add back the optimization to avoid the I-TLB flush later. Always explicitly page align the virtual address arguments. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/sparc64/kernel/traps.c12
-rw-r--r--arch/sparc64/mm/ultra.S94
2 files changed, 80 insertions, 26 deletions
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8df0cf29e3eb..043a72658f6a 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1968,6 +1968,18 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1968 prom_halt(); 1968 prom_halt();
1969} 1969}
1970 1970
1971void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1972{
1973 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1974 err, op);
1975}
1976
1977void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1978{
1979 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
1980 err, op);
1981}
1982
1971void do_fpe_common(struct pt_regs *regs) 1983void do_fpe_common(struct pt_regs *regs)
1972{ 1984{
1973 if (regs->tstate & TSTATE_PRIV) { 1985 if (regs->tstate & TSTATE_PRIV) {
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 725f8b34af49..bd8b0b4f878f 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -257,17 +257,27 @@ __cheetah_flush_dcache_page: /* 11 insns */
257#endif /* DCACHE_ALIASING_POSSIBLE */ 257#endif /* DCACHE_ALIASING_POSSIBLE */
258 258
259 /* Hypervisor specific versions, patched at boot time. */ 259 /* Hypervisor specific versions, patched at boot time. */
260__hypervisor_flush_tlb_mm: /* 8 insns */ 260__hypervisor_tlb_tl0_error:
261 save %sp, -192, %sp
262 mov %i0, %o0
263 call hypervisor_tlbop_error
264 mov %i1, %o1
265 ret
266 restore
267
268__hypervisor_flush_tlb_mm: /* 10 insns */
261 mov %o0, %o2 /* ARG2: mmu context */ 269 mov %o0, %o2 /* ARG2: mmu context */
262 mov 0, %o0 /* ARG0: CPU lists unimplemented */ 270 mov 0, %o0 /* ARG0: CPU lists unimplemented */
263 mov 0, %o1 /* ARG1: CPU lists unimplemented */ 271 mov 0, %o1 /* ARG1: CPU lists unimplemented */
264 mov HV_MMU_ALL, %o3 /* ARG3: flags */ 272 mov HV_MMU_ALL, %o3 /* ARG3: flags */
265 mov HV_FAST_MMU_DEMAP_CTX, %o5 273 mov HV_FAST_MMU_DEMAP_CTX, %o5
266 ta HV_FAST_TRAP 274 ta HV_FAST_TRAP
275 brnz,pn %o0, __hypervisor_tlb_tl0_error
276 mov HV_FAST_MMU_DEMAP_CTX, %o1
267 retl 277 retl
268 nop 278 nop
269 279
270__hypervisor_flush_tlb_pending: /* 15 insns */ 280__hypervisor_flush_tlb_pending: /* 16 insns */
271 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 281 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
272 sllx %o1, 3, %g1 282 sllx %o1, 3, %g1
273 mov %o2, %g2 283 mov %o2, %g2
@@ -275,17 +285,18 @@ __hypervisor_flush_tlb_pending: /* 15 insns */
2751: sub %g1, (1 << 3), %g1 2851: sub %g1, (1 << 3), %g1
276 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ 286 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
277 mov %g3, %o1 /* ARG1: mmu context */ 287 mov %g3, %o1 /* ARG1: mmu context */
278 mov HV_MMU_DMMU, %o2 288 mov HV_MMU_ALL, %o2 /* ARG2: flags */
279 andcc %o0, 1, %g0 289 srlx %o0, PAGE_SHIFT, %o0
280 movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ 290 sllx %o0, PAGE_SHIFT, %o0
281 andn %o0, 1, %o0
282 ta HV_MMU_UNMAP_ADDR_TRAP 291 ta HV_MMU_UNMAP_ADDR_TRAP
292 brnz,pn %o0, __hypervisor_tlb_tl0_error
293 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
283 brnz,pt %g1, 1b 294 brnz,pt %g1, 1b
284 nop 295 nop
285 retl 296 retl
286 nop 297 nop
287 298
288__hypervisor_flush_tlb_kernel_range: /* 14 insns */ 299__hypervisor_flush_tlb_kernel_range: /* 16 insns */
289 /* %o0=start, %o1=end */ 300 /* %o0=start, %o1=end */
290 cmp %o0, %o1 301 cmp %o0, %o1
291 be,pn %xcc, 2f 302 be,pn %xcc, 2f
@@ -297,6 +308,8 @@ __hypervisor_flush_tlb_kernel_range: /* 14 insns */
297 mov 0, %o1 /* ARG1: mmu context */ 308 mov 0, %o1 /* ARG1: mmu context */
298 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 309 mov HV_MMU_ALL, %o2 /* ARG2: flags */
299 ta HV_MMU_UNMAP_ADDR_TRAP 310 ta HV_MMU_UNMAP_ADDR_TRAP
311 brnz,pn %o0, __hypervisor_tlb_tl0_error
312 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
300 brnz,pt %g2, 1b 313 brnz,pt %g2, 1b
301 sub %g2, %g3, %g2 314 sub %g2, %g3, %g2
3022: retl 3152: retl
@@ -369,7 +382,7 @@ cheetah_patch_cachetlbops:
369 */ 382 */
370 .align 32 383 .align 32
371 .globl xcall_flush_tlb_mm 384 .globl xcall_flush_tlb_mm
372xcall_flush_tlb_mm: /* 18 insns */ 385xcall_flush_tlb_mm: /* 21 insns */
373 mov PRIMARY_CONTEXT, %g2 386 mov PRIMARY_CONTEXT, %g2
374 ldxa [%g2] ASI_DMMU, %g3 387 ldxa [%g2] ASI_DMMU, %g3
375 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 388 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -388,9 +401,12 @@ xcall_flush_tlb_mm: /* 18 insns */
388 nop 401 nop
389 nop 402 nop
390 nop 403 nop
404 nop
405 nop
406 nop
391 407
392 .globl xcall_flush_tlb_pending 408 .globl xcall_flush_tlb_pending
393xcall_flush_tlb_pending: /* 20 insns */ 409xcall_flush_tlb_pending: /* 21 insns */
394 /* %g5=context, %g1=nr, %g7=vaddrs[] */ 410 /* %g5=context, %g1=nr, %g7=vaddrs[] */
395 sllx %g1, 3, %g1 411 sllx %g1, 3, %g1
396 mov PRIMARY_CONTEXT, %g4 412 mov PRIMARY_CONTEXT, %g4
@@ -413,9 +429,10 @@ xcall_flush_tlb_pending: /* 20 insns */
413 nop 429 nop
414 stxa %g2, [%g4] ASI_DMMU 430 stxa %g2, [%g4] ASI_DMMU
415 retry 431 retry
432 nop
416 433
417 .globl xcall_flush_tlb_kernel_range 434 .globl xcall_flush_tlb_kernel_range
418xcall_flush_tlb_kernel_range: /* 22 insns */ 435xcall_flush_tlb_kernel_range: /* 25 insns */
419 sethi %hi(PAGE_SIZE - 1), %g2 436 sethi %hi(PAGE_SIZE - 1), %g2
420 or %g2, %lo(PAGE_SIZE - 1), %g2 437 or %g2, %lo(PAGE_SIZE - 1), %g2
421 andn %g1, %g2, %g1 438 andn %g1, %g2, %g1
@@ -438,6 +455,9 @@ xcall_flush_tlb_kernel_range: /* 22 insns */
438 nop 455 nop
439 nop 456 nop
440 nop 457 nop
458 nop
459 nop
460 nop
441 461
442 /* This runs in a very controlled environment, so we do 462 /* This runs in a very controlled environment, so we do
443 * not need to worry about BH races etc. 463 * not need to worry about BH races etc.
@@ -545,8 +565,21 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
545 nop 565 nop
546 nop 566 nop
547 567
568 /* %g5: error
569 * %g6: tlb op
570 */
571__hypervisor_tlb_xcall_error:
572 mov %g5, %g4
573 mov %g6, %g5
574 ba,pt %xcc, etrap
575 rd %pc, %g7
576 mov %l4, %o0
577 call hypervisor_tlbop_error_xcall
578 mov %l5, %o1
579 ba,a,pt %xcc, rtrap_clr_l6
580
548 .globl __hypervisor_xcall_flush_tlb_mm 581 .globl __hypervisor_xcall_flush_tlb_mm
549__hypervisor_xcall_flush_tlb_mm: /* 18 insns */ 582__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
550 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ 583 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
551 mov %o0, %g2 584 mov %o0, %g2
552 mov %o1, %g3 585 mov %o1, %g3
@@ -559,6 +592,9 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */
559 mov HV_MMU_ALL, %o3 /* ARG3: flags */ 592 mov HV_MMU_ALL, %o3 /* ARG3: flags */
560 mov HV_FAST_MMU_DEMAP_CTX, %o5 593 mov HV_FAST_MMU_DEMAP_CTX, %o5
561 ta HV_FAST_TRAP 594 ta HV_FAST_TRAP
595 mov HV_FAST_MMU_DEMAP_CTX, %g6
596 brnz,pn %o0, __hypervisor_tlb_xcall_error
597 mov %o0, %g5
562 mov %g2, %o0 598 mov %g2, %o0
563 mov %g3, %o1 599 mov %g3, %o1
564 mov %g4, %o2 600 mov %g4, %o2
@@ -568,8 +604,8 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */
568 retry 604 retry
569 605
570 .globl __hypervisor_xcall_flush_tlb_pending 606 .globl __hypervisor_xcall_flush_tlb_pending
571__hypervisor_xcall_flush_tlb_pending: /* 18 insns */ 607__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
572 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */ 608 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
573 sllx %g1, 3, %g1 609 sllx %g1, 3, %g1
574 mov %o0, %g2 610 mov %o0, %g2
575 mov %o1, %g3 611 mov %o1, %g3
@@ -577,10 +613,13 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */
5771: sub %g1, (1 << 3), %g1 6131: sub %g1, (1 << 3), %g1
578 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ 614 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
579 mov %g5, %o1 /* ARG1: mmu context */ 615 mov %g5, %o1 /* ARG1: mmu context */
580 mov HV_MMU_DMMU, %o2 616 mov HV_MMU_ALL, %o2 /* ARG2: flags */
581 andcc %o0, 1, %g0 617 srlx %o0, PAGE_SHIFT, %o0
582 movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ 618 sllx %o0, PAGE_SHIFT, %o0
583 ta HV_MMU_UNMAP_ADDR_TRAP 619 ta HV_MMU_UNMAP_ADDR_TRAP
620 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
621 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
622 mov %o0, %g5
584 brnz,pt %g1, 1b 623 brnz,pt %g1, 1b
585 nop 624 nop
586 mov %g2, %o0 625 mov %g2, %o0
@@ -590,8 +629,8 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */
590 retry 629 retry
591 630
592 .globl __hypervisor_xcall_flush_tlb_kernel_range 631 .globl __hypervisor_xcall_flush_tlb_kernel_range
593__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ 632__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
594 /* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */ 633 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
595 sethi %hi(PAGE_SIZE - 1), %g2 634 sethi %hi(PAGE_SIZE - 1), %g2
596 or %g2, %lo(PAGE_SIZE - 1), %g2 635 or %g2, %lo(PAGE_SIZE - 1), %g2
597 andn %g1, %g2, %g1 636 andn %g1, %g2, %g1
@@ -601,17 +640,20 @@ __hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */
601 sub %g3, %g2, %g3 640 sub %g3, %g2, %g3
602 mov %o0, %g2 641 mov %o0, %g2
603 mov %o1, %g4 642 mov %o1, %g4
604 mov %o2, %g5 643 mov %o2, %g7
6051: add %g1, %g3, %o0 /* ARG0: virtual address */ 6441: add %g1, %g3, %o0 /* ARG0: virtual address */
606 mov 0, %o1 /* ARG1: mmu context */ 645 mov 0, %o1 /* ARG1: mmu context */
607 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 646 mov HV_MMU_ALL, %o2 /* ARG2: flags */
608 ta HV_MMU_UNMAP_ADDR_TRAP 647 ta HV_MMU_UNMAP_ADDR_TRAP
648 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
649 brnz,pn %o0, __hypervisor_tlb_xcall_error
650 mov %o0, %g5
609 sethi %hi(PAGE_SIZE), %o2 651 sethi %hi(PAGE_SIZE), %o2
610 brnz,pt %g3, 1b 652 brnz,pt %g3, 1b
611 sub %g3, %o2, %g3 653 sub %g3, %o2, %g3
612 mov %g2, %o0 654 mov %g2, %o0
613 mov %g4, %o1 655 mov %g4, %o1
614 mov %g5, %o2 656 mov %g7, %o2
615 membar #Sync 657 membar #Sync
616 retry 658 retry
617 659
@@ -643,21 +685,21 @@ hypervisor_patch_cachetlbops:
643 sethi %hi(__hypervisor_flush_tlb_mm), %o1 685 sethi %hi(__hypervisor_flush_tlb_mm), %o1
644 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 686 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
645 call tlb_patch_one 687 call tlb_patch_one
646 mov 8, %o2 688 mov 10, %o2
647 689
648 sethi %hi(__flush_tlb_pending), %o0 690 sethi %hi(__flush_tlb_pending), %o0
649 or %o0, %lo(__flush_tlb_pending), %o0 691 or %o0, %lo(__flush_tlb_pending), %o0
650 sethi %hi(__hypervisor_flush_tlb_pending), %o1 692 sethi %hi(__hypervisor_flush_tlb_pending), %o1
651 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 693 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
652 call tlb_patch_one 694 call tlb_patch_one
653 mov 15, %o2 695 mov 16, %o2
654 696
655 sethi %hi(__flush_tlb_kernel_range), %o0 697 sethi %hi(__flush_tlb_kernel_range), %o0
656 or %o0, %lo(__flush_tlb_kernel_range), %o0 698 or %o0, %lo(__flush_tlb_kernel_range), %o0
657 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 699 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
658 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 700 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
659 call tlb_patch_one 701 call tlb_patch_one
660 mov 14, %o2 702 mov 16, %o2
661 703
662#ifdef DCACHE_ALIASING_POSSIBLE 704#ifdef DCACHE_ALIASING_POSSIBLE
663 sethi %hi(__flush_dcache_page), %o0 705 sethi %hi(__flush_dcache_page), %o0
@@ -674,21 +716,21 @@ hypervisor_patch_cachetlbops:
674 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 716 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
675 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 717 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
676 call tlb_patch_one 718 call tlb_patch_one
677 mov 18, %o2 719 mov 21, %o2
678 720
679 sethi %hi(xcall_flush_tlb_pending), %o0 721 sethi %hi(xcall_flush_tlb_pending), %o0
680 or %o0, %lo(xcall_flush_tlb_pending), %o0 722 or %o0, %lo(xcall_flush_tlb_pending), %o0
681 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 723 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
682 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 724 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
683 call tlb_patch_one 725 call tlb_patch_one
684 mov 18, %o2 726 mov 21, %o2
685 727
686 sethi %hi(xcall_flush_tlb_kernel_range), %o0 728 sethi %hi(xcall_flush_tlb_kernel_range), %o0
687 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 729 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
688 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 730 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
689 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 731 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
690 call tlb_patch_one 732 call tlb_patch_one
691 mov 22, %o2 733 mov 25, %o2
692#endif /* CONFIG_SMP */ 734#endif /* CONFIG_SMP */
693 735
694 ret 736 ret