Diffstat (limited to 'arch')
79 files changed, 9759 insertions, 3298 deletions
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index d39c9f206271..460f72e640e6 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -217,7 +217,7 @@ static void _sparc_free_io(struct resource *res)
 	unsigned long plen;
 
 	plen = res->end - res->start + 1;
-	if ((plen & (PAGE_SIZE-1)) != 0) BUG();
+	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
 	sparc_unmapiorange(res->start, plen);
 	release_resource(res);
 }
@@ -512,8 +512,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
     int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return virt_to_phys(ptr);
 }
@@ -528,8 +527,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
 void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
     int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -542,8 +540,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
 dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
 	unsigned long offset, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
@@ -551,8 +548,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
 void pci_unmap_page(struct pci_dev *hwdev,
 	dma_addr_t dma_address, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* mmu_inval_dma_area XXX */
 }
 
@@ -576,11 +572,10 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for (n = 0; n < nents; n++) {
-		if (page_address(sg->page) == NULL) BUG();
+		BUG_ON(page_address(sg->page) == NULL);
 		sg->dvma_address = virt_to_phys(page_address(sg->page));
 		sg->dvma_length = sg->length;
 		sg++;
@@ -597,11 +592,10 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -622,8 +616,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  */
 void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -632,8 +625,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si
 
 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
@@ -650,11 +642,10 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
@@ -667,11 +658,10 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, i
 {
 	int n;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for (n = 0; n < nents; n++) {
-			if (page_address(sg->page) == NULL) BUG();
+			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
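Every hunk in ioport.c above is the same mechanical conversion: an open-coded "if (cond) BUG();" becomes BUG_ON(cond). The two forms are equivalent; the macro reads as an assertion and (in the generic definition) carries the unlikely() branch hint. A minimal sketch of the idiom, with an illustrative function that is not part of the patch:

	#include <linux/kernel.h>	/* pulls in BUG(), BUG_ON() */
	#include <asm/page.h>		/* PAGE_SIZE */

	static void check_page_aligned(unsigned long plen)
	{
		/* before: if ((plen & (PAGE_SIZE-1)) != 0) BUG(); */
		BUG_ON((plen & (PAGE_SIZE - 1)) != 0);
	}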
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 4c0a50a76554..c3685b314d71 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -186,6 +186,15 @@ endchoice
 
 endmenu
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
+config LARGE_ALLOCS
+	def_bool y
+
 source "mm/Kconfig"
 
 config GENERIC_ISA_DMA
@@ -350,6 +359,15 @@ config SOLARIS_EMUL
 
 endmenu
 
+config SCHED_SMT
+	bool "SMT (Hyperthreading) scheduler support"
+	depends on SMP
+	default y
+	help
+	  SMT scheduler support improves the CPU scheduler's decision making
+	  when dealing with UltraSPARC cpus at a cost of slightly increased
+	  overhead in some places. If unsure say N here.
+
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
 
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 069d49777b2a..f819a9663a8d 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc2
-# Tue Feb 7 17:47:18 2006
+# Linux kernel version: 2.6.16
+# Mon Mar 20 01:23:21 2006
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -115,14 +115,20 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_HUGETLB_PAGE_SIZE_4MB=y
 # CONFIG_HUGETLB_PAGE_SIZE_512K is not set
 # CONFIG_HUGETLB_PAGE_SIZE_64K is not set
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_LARGE_ALLOCS=y
 CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
+# CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_MEMORY_HOTPLUG=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_SBUS=y
 CONFIG_SBUSCHAR=y
@@ -655,6 +661,7 @@ CONFIG_SERIAL_SUNCORE=y
 CONFIG_SERIAL_SUNSU=y
 CONFIG_SERIAL_SUNSU_CONSOLE=y
 CONFIG_SERIAL_SUNSAB=m
+CONFIG_SERIAL_SUNHV=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
@@ -1116,11 +1123,7 @@ CONFIG_USB_HIDDEV=y
 # CONFIG_INFINIBAND is not set
 
 #
-# SN Devices
-#
-
-#
-# EDAC - error detection and reporting (RAS)
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
 #
 
 #
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 83d67eb18895..6f6816488b04 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -11,10 +11,12 @@ obj-y		:= process.o setup.o cpu.o idprom.o \
 	   traps.o devices.o auxio.o una_asm.o \
 	   irq.o ptrace.o time.o sys_sparc.o signal.o \
 	   unaligned.o central.o pci.o starfire.o semaphore.o \
-	   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
+	   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
+	   visemul.o
 
 obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
-			    pci_psycho.o pci_sabre.o pci_schizo.o
+			    pci_psycho.o pci_sabre.o pci_schizo.o \
+			    pci_sun4v.o pci_sun4v_asm.o
 obj-$(CONFIG_SMP)	 += smp.o trampoline.o
 obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
@@ -38,5 +40,5 @@ else
 CMODEL_CFLAG := -m64 -mcmodel=medlow
 endif
 
-head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
+head.o: head.S ttable.S itlb_miss.S dtlb_miss.S ktlb.S tsb.S \
 	etrap.S rtrap.S winfixup.S entry.S
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index 202a80c24b6f..d7caa60a0074 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -31,6 +31,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
 
 static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
 static int load_aout32_library(struct file*);
@@ -238,6 +239,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 		(current->mm->start_data = N_DATADDR(ex));
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
+	current->mm->free_area_cache = current->mm->mmap_base;
+	current->mm->cached_hole_size = 0;
 
 	current->mm->mmap = NULL;
 	compute_creds(bprm);
@@ -329,15 +332,8 @@ beyond_if:
 
 	current->mm->start_stack =
 		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
-	if (!(orig_thr_flags & _TIF_32BIT)) {
-		unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
-
-		__asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
-				     "membar #Sync"
-				     : /* no outputs */
-				     : "r" (pgd_cache),
-				       "r" (TSB_REG), "i" (ASI_DMMU));
-	}
+	tsb_context_switch(current->mm);
+
 	start_thread32(regs, ex.a_entry, current->mm->start_stack);
 	if (current->ptrace & PT_PTRACED)
 		send_sig(SIGTRAP, current, 0);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a1a12d2aa353..8a2abcce2737 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -153,7 +153,9 @@ MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
 #undef MODULE_DESCRIPTION
 #undef MODULE_AUTHOR
 
+#include <asm/a.out.h>
+
 #undef TASK_SIZE
-#define TASK_SIZE 0xf0000000
+#define TASK_SIZE STACK_TOP32
 
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 00eed88ef2e8..11cc0caef592 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -13,6 +13,7 @@
 #include <asm/system.h>
 #include <asm/fpumacro.h>
 #include <asm/cpudata.h>
+#include <asm/spitfire.h>
 
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 
@@ -71,6 +72,12 @@ void __init cpu_probe(void)
 	unsigned long ver, fpu_vers, manuf, impl, fprs;
 	int i;
 
+	if (tlb_type == hypervisor) {
+		sparc_cpu_type = "UltraSparc T1 (Niagara)";
+		sparc_fpu_type = "UltraSparc T1 integrated FPU";
+		return;
+	}
+
 	fprs = fprs_read();
 	fprs_write(FPRS_FEF);
 	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index df9a1ca8fd77..007e8922cd16 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
+#include <linux/bootmem.h>
 
 #include <asm/page.h>
 #include <asm/oplib.h>
@@ -20,6 +21,8 @@
 #include <asm/spitfire.h>
 #include <asm/timer.h>
 #include <asm/cpudata.h>
+#include <asm/vdev.h>
+#include <asm/irq.h>
 
 /* Used to synchronize acceses to NatSemi SUPER I/O chip configure
  * operations in asm/ns87303.h
@@ -29,13 +32,158 @@ DEFINE_SPINLOCK(ns87303_lock);
 extern void cpu_probe(void);
 extern void central_probe(void);
 
-static char *cpu_mid_prop(void)
+u32 sun4v_vdev_devhandle;
+int sun4v_vdev_root;
+
+struct vdev_intmap {
+	unsigned int phys;
+	unsigned int irq;
+	unsigned int cnode;
+	unsigned int cinterrupt;
+};
+
+struct vdev_intmask {
+	unsigned int phys;
+	unsigned int interrupt;
+	unsigned int __unused;
+};
+
+static struct vdev_intmap *vdev_intmap;
+static int vdev_num_intmap;
+static struct vdev_intmask vdev_intmask;
+
+static void __init sun4v_virtual_device_probe(void)
+{
+	struct linux_prom64_registers regs;
+	struct vdev_intmap *ip;
+	int node, sz, err;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	node = prom_getchild(prom_root_node);
+	node = prom_searchsiblings(node, "virtual-devices");
+	if (!node) {
+		prom_printf("SUN4V: Fatal error, no virtual-devices node.\n");
+		prom_halt();
+	}
+
+	sun4v_vdev_root = node;
+
+	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
+	sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;
+
+	sz = prom_getproplen(node, "interrupt-map");
+	if (sz <= 0) {
+		prom_printf("SUN4V: Error, no vdev interrupt-map.\n");
+		prom_halt();
+	}
+
+	if ((sz % sizeof(*ip)) != 0) {
+		prom_printf("SUN4V: Bogus interrupt-map property size %d\n",
+			    sz);
+		prom_halt();
+	}
+
+	vdev_intmap = ip = alloc_bootmem_low_pages(sz);
+	if (!vdev_intmap) {
+		prom_printf("SUN4V: Error, cannot allocate vdev_intmap.\n");
+		prom_halt();
+	}
+
+	err = prom_getproperty(node, "interrupt-map", (char *) ip, sz);
+	if (err == -1) {
+		prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n");
+		prom_halt();
+	}
+	if (err != sz) {
+		prom_printf("SUN4V: Inconsistent interrupt-map size, "
+			    "proplen(%d) vs getprop(%d).\n", sz,err);
+		prom_halt();
+	}
+
+	vdev_num_intmap = err / sizeof(*ip);
+
+	err = prom_getproperty(node, "interrupt-map-mask",
+			       (char *) &vdev_intmask,
+			       sizeof(vdev_intmask));
+	if (err <= 0) {
+		prom_printf("SUN4V: Fatal error, no vdev "
+			    "interrupt-map-mask.\n");
+		prom_halt();
+	}
+	if (err % sizeof(vdev_intmask)) {
+		prom_printf("SUN4V: Bogus interrupt-map-mask "
+			    "property size %d\n", err);
+		prom_halt();
+	}
+
+	printk("SUN4V: virtual-devices devhandle[%x]\n",
+	       sun4v_vdev_devhandle);
+}
+
+unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
+{
+	unsigned int irq, reg;
+	int err, i;
+
+	err = prom_getproperty(dev_node, "interrupts",
+			       (char *) &irq, sizeof(irq));
+	if (err <= 0) {
+		printk("VDEV: Cannot get \"interrupts\" "
+		       "property for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	err = prom_getproperty(dev_node, "reg",
+			       (char *) &reg, sizeof(reg));
+	if (err <= 0) {
+		printk("VDEV: Cannot get \"reg\" "
+		       "property for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	for (i = 0; i < vdev_num_intmap; i++) {
+		if (vdev_intmap[i].phys == (reg & vdev_intmask.phys) &&
+		    vdev_intmap[i].irq == (irq & vdev_intmask.interrupt)) {
+			irq = vdev_intmap[i].cinterrupt;
+			break;
+		}
+	}
+
+	if (i == vdev_num_intmap) {
+		printk("VDEV: No matching interrupt map entry "
+		       "for OBP node %x\n", dev_node);
+		return 0;
+	}
+
+	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 5, 0);
+}
+
+static const char *cpu_mid_prop(void)
 {
 	if (tlb_type == spitfire)
 		return "upa-portid";
 	return "portid";
 }
 
+static int get_cpu_mid(int prom_node)
+{
+	if (tlb_type == hypervisor) {
+		struct linux_prom64_registers reg;
+
+		if (prom_getproplen(prom_node, "cpuid") == 4)
+			return prom_getintdefault(prom_node, "cpuid", 0);
+
+		prom_getproperty(prom_node, "reg", (char *) &reg, sizeof(reg));
+		return (reg.phys_addr >> 32) & 0x0fffffffUL;
+	} else {
+		const char *prop_name = cpu_mid_prop();
+
+		return prom_getintdefault(prom_node, prop_name, 0);
+	}
+}
+
 static int check_cpu_node(int nd, int *cur_inst,
 			  int (*compare)(int, int, void *), void *compare_arg,
 			  int *prom_node, int *mid)
@@ -50,7 +198,7 @@ static int check_cpu_node(int nd, int *cur_inst,
 	if (prom_node)
 		*prom_node = nd;
 	if (mid)
-		*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+		*mid = get_cpu_mid(nd);
 	return 0;
 }
 
@@ -105,7 +253,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg)
 	int desired_mid = (int) (long) _arg;
 	int this_mid;
 
-	this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+	this_mid = get_cpu_mid(nd);
 	if (this_mid == desired_mid)
 		return 0;
 	return -ENODEV;
@@ -126,7 +274,8 @@ void __init device_scan(void)
 
 #ifndef CONFIG_SMP
 	{
-		int err, cpu_node;
+		int err, cpu_node, def;
+
 		err = cpu_find_by_instance(0, &cpu_node, NULL);
 		if (err) {
 			prom_printf("No cpu nodes, cannot continue\n");
@@ -135,21 +284,40 @@ void __init device_scan(void)
 		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
 							    "clock-frequency",
 							    0);
+
+		def = ((tlb_type == hypervisor) ?
+		       (8 * 1024) :
+		       (16 * 1024));
 		cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
 							     "dcache-size",
-							     16 * 1024);
+							     def);
+
+		def = 32;
 		cpu_data(0).dcache_line_size =
-			prom_getintdefault(cpu_node, "dcache-line-size", 32);
+			prom_getintdefault(cpu_node, "dcache-line-size",
+					   def);
+
+		def = 16 * 1024;
 		cpu_data(0).icache_size = prom_getintdefault(cpu_node,
 							     "icache-size",
-							     16 * 1024);
+							     def);
+
+		def = 32;
 		cpu_data(0).icache_line_size =
-			prom_getintdefault(cpu_node, "icache-line-size", 32);
+			prom_getintdefault(cpu_node, "icache-line-size",
+					   def);
+
+		def = ((tlb_type == hypervisor) ?
+		       (3 * 1024 * 1024) :
+		       (4 * 1024 * 1024));
 		cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
 							     "ecache-size",
-							     4 * 1024 * 1024);
+							     def);
+
+		def = 64;
 		cpu_data(0).ecache_line_size =
-			prom_getintdefault(cpu_node, "ecache-line-size", 64);
+			prom_getintdefault(cpu_node, "ecache-line-size",
+					   def);
 		printk("CPU[0]: Caches "
 		       "D[sz(%d):line_sz(%d)] "
 		       "I[sz(%d):line_sz(%d)] "
@@ -160,6 +328,7 @@ void __init device_scan(void)
 	}
 #endif
 
+	sun4v_virtual_device_probe();
 	central_probe();
 
 	cpu_probe();
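The matching loop in sun4v_vdev_device_interrupt() above implements the usual OpenBoot interrupt-map rule: mask the child's ("reg", "interrupts") pair with interrupt-map-mask, then search the map for the entry with the same masked pair; that entry's cinterrupt is the parent interrupt. A standalone sketch of just that rule (types mirror the structs added above; the table contents would come from the firmware):

	struct vdev_intmap { unsigned int phys, irq, cnode, cinterrupt; };
	struct vdev_intmask { unsigned int phys, interrupt, __unused; };

	/* Return the parent interrupt for (reg, irq), or 0 if unmapped. */
	static unsigned int vdev_intmap_match(const struct vdev_intmap *map,
					      int num, struct vdev_intmask mask,
					      unsigned int reg, unsigned int irq)
	{
		int i;

		for (i = 0; i < num; i++) {
			if (map[i].phys == (reg & mask.phys) &&
			    map[i].irq == (irq & mask.interrupt))
				return map[i].cinterrupt;
		}
		return 0;
	}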
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
deleted file mode 100644
index acc889a7f9c1..000000000000
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ /dev/null
@@ -1,170 +0,0 @@
-/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
- * dtlb_backend.S: Back end to DTLB miss replacement strategy.
- *                 This is included directly into the trap table.
- *
- * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
- */
-
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-
-#define VALID_SZ_BITS	(_PAGE_VALID | _PAGE_SZBITS)
-
-#define VPTE_BITS		(_PAGE_CP | _PAGE_CV | _PAGE_P )
-#define VPTE_SHIFT		(PAGE_SHIFT - 3)
-
-/* Ways we can get here:
- *
- * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
- * 2) Nucleus loads and stores to/from user/kernel window save areas.
- * 3) VPTE misses from dtlb_base and itlb_base.
- *
- * We need to extract out the PMD and PGDIR indexes from the
- * linear virtual page table access address.  The PTE index
- * is at the bottom, but we are not concerned with it.  Bits
- * 0 to 2 are clear since each PTE is 8 bytes in size.  Each
- * PMD and PGDIR entry are 4 bytes in size.  Thus, this
- * address looks something like:
- *
- * |---------------------------------------------------------------|
- * |  ...   |    PGDIR index    |    PMD index    | PTE index  |   |
- * |---------------------------------------------------------------|
- *   63   F   E               D  C              B  A          3 2 0  <- bit nr
- *
- *  The variable bits above are defined as:
- *  A --> 3 + (PAGE_SHIFT - log2(8))
- *    --> 3 + (PAGE_SHIFT - 3) - 1
- *        (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
- *  B --> A + 1
- *  C --> B + (PAGE_SHIFT - log2(4))
- *    --> B + (PAGE_SHIFT - 2) - 1
- *        (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
- *  D --> C + 1
- *  E --> D + (PAGE_SHIFT - log2(4))
- *    --> D + (PAGE_SHIFT - 2) - 1
- *        (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
- *  F --> E + 1
- *
- * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants
- *  cancel out.)
- *
- * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
- * A --> 12
- * B --> 13
- * C --> 23
- * D --> 24
- * E --> 34
- * F --> 35
- *
- * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
- * A --> 15
- * B --> 16
- * C --> 29
- * D --> 30
- * E --> 43
- * F --> 44
- *
- * Because bits both above and below each PGDIR and PMD index need to
- * be masked out, and the index can be as long as 14 bits (when using a
- * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
- * to extract each index out.
- *
- * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
- * we try to avoid using them for the entire operation.  We could setup
- * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
- *
- * We need a mask covering bits B --> C and one covering D --> E.
- * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
- * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
- * The second in each set cannot be loaded with a single sethi
- * instruction, because the upper bits are past bit 32.  We would
- * need to use a sethi + a shift.
- *
- * For the time being, we use 2 shifts and a simple "and" mask.
- * We shift left to clear the bits above the index, we shift down
- * to clear the bits below the index (sans the log2(4 or 8) bits)
- * and a mask to clear the log2(4 or 8) bits.  We need therefore
- * define 4 shift counts, all of which are relative to PAGE_SHIFT.
- *
- * Although unsupportable for other reasons, this does mean that
- * 512K and 4MB page sizes would be generaally supported by the
- * kernel.  (ELF binaries would break with > 64K PAGE_SIZE since
- * the sections are only aligned that strongly).
- *
- * The operations performed for extraction are thus:
- *
- *      ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
- *
- */
-
-#define A (3 + (PAGE_SHIFT - 3) - 1)
-#define B (A + 1)
-#define C (B + (PAGE_SHIFT - 2) - 1)
-#define D (C + 1)
-#define E (D + (PAGE_SHIFT - 2) - 1)
-#define F (E + 1)
-
-#define PMD_SHIFT_LEFT     (64 - D)
-#define PMD_SHIFT_RIGHT    (64 - (D - B) - 2)
-#define PGDIR_SHIFT_LEFT   (64 - F)
-#define PGDIR_SHIFT_RIGHT  (64 - (F - D) - 2)
-#define LOW_MASK_BITS      0x3
-
-/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss	*/
-	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
-	add		%g3, %g3, %g5			! Compute VPTE base
-	cmp		%g4, %g5			! VPTE miss?
-	bgeu,pt		%xcc, 1f			! Continue here
-	 andcc		%g4, TAG_CONTEXT_BITS, %g5	! tl0 miss Nucleus test
-	ba,a,pt		%xcc, from_tl1_trap		! Fall to tl0 miss
-1:	sllx		%g6, VPTE_SHIFT, %g4		! Position TAG_ACCESS
-	or		%g4, %g5, %g4			! Prepare TAG_ACCESS
-
-/* TLB1 ** ICACHE line 2: Quick VPTE miss		*/
-	mov		TSB_REG, %g1			! Grab TSB reg
-	ldxa		[%g1] ASI_DMMU, %g5		! Doing PGD caching?
-	sllx		%g6, PMD_SHIFT_LEFT, %g1	! Position PMD offset
-	be,pn		%xcc, sparc64_vpte_nucleus	! Is it from Nucleus?
-	 srlx		%g1, PMD_SHIFT_RIGHT, %g1	! Mask PMD offset bits
-	brnz,pt		%g5, sparc64_vpte_continue	! Yep, go like smoke
-	 andn		%g1, LOW_MASK_BITS, %g1		! Final PMD mask
-	sllx		%g6, PGDIR_SHIFT_LEFT, %g5	! Position PGD offset
-
-/* TLB1 ** ICACHE line 3: Quick VPTE miss		*/
-	srlx		%g5, PGDIR_SHIFT_RIGHT, %g5	! Mask PGD offset bits
-	andn		%g5, LOW_MASK_BITS, %g5		! Final PGD mask
-	lduwa		[%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
-	brz,pn		%g5, vpte_noent			! Valid?
-sparc64_kpte_continue:
-	 sllx		%g5, 11, %g5			! Shift into place
-sparc64_vpte_continue:
-	lduwa		[%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
-	sllx		%g5, 11, %g5			! Shift into place
-	brz,pn		%g5, vpte_noent			! Valid?
-
-/* TLB1 ** ICACHE line 4: Quick VPTE miss		*/
-	 mov		(VALID_SZ_BITS >> 61), %g1	! upper vpte into %g1
-	sllx		%g1, 61, %g1			! finish calc
-	or		%g5, VPTE_BITS, %g5		! Prepare VPTE data
-	or		%g5, %g1, %g5			! ...
-	mov		TLB_SFSR, %g1			! Restore %g1 value
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Load VPTE into TLB
-	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
-	retry						! Load PTE once again
-
-#undef VALID_SZ_BITS
-#undef VPTE_SHIFT
-#undef VPTE_BITS
-#undef A
-#undef B
-#undef C
-#undef D
-#undef E
-#undef F
-#undef PMD_SHIFT_LEFT
-#undef PMD_SHIFT_RIGHT
-#undef PGDIR_SHIFT_LEFT
-#undef PGDIR_SHIFT_RIGHT
-#undef LOW_MASK_BITS
-
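The comment block in the file deleted above derives bit positions A through F from PAGE_SHIFT and tabulates them for 8K and 64K pages. That arithmetic checks out; a throwaway verification snippet (plain userspace C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int shift;

		for (shift = 13; shift <= 16; shift += 3) {	/* 8K, then 64K pages */
			int A = 3 + (shift - 3) - 1;
			int B = A + 1;
			int C = B + (shift - 2) - 1;
			int D = C + 1;
			int E = D + (shift - 2) - 1;
			int F = E + 1;
			/* Prints 12 13 23 24 34 35 for PAGE_SHIFT=13 and
			 * 15 16 29 30 43 44 for PAGE_SHIFT=16, matching the
			 * tables in the removed comment. */
			printf("PAGE_SHIFT=%d: %d %d %d %d %d %d\n",
			       shift, A, B, C, D, E, F);
		}
		return 0;
	}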
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
deleted file mode 100644
index 6528786840c0..000000000000
--- a/arch/sparc64/kernel/dtlb_base.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
- * dtlb_base.S:	Front end to DTLB miss replacement strategy.
- *              This is included directly into the trap table.
- *
- * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
- */
-
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-
-/* %g1	TLB_SFSR	(%g1 + %g1 == TLB_TAG_ACCESS)
- * %g2	(KERN_HIGHBITS | KERN_LOWBITS)
- * %g3	VPTE base	(0xfffffffe00000000)	Spitfire/Blackbird (44-bit VA space)
- *			(0xffe0000000000000)	Cheetah		   (64-bit VA space)
- * %g7	__pa(current->mm->pgd)
- *
- * The VPTE base value is completely magic, but note that
- * few places in the kernel other than these TLB miss
- * handlers know anything about the VPTE mechanism or
- * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
- * Consider the 44-bit VADDR Ultra-I/II case as an example:
- *
- * VA[0 :  (1<<43)] produce VPTE index [%g3                     :   0]
- * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
- *
- * For Cheetah's 64-bit VADDR space this is:
- *
- * VA[0 :  (1<<63)] produce VPTE index [%g3                     :   0]
- * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
- *
- * If you're paying attention you'll notice that this means half of
- * the VPTE table is above %g3 and half is below, low VA addresses
- * map progressively upwards from %g3, and high VA addresses map
- * progressively upwards towards %g3.  This trick was needed to make
- * the same 8 instruction handler work both for Spitfire/Blackbird's
- * peculiar VA space hole configuration and the full 64-bit VA space
- * one of Cheetah at the same time.
- */
-
-/* Ways we can get here:
- *
- * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
- * 2) Nucleus loads and stores to/from vmalloc() areas.
- * 3) User loads and stores.
- * 4) User space accesses by nucleus at tl0
- */
-
-#if PAGE_SHIFT == 13
-/*
- * To compute vpte offset, we need to do ((addr >> 13) << 3),
- * which can be optimized to (addr >> 10) if bits 10/11/12 can
- * be guaranteed to be 0 ... mmu_context.h does guarantee this
- * by only using 10 bits in the hwcontext value.
- */
-#define CREATE_VPTE_OFFSET1(r1, r2) nop
-#define CREATE_VPTE_OFFSET2(r1, r2) \
-				srax	r1, 10, r2
-#else
-#define CREATE_VPTE_OFFSET1(r1, r2) \
-				srax	r1, PAGE_SHIFT, r2
-#define CREATE_VPTE_OFFSET2(r1, r2) \
-				sllx	r2, 3, r2
-#endif
-
-/* DTLB ** ICACHE line 1: Quick user TLB misses		*/
-	mov		TLB_SFSR, %g1
-	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
-	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
-from_tl1_trap:
-	rdpr		%tl, %g5			! For TL==3 test
-	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
-	be,pn		%xcc, kvmap			! Yep, special processing
-	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
-	cmp		%g5, 4				! Last trap level?
-
-/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
-	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
-	 nop						! delay slot
-	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
-1:	brgez,pn	%g5, longpath			! Invalid, branch out
-	 nop						! Delay-slot
-9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry						! Trap return
-	nop
-
-/* DTLB ** ICACHE line 3: winfixups+real_faults		*/
-longpath:
-	rdpr		%pstate, %g5			! Move into alternate globals
-	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
-	rdpr		%tl, %g4			! See where we came from.
-	cmp		%g4, 1				! Is etrap/rtrap window fault?
-	mov		TLB_TAG_ACCESS, %g4		! Prepare for fault processing
-	ldxa		[%g4] ASI_DMMU, %g5		! Load faulting VA page
-	be,pt		%xcc, sparc64_realfault_common	! Jump to normal fault handling
-	 mov		FAULT_CODE_DTLB, %g4		! It was read from DTLB
-
-/* DTLB ** ICACHE line 4: Unused...	*/
-	ba,a,pt		%xcc, winfix_trampoline		! Call window fixup code
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-
-#undef CREATE_VPTE_OFFSET1
-#undef CREATE_VPTE_OFFSET2
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
new file mode 100644
index 000000000000..09a6a15a7105
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -0,0 +1,39 @@
+/* DTLB ** ICACHE line 1: Context 0 check and TSB load	*/
+	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
+	ldxa	[%g0] ASI_DMMU, %g6		! Get TAG TARGET
+	srlx	%g6, 48, %g5			! Get context
+	sllx	%g6, 22, %g6			! Zero out context
+	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
+	 srlx	%g6, 22, %g6			! Delay slot
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
+	cmp	%g4, %g6			! Compare TAG
+
+/* DTLB ** ICACHE line 2: TSB compare and TLB load	*/
+	bne,pn	%xcc, tsb_miss_dtlb		! Miss
+	 mov	FAULT_CODE_DTLB, %g3
+	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Load TLB
+	retry					! Trap done
+	nop
+	nop
+	nop
+	nop
+
+/* DTLB ** ICACHE line 3:				*/
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+/* DTLB ** ICACHE line 4:				*/
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
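In C terms, the first two I-cache lines of the new handler perform a one-entry TSB probe. A hedged sketch of that control flow, assuming TSB_LOAD_QUAD fetches the {tag, pte} pair (function names below are descriptive stand-ins, not kernel symbols):

	struct tsb_entry { unsigned long tag; unsigned long pte; };

	extern void kvmap_dtlb(void);		/* context 0: kernel/vmalloc path */
	extern void tsb_miss_dtlb(void);	/* software refill path */
	extern void dtlb_load_and_retry(unsigned long pte);	/* stxa + retry */

	static void dtlb_miss(const struct tsb_entry *e, unsigned long tag_target)
	{
		unsigned long ctx = tag_target >> 48;		/* srlx %g6, 48, %g5 */
		unsigned long tag = (tag_target << 22) >> 22;	/* zero out context */

		if (ctx == 0) {
			kvmap_dtlb();			/* brz,pn %g5, kvmap_dtlb */
			return;
		}
		if (e->tag == tag)			/* TSB_LOAD_QUAD + cmp */
			dtlb_load_and_retry(e->pte);	/* stxa ASI_DTLB_DATA_IN */
		else
			tsb_miss_dtlb();		/* bne,pn %xcc, tsb_miss_dtlb */
	}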
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 7991e919d8ab..c69504aa638f 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -277,10 +277,9 @@ static inline void *ebus_alloc(size_t size)
 {
 	void *mem;
 
-	mem = kmalloc(size, GFP_ATOMIC);
+	mem = kzalloc(size, GFP_ATOMIC);
 	if (!mem)
 		panic("ebus_alloc: out of memory");
-	memset((char *)mem, 0, size);
 	return mem;
 }
 
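kzalloc() is the zeroing variant of kmalloc(), so folding the memset() into the allocation is behavior-preserving. The equivalence, as a sketch:

	#include <linux/slab.h>
	#include <linux/string.h>

	static void *alloc_old_way(size_t size)	/* what ebus_alloc() did before */
	{
		void *mem = kmalloc(size, GFP_ATOMIC);

		if (mem)
			memset(mem, 0, size);
		return mem;
	}

	static void *alloc_new_way(size_t size)	/* what it does now */
	{
		return kzalloc(size, GFP_ATOMIC);
	}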
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index a73553ae7e53..6d0b3ed77a02 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -50,7 +50,8 @@ do_fpdis:
 	add		%g0, %g0, %g0
 	ba,a,pt		%xcc, rtrap_clr_l6
 
-1:	ldub		[%g6 + TI_FPSAVED], %g5
+1:	TRAP_LOAD_THREAD_REG(%g6, %g1)
+	ldub		[%g6 + TI_FPSAVED], %g5
 	wr		%g0, FPRS_FEF, %fprs
 	andcc		%g5, FPRS_FEF, %g0
 	be,a,pt		%icc, 1f
@@ -96,10 +97,22 @@ do_fpdis:
 	add		%g6, TI_FPREGS + 0x80, %g1
 	faddd		%f0, %f2, %f4
 	fmuld		%f0, %f2, %f6
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0xc0, %g2
 	faddd		%f0, %f2, %f8
@@ -125,11 +138,23 @@ do_fpdis:
 	fzero		%f32
 	mov		SECONDARY_CONTEXT, %g3
 	fzero		%f34
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	add		%g6, TI_FPREGS, %g1
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0x40, %g2
 	faddd		%f32, %f34, %f36
@@ -154,10 +179,22 @@ do_fpdis:
 	nop
 3:	mov		SECONDARY_CONTEXT, %g3
 	add		%g6, TI_FPREGS, %g1
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	mov		0x40, %g2
 	membar		#Sync
@@ -168,7 +205,13 @@ do_fpdis:
 	ldda		[%g1 + %g2] ASI_BLK_S, %f48
 	membar		#Sync
 fpdis_exit:
-	stxa		%g5, [%g3] ASI_DMMU
+
+661:	stxa		%g5, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g5, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 fpdis_exit2:
 	wr		%g7, 0, %gsr
@@ -189,6 +232,7 @@ fp_other_bounce:
 	.globl		do_fpother_check_fitos
 	.align		32
 do_fpother_check_fitos:
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	sethi		%hi(fp_other_bounce - 4), %g7
 	or		%g7, %lo(fp_other_bounce - 4), %g7
 
@@ -312,6 +356,7 @@ fitos_emul_fini:
 	.globl		do_fptrap
 	.align		32
 do_fptrap:
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
 	stx		%fsr, [%g6 + TI_XFSR]
 do_fptrap_after_fsr:
 	ldub		[%g6 + TI_FPSAVED], %g3
@@ -321,10 +366,22 @@ do_fptrap_after_fsr:
 	rd		%gsr, %g3
 	stx		%g3, [%g6 + TI_GSR]
 	mov		SECONDARY_CONTEXT, %g3
-	ldxa		[%g3] ASI_DMMU, %g5
+
+661:	ldxa		[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	ldxa		[%g3] ASI_MMU, %g5
+	.previous
+
 	sethi		%hi(sparc64_kern_sec_context), %g2
 	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa		%g2, [%g3] ASI_DMMU
+
+661:	stxa		%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g2, [%g3] ASI_MMU
+	.previous
+
 	membar		#Sync
 	add		%g6, TI_FPREGS, %g2
 	andcc		%g1, FPRS_DL, %g0
@@ -339,7 +396,13 @@ do_fptrap_after_fsr:
 	stda		%f48, [%g2 + %g3] ASI_BLK_S
5:	mov		SECONDARY_CONTEXT, %g1
 	membar		#Sync
-	stxa		%g5, [%g1] ASI_DMMU
+
+661:	stxa		%g5, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word		661b
+	stxa		%g5, [%g1] ASI_MMU
+	.previous
+
 	membar		#Sync
 	ba,pt		%xcc, etrap
 	wr		%g0, 0, %fprs
@@ -353,8 +416,6 @@ do_fptrap_after_fsr:
 	 *
 	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
-	 *
-	 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
 	 */
 	.text
 	.align		32
@@ -378,6 +439,8 @@ do_ivec:
 	sllx		%g2, %g4, %g2
 	sllx		%g4, 2, %g4
 
+	TRAP_LOAD_IRQ_WORK(%g6, %g1)
+
 	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
 	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
 	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
@@ -399,76 +462,6 @@ do_ivec_xcall:
 1:	jmpl		%g3, %g0
 	 nop
 
-	.globl		save_alternate_globals
-save_alternate_globals: /* %o0 = save_area */
-	rdpr		%pstate, %o5
-	andn		%o5, PSTATE_IE, %o1
-	wrpr		%o1, PSTATE_AG, %pstate
-	stx		%g0, [%o0 + 0x00]
-	stx		%g1, [%o0 + 0x08]
-	stx		%g2, [%o0 + 0x10]
-	stx		%g3, [%o0 + 0x18]
-	stx		%g4, [%o0 + 0x20]
-	stx		%g5, [%o0 + 0x28]
-	stx		%g6, [%o0 + 0x30]
-	stx		%g7, [%o0 + 0x38]
-	wrpr		%o1, PSTATE_IG, %pstate
-	stx		%g0, [%o0 + 0x40]
-	stx		%g1, [%o0 + 0x48]
-	stx		%g2, [%o0 + 0x50]
-	stx		%g3, [%o0 + 0x58]
-	stx		%g4, [%o0 + 0x60]
-	stx		%g5, [%o0 + 0x68]
-	stx		%g6, [%o0 + 0x70]
-	stx		%g7, [%o0 + 0x78]
-	wrpr		%o1, PSTATE_MG, %pstate
-	stx		%g0, [%o0 + 0x80]
-	stx		%g1, [%o0 + 0x88]
-	stx		%g2, [%o0 + 0x90]
-	stx		%g3, [%o0 + 0x98]
-	stx		%g4, [%o0 + 0xa0]
-	stx		%g5, [%o0 + 0xa8]
-	stx		%g6, [%o0 + 0xb0]
-	stx		%g7, [%o0 + 0xb8]
-	wrpr		%o5, 0x0, %pstate
-	retl
-	 nop
-
-	.globl		restore_alternate_globals
-restore_alternate_globals: /* %o0 = save_area */
-	rdpr		%pstate, %o5
-	andn		%o5, PSTATE_IE, %o1
-	wrpr		%o1, PSTATE_AG, %pstate
-	ldx		[%o0 + 0x00], %g0
-	ldx		[%o0 + 0x08], %g1
-	ldx		[%o0 + 0x10], %g2
-	ldx		[%o0 + 0x18], %g3
-	ldx		[%o0 + 0x20], %g4
-	ldx		[%o0 + 0x28], %g5
-	ldx		[%o0 + 0x30], %g6
-	ldx		[%o0 + 0x38], %g7
-	wrpr		%o1, PSTATE_IG, %pstate
-	ldx		[%o0 + 0x40], %g0
-	ldx		[%o0 + 0x48], %g1
-	ldx		[%o0 + 0x50], %g2
-	ldx		[%o0 + 0x58], %g3
-	ldx		[%o0 + 0x60], %g4
-	ldx		[%o0 + 0x68], %g5
-	ldx		[%o0 + 0x70], %g6
-	ldx		[%o0 + 0x78], %g7
-	wrpr		%o1, PSTATE_MG, %pstate
-	ldx		[%o0 + 0x80], %g0
-	ldx		[%o0 + 0x88], %g1
-	ldx		[%o0 + 0x90], %g2
-	ldx		[%o0 + 0x98], %g3
-	ldx		[%o0 + 0xa0], %g4
-	ldx		[%o0 + 0xa8], %g5
-	ldx		[%o0 + 0xb0], %g6
-	ldx		[%o0 + 0xb8], %g7
-	wrpr		%o5, 0x0, %pstate
-	retl
-	 nop
-
 	.globl		getcc, setcc
 getcc:
 	ldx		[%o0 + PT_V9_TSTATE], %o1
@@ -488,9 +481,24 @@ setcc:
 	retl
 	 stx		%o1, [%o0 + PT_V9_TSTATE]
 
-	.globl		utrap, utrap_ill
-utrap:	brz,pn		%g1, etrap
+	.globl		utrap_trap
+utrap_trap:		/* %g3=handler,%g4=level */
+	TRAP_LOAD_THREAD_REG(%g6, %g1)
+	ldx		[%g6 + TI_UTRAPS], %g1
+	brnz,pt		%g1, invoke_utrap
 	 nop
+
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	call		bad_trap
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+invoke_utrap:
+	sllx		%g3, 3, %g3
+	ldx		[%g1 + %g3], %g1
 	save		%sp, -128, %sp
 	rdpr		%tstate, %l6
 	rdpr		%cwp, %l7
@@ -500,17 +508,6 @@ utrap: brz,pn %g1, etrap
 	rdpr		%tnpc, %l7
 	wrpr		%g1, 0, %tnpc
 	done
-utrap_ill:
-	call		bad_trap
-	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 clr		%l6
-
-	/* XXX Here is stuff we still need to write... -DaveM XXX */
-	.globl		netbsd_syscall
-netbsd_syscall:
-	retl
-	 nop
 
 	/* We need to carefully read the error status, ACK
 	 * the errors, prevent recursive traps, and pass the
@@ -1001,7 +998,7 @@ dcpe_icpe_tl1_common:
 	 * %g3:		scratch
 	 * %g4:		AFSR
 	 * %g5:		AFAR
-	 * %g6:		current thread ptr
+	 * %g6:		unused, will have current thread ptr after etrap
 	 * %g7:		scratch
 	 */
 __cheetah_log_error:
@@ -1539,13 +1536,14 @@ ret_from_syscall:
 
 1:	b,pt		%xcc, ret_sys_call
 	 ldx		[%sp + PTREGS_OFF + PT_V9_I0], %o0
-sparc_exit:	wrpr	%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
+sparc_exit:	rdpr	%pstate, %g2
+		wrpr	%g2, PSTATE_IE, %pstate
 	rdpr		%otherwin, %g1
 	rdpr		%cansave, %g3
 	add		%g3, %g1, %g3
 	wrpr		%g3, 0x0, %cansave
 	wrpr		%g0, 0x0, %otherwin
-	wrpr		%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
+	wrpr		%g2, 0x0, %pstate
 	ba,pt		%xcc, sys_exit
 	 stb		%g0, [%g6 + TI_WSAVED]
 
@@ -1690,3 +1688,138 @@ __flushw_user: | |||
1690 | restore %g0, %g0, %g0 | 1688 | restore %g0, %g0, %g0 |
1691 | 2: retl | 1689 | 2: retl |
1692 | nop | 1690 | nop |
1691 | |||
1692 | #ifdef CONFIG_SMP | ||
1693 | .globl hard_smp_processor_id | ||
1694 | hard_smp_processor_id: | ||
1695 | #endif | ||
1696 | .globl real_hard_smp_processor_id | ||
1697 | real_hard_smp_processor_id: | ||
1698 | __GET_CPUID(%o0) | ||
1699 | retl | ||
1700 | nop | ||
1701 | |||
1702 | /* %o0: devhandle | ||
1703 | * %o1: devino | ||
1704 | * | ||
1705 | * returns %o0: sysino | ||
1706 | */ | ||
1707 | .globl sun4v_devino_to_sysino | ||
1708 | sun4v_devino_to_sysino: | ||
1709 | mov HV_FAST_INTR_DEVINO2SYSINO, %o5 | ||
1710 | ta HV_FAST_TRAP | ||
1711 | retl | ||
1712 | mov %o1, %o0 | ||
1713 | |||
1714 | /* %o0: sysino | ||
1715 | * | ||
1716 | * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1717 | */ | ||
1718 | .globl sun4v_intr_getenabled | ||
1719 | sun4v_intr_getenabled: | ||
1720 | mov HV_FAST_INTR_GETENABLED, %o5 | ||
1721 | ta HV_FAST_TRAP | ||
1722 | retl | ||
1723 | mov %o1, %o0 | ||
1724 | |||
1725 | /* %o0: sysino | ||
1726 | * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1727 | */ | ||
1728 | .globl sun4v_intr_setenabled | ||
1729 | sun4v_intr_setenabled: | ||
1730 | mov HV_FAST_INTR_SETENABLED, %o5 | ||
1731 | ta HV_FAST_TRAP | ||
1732 | retl | ||
1733 | nop | ||
1734 | |||
1735 | /* %o0: sysino | ||
1736 | * | ||
1737 | * returns %o0: intr_state (HV_INTR_STATE_*) | ||
1738 | */ | ||
1739 | .globl sun4v_intr_getstate | ||
1740 | sun4v_intr_getstate: | ||
1741 | mov HV_FAST_INTR_GETSTATE, %o5 | ||
1742 | ta HV_FAST_TRAP | ||
1743 | retl | ||
1744 | mov %o1, %o0 | ||
1745 | |||
1746 | /* %o0: sysino | ||
1747 | * %o1: intr_state (HV_INTR_STATE_*) | ||
1748 | */ | ||
1749 | .globl sun4v_intr_setstate | ||
1750 | sun4v_intr_setstate: | ||
1751 | mov HV_FAST_INTR_SETSTATE, %o5 | ||
1752 | ta HV_FAST_TRAP | ||
1753 | retl | ||
1754 | nop | ||
1755 | |||
1756 | /* %o0: sysino | ||
1757 | * | ||
1758 | * returns %o0: cpuid | ||
1759 | */ | ||
1760 | .globl sun4v_intr_gettarget | ||
1761 | sun4v_intr_gettarget: | ||
1762 | mov HV_FAST_INTR_GETTARGET, %o5 | ||
1763 | ta HV_FAST_TRAP | ||
1764 | retl | ||
1765 | mov %o1, %o0 | ||
1766 | |||
1767 | /* %o0: sysino | ||
1768 | * %o1: cpuid | ||
1769 | */ | ||
1770 | .globl sun4v_intr_settarget | ||
1771 | sun4v_intr_settarget: | ||
1772 | mov HV_FAST_INTR_SETTARGET, %o5 | ||
1773 | ta HV_FAST_TRAP | ||
1774 | retl | ||
1775 | nop | ||
1776 | |||
1777 | /* %o0: type | ||
1778 | * %o1: queue paddr | ||
1779 | * %o2: num queue entries | ||
1780 | * | ||
1781 | * returns %o0: status | ||
1782 | */ | ||
1783 | .globl sun4v_cpu_qconf | ||
1784 | sun4v_cpu_qconf: | ||
1785 | mov HV_FAST_CPU_QCONF, %o5 | ||
1786 | ta HV_FAST_TRAP | ||
1787 | retl | ||
1788 | nop | ||
1789 | |||
1790 | /* returns %o0: status | ||
1791 | */ | ||
1792 | .globl sun4v_cpu_yield | ||
1793 | sun4v_cpu_yield: | ||
1794 | mov HV_FAST_CPU_YIELD, %o5 | ||
1795 | ta HV_FAST_TRAP | ||
1796 | retl | ||
1797 | nop | ||
1798 | |||
1799 | /* %o0: num cpus in cpu list | ||
1800 | * %o1: cpu list paddr | ||
1801 | * %o2: mondo block paddr | ||
1802 | * | ||
1803 | * returns %o0: status | ||
1804 | */ | ||
1805 | .globl sun4v_cpu_mondo_send | ||
1806 | sun4v_cpu_mondo_send: | ||
1807 | mov HV_FAST_CPU_MONDO_SEND, %o5 | ||
1808 | ta HV_FAST_TRAP | ||
1809 | retl | ||
1810 | nop | ||
1811 | |||
1812 | /* %o0: CPU ID | ||
1813 | * | ||
1814 | * returns %o0: -status if status non-zero, else | ||
1815 | * %o0: cpu state as HV_CPU_STATE_* | ||
1816 | */ | ||
1817 | .globl sun4v_cpu_state | ||
1818 | sun4v_cpu_state: | ||
1819 | mov HV_FAST_CPU_STATE, %o5 | ||
1820 | ta HV_FAST_TRAP | ||
1821 | brnz,pn %o0, 1f | ||
1822 | sub %g0, %o0, %o0 | ||
1823 | mov %o1, %o0 | ||
1824 | 1: retl | ||
1825 | nop | ||
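All of these stubs share one calling convention: the fast-trap function number goes in %o5, "ta HV_FAST_TRAP" enters the hypervisor, the status comes back in %o0, and a single return value comes back in %o1 (hence the "mov %o1, %o0" in the getters). A hedged sketch of the C-visible surface, mirroring how enable_irq() uses it later in this patch; the prototypes are inferred from the register usage above, not quoted from a header, and HV_EOK/HV_INTR_ENABLED come from the hypervisor definitions added elsewhere in this series:

extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
                                            unsigned long devino);
extern int sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
extern int sun4v_intr_setenabled(unsigned long sysino,
                                 unsigned long intr_enabled);

static void example_route_and_enable(unsigned long devhandle,
                                     unsigned long devino,
                                     unsigned long cpuid)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
        int err;

        err = sun4v_intr_settarget(sysino, cpuid);      /* status, HV_EOK == 0 */
        if (err != HV_EOK)
                printk("sun4v_intr_settarget(%lx,%lu): err(%d)\n",
                       sysino, cpuid, err);
        err = sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk("sun4v_intr_setenabled(%lx): err(%d)\n", sysino, err);
}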
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 0d8eba21111b..149383835c25 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S | |||
@@ -31,6 +31,7 @@ | |||
31 | .globl etrap, etrap_irq, etraptl1 | 31 | .globl etrap, etrap_irq, etraptl1 |
32 | etrap: rdpr %pil, %g2 | 32 | etrap: rdpr %pil, %g2 |
33 | etrap_irq: | 33 | etrap_irq: |
34 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
34 | rdpr %tstate, %g1 | 35 | rdpr %tstate, %g1 |
35 | sllx %g2, 20, %g3 | 36 | sllx %g2, 20, %g3 |
36 | andcc %g1, TSTATE_PRIV, %g0 | 37 | andcc %g1, TSTATE_PRIV, %g0 |
@@ -54,7 +55,31 @@ etrap_irq: | |||
54 | rd %y, %g3 | 55 | rd %y, %g3 |
55 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] | 56 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] |
56 | st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] | 57 | st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] |
57 | save %g2, -STACK_BIAS, %sp ! Ordering here is critical | 58 | |
59 | rdpr %cansave, %g1 | ||
60 | brnz,pt %g1, etrap_save | ||
61 | nop | ||
62 | |||
63 | rdpr %cwp, %g1 | ||
64 | add %g1, 2, %g1 | ||
65 | wrpr %g1, %cwp | ||
66 | be,pt %xcc, etrap_user_spill | ||
67 | mov ASI_AIUP, %g3 | ||
68 | |||
69 | rdpr %otherwin, %g3 | ||
70 | brz %g3, etrap_kernel_spill | ||
71 | mov ASI_AIUS, %g3 | ||
72 | |||
73 | etrap_user_spill: | ||
74 | |||
75 | wr %g3, 0x0, %asi | ||
76 | ldx [%g6 + TI_FLAGS], %g3 | ||
77 | and %g3, _TIF_32BIT, %g3 | ||
78 | brnz,pt %g3, etrap_user_spill_32bit | ||
79 | nop | ||
80 | ba,a,pt %xcc, etrap_user_spill_64bit | ||
81 | |||
82 | etrap_save: save %g2, -STACK_BIAS, %sp | ||
58 | mov %g6, %l6 | 83 | mov %g6, %l6 |
59 | 84 | ||
60 | bne,pn %xcc, 3f | 85 | bne,pn %xcc, 3f |
@@ -70,42 +95,56 @@ etrap_irq: | |||
70 | wrpr %g2, 0, %wstate | 95 | wrpr %g2, 0, %wstate |
71 | sethi %hi(sparc64_kern_pri_context), %g2 | 96 | sethi %hi(sparc64_kern_pri_context), %g2 |
72 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 | 97 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 |
73 | stxa %g3, [%l4] ASI_DMMU | 98 | |
74 | flush %l6 | 99 | 661: stxa %g3, [%l4] ASI_DMMU |
75 | wr %g0, ASI_AIUS, %asi | 100 | .section .sun4v_1insn_patch, "ax" |
76 | 2: wrpr %g0, 0x0, %tl | 101 | .word 661b |
77 | mov %g4, %l4 | 102 | stxa %g3, [%l4] ASI_MMU |
103 | .previous | ||
104 | |||
105 | sethi %hi(KERNBASE), %l4 | ||
106 | flush %l4 | ||
107 | mov ASI_AIUS, %l7 | ||
108 | 2: mov %g4, %l4 | ||
78 | mov %g5, %l5 | 109 | mov %g5, %l5 |
110 | add %g7, 4, %l2 | ||
111 | |||
112 | /* Go to trap time globals so we can save them. */ | ||
113 | 661: wrpr %g0, ETRAP_PSTATE1, %pstate | ||
114 | .section .sun4v_1insn_patch, "ax" | ||
115 | .word 661b | ||
116 | SET_GL(0) | ||
117 | .previous | ||
79 | 118 | ||
80 | mov %g7, %l2 | ||
81 | wrpr %g0, ETRAP_PSTATE1, %pstate | ||
82 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] | 119 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] |
83 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] | 120 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] |
121 | sllx %l7, 24, %l7 | ||
84 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] | 122 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] |
123 | rdpr %cwp, %l0 | ||
85 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] | 124 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] |
86 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] | 125 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] |
87 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] | 126 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] |
88 | |||
89 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] | 127 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] |
128 | or %l7, %l0, %l7 | ||
129 | sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 | ||
130 | or %l7, %l0, %l7 | ||
131 | wrpr %l2, %tnpc | ||
132 | wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate | ||
90 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] | 133 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] |
91 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] | 134 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] |
92 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] | 135 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] |
93 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] | 136 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] |
94 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] | 137 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] |
95 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] | 138 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] |
96 | |||
97 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] | 139 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] |
98 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] | ||
99 | wrpr %g0, ETRAP_PSTATE2, %pstate | ||
100 | mov %l6, %g6 | 140 | mov %l6, %g6 |
101 | #ifdef CONFIG_SMP | 141 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] |
102 | mov TSB_REG, %g3 | 142 | LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) |
103 | ldxa [%g3] ASI_IMMU, %g5 | 143 | ldx [%g6 + TI_TASK], %g4 |
104 | #endif | 144 | done |
105 | jmpl %l2 + 0x4, %g0 | ||
106 | ldx [%g6 + TI_TASK], %g4 | ||
107 | 145 | ||
108 | 3: ldub [%l6 + TI_FPDEPTH], %l5 | 146 | 3: mov ASI_P, %l7 |
147 | ldub [%l6 + TI_FPDEPTH], %l5 | ||
109 | add %l6, TI_FPSAVED + 1, %l4 | 148 | add %l6, TI_FPSAVED + 1, %l4 |
110 | srl %l5, 1, %l3 | 149 | srl %l5, 1, %l3 |
111 | add %l5, 2, %l5 | 150 | add %l5, 2, %l5 |
@@ -125,6 +164,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
125 | * 0x58 TL4's TT | 164 | * 0x58 TL4's TT |
126 | * 0x60 TL | 165 | * 0x60 TL |
127 | */ | 166 | */ |
167 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
128 | sub %sp, ((4 * 8) * 4) + 8, %g2 | 168 | sub %sp, ((4 * 8) * 4) + 8, %g2 |
129 | rdpr %tl, %g1 | 169 | rdpr %tl, %g1 |
130 | 170 | ||
@@ -148,6 +188,11 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
148 | rdpr %tt, %g3 | 188 | rdpr %tt, %g3 |
149 | stx %g3, [%g2 + STACK_BIAS + 0x38] | 189 | stx %g3, [%g2 + STACK_BIAS + 0x38] |
150 | 190 | ||
191 | sethi %hi(is_sun4v), %g3 | ||
192 | lduw [%g3 + %lo(is_sun4v)], %g3 | ||
193 | brnz,pn %g3, finish_tl1_capture | ||
194 | nop | ||
195 | |||
151 | wrpr %g0, 3, %tl | 196 | wrpr %g0, 3, %tl |
152 | rdpr %tstate, %g3 | 197 | rdpr %tstate, %g3 |
153 | stx %g3, [%g2 + STACK_BIAS + 0x40] | 198 | stx %g3, [%g2 + STACK_BIAS + 0x40] |
@@ -168,91 +213,20 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | |||
168 | rdpr %tt, %g3 | 213 | rdpr %tt, %g3 |
169 | stx %g3, [%g2 + STACK_BIAS + 0x78] | 214 | stx %g3, [%g2 + STACK_BIAS + 0x78] |
170 | 215 | ||
171 | wrpr %g1, %tl | ||
172 | stx %g1, [%g2 + STACK_BIAS + 0x80] | 216 | stx %g1, [%g2 + STACK_BIAS + 0x80] |
173 | 217 | ||
218 | finish_tl1_capture: | ||
219 | wrpr %g0, 1, %tl | ||
220 | 661: nop | ||
221 | .section .sun4v_1insn_patch, "ax" | ||
222 | .word 661b | ||
223 | SET_GL(1) | ||
224 | .previous | ||
225 | |||
174 | rdpr %tstate, %g1 | 226 | rdpr %tstate, %g1 |
175 | sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 | 227 | sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 |
176 | ba,pt %xcc, 1b | 228 | ba,pt %xcc, 1b |
177 | andcc %g1, TSTATE_PRIV, %g0 | 229 | andcc %g1, TSTATE_PRIV, %g0 |
178 | 230 | ||
179 | .align 64 | ||
180 | .globl scetrap | ||
181 | scetrap: rdpr %pil, %g2 | ||
182 | rdpr %tstate, %g1 | ||
183 | sllx %g2, 20, %g3 | ||
184 | andcc %g1, TSTATE_PRIV, %g0 | ||
185 | or %g1, %g3, %g1 | ||
186 | bne,pn %xcc, 1f | ||
187 | sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2 | ||
188 | wrpr %g0, 7, %cleanwin | ||
189 | |||
190 | sllx %g1, 51, %g3 | ||
191 | sethi %hi(TASK_REGOFF), %g2 | ||
192 | or %g2, %lo(TASK_REGOFF), %g2 | ||
193 | brlz,pn %g3, 1f | ||
194 | add %g6, %g2, %g2 | ||
195 | wr %g0, 0, %fprs | ||
196 | 1: rdpr %tpc, %g3 | ||
197 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] | ||
198 | |||
199 | rdpr %tnpc, %g1 | ||
200 | stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] | ||
201 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] | ||
202 | save %g2, -STACK_BIAS, %sp ! Ordering here is critical | ||
203 | mov %g6, %l6 | ||
204 | bne,pn %xcc, 2f | ||
205 | mov ASI_P, %l7 | ||
206 | rdpr %canrestore, %g3 | ||
207 | |||
208 | rdpr %wstate, %g2 | ||
209 | wrpr %g0, 0, %canrestore | ||
210 | sll %g2, 3, %g2 | ||
211 | mov PRIMARY_CONTEXT, %l4 | ||
212 | wrpr %g3, 0, %otherwin | ||
213 | wrpr %g2, 0, %wstate | ||
214 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
215 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 | ||
216 | stxa %g3, [%l4] ASI_DMMU | ||
217 | flush %l6 | ||
218 | |||
219 | mov ASI_AIUS, %l7 | ||
220 | 2: mov %g4, %l4 | ||
221 | mov %g5, %l5 | ||
222 | add %g7, 0x4, %l2 | ||
223 | wrpr %g0, ETRAP_PSTATE1, %pstate | ||
224 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] | ||
225 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] | ||
226 | sllx %l7, 24, %l7 | ||
227 | |||
228 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] | ||
229 | rdpr %cwp, %l0 | ||
230 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] | ||
231 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] | ||
232 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] | ||
233 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] | ||
234 | or %l7, %l0, %l7 | ||
235 | sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 | ||
236 | |||
237 | or %l7, %l0, %l7 | ||
238 | wrpr %l2, %tnpc | ||
239 | wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate | ||
240 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] | ||
241 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] | ||
242 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] | ||
243 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] | ||
244 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] | ||
245 | |||
246 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] | ||
247 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] | ||
248 | mov %l6, %g6 | ||
249 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] | ||
250 | #ifdef CONFIG_SMP | ||
251 | mov TSB_REG, %g3 | ||
252 | ldxa [%g3] ASI_IMMU, %g5 | ||
253 | #endif | ||
254 | ldx [%g6 + TI_TASK], %g4 | ||
255 | done | ||
256 | |||
257 | #undef TASK_REGOFF | 231 | #undef TASK_REGOFF |
258 | #undef ETRAP_PSTATE1 | 232 | #undef ETRAP_PSTATE1 |
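The ".sun4v_1insn_patch" sections scattered through etrap.S are the key mechanism here: each entry records the address of a sun4u instruction (the "661:" label) plus the sun4v instruction that should replace it, and early boot rewrites the kernel text in place when running under a hypervisor. A hedged C sketch of that walk; the entry layout is inferred from the ".word 661b" plus one replacement opcode emitted above, and the linker symbols are assumptions (the two-instruction ".sun4v_2insn_patch" variant used in ktlb.S below works the same way with two replacement words):

struct sun4v_1insn_patch_entry {
        unsigned int    addr;   /* address of the 661: instruction */
        unsigned int    insn;   /* sun4v replacement opcode        */
};

extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
        __sun4v_1insn_patch_end;        /* assumed linker-provided bounds */

static void sun4v_patch_kernel_text(void)
{
        struct sun4v_1insn_patch_entry *p = &__sun4v_1insn_patch;

        while (p < &__sun4v_1insn_patch_end) {
                unsigned long addr = (unsigned long) p->addr;

                *(unsigned int *) addr = p->insn;  /* overwrite in place */
                __asm__ __volatile__("flush %0"    /* keep I-cache coherent */
                                     : : "r" (addr));
                p++;
        }
}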
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index b49dcd4504b0..3eadac5e171e 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/head.h> | 26 | #include <asm/head.h> |
27 | #include <asm/ttable.h> | 27 | #include <asm/ttable.h> |
28 | #include <asm/mmu.h> | 28 | #include <asm/mmu.h> |
29 | #include <asm/cpudata.h> | ||
29 | 30 | ||
30 | /* This section from _start to sparc64_boot_end should fit into | 31 | /* This section from _start to sparc64_boot_end should fit into |
31 | * 0x0000000000404000 to 0x0000000000408000. | 32 | * 0x0000000000404000 to 0x0000000000408000. |
@@ -94,12 +95,17 @@ sparc64_boot: | |||
94 | wrpr %g1, 0x0, %pstate | 95 | wrpr %g1, 0x0, %pstate |
95 | ba,a,pt %xcc, 1f | 96 | ba,a,pt %xcc, 1f |
96 | 97 | ||
97 | .globl prom_finddev_name, prom_chosen_path | 98 | .globl prom_finddev_name, prom_chosen_path, prom_root_node |
98 | .globl prom_getprop_name, prom_mmu_name | 99 | .globl prom_getprop_name, prom_mmu_name, prom_peer_name |
99 | .globl prom_callmethod_name, prom_translate_name | 100 | .globl prom_callmethod_name, prom_translate_name, prom_root_compatible |
100 | .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache | 101 | .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache |
101 | .globl prom_boot_mapped_pc, prom_boot_mapping_mode | 102 | .globl prom_boot_mapped_pc, prom_boot_mapping_mode |
102 | .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low | 103 | .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low |
104 | .globl is_sun4v | ||
105 | prom_peer_name: | ||
106 | .asciz "peer" | ||
107 | prom_compatible_name: | ||
108 | .asciz "compatible" | ||
103 | prom_finddev_name: | 109 | prom_finddev_name: |
104 | .asciz "finddevice" | 110 | .asciz "finddevice" |
105 | prom_chosen_path: | 111 | prom_chosen_path: |
@@ -116,7 +122,13 @@ prom_map_name: | |||
116 | .asciz "map" | 122 | .asciz "map" |
117 | prom_unmap_name: | 123 | prom_unmap_name: |
118 | .asciz "unmap" | 124 | .asciz "unmap" |
125 | prom_sun4v_name: | ||
126 | .asciz "sun4v" | ||
119 | .align 4 | 127 | .align 4 |
128 | prom_root_compatible: | ||
129 | .skip 64 | ||
130 | prom_root_node: | ||
131 | .word 0 | ||
120 | prom_mmu_ihandle_cache: | 132 | prom_mmu_ihandle_cache: |
121 | .word 0 | 133 | .word 0 |
122 | prom_boot_mapped_pc: | 134 | prom_boot_mapped_pc: |
@@ -128,8 +140,54 @@ prom_boot_mapping_phys_high: | |||
128 | .xword 0 | 140 | .xword 0 |
129 | prom_boot_mapping_phys_low: | 141 | prom_boot_mapping_phys_low: |
130 | .xword 0 | 142 | .xword 0 |
143 | is_sun4v: | ||
144 | .word 0 | ||
131 | 1: | 145 | 1: |
132 | rd %pc, %l0 | 146 | rd %pc, %l0 |
147 | |||
148 | mov (1b - prom_peer_name), %l1 | ||
149 | sub %l0, %l1, %l1 | ||
150 | mov 0, %l2 | ||
151 | |||
152 | /* prom_root_node = prom_peer(0) */ | ||
153 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" | ||
154 | mov 1, %l3 | ||
155 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
156 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
157 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 | ||
158 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
159 | call %l7 | ||
160 | add %sp, (2047 + 128), %o0 ! argument array | ||
161 | |||
162 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node | ||
163 | mov (1b - prom_root_node), %l1 | ||
164 | sub %l0, %l1, %l1 | ||
165 | stw %l4, [%l1] | ||
166 | |||
167 | mov (1b - prom_getprop_name), %l1 | ||
168 | mov (1b - prom_compatible_name), %l2 | ||
169 | mov (1b - prom_root_compatible), %l5 | ||
170 | sub %l0, %l1, %l1 | ||
171 | sub %l0, %l2, %l2 | ||
172 | sub %l0, %l5, %l5 | ||
173 | |||
174 | /* prom_getproperty(prom_root_node, "compatible", | ||
175 | * &prom_root_compatible, 64) | ||
176 | */ | ||
177 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
178 | mov 4, %l3 | ||
179 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
180 | mov 1, %l3 | ||
181 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
182 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node | ||
183 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" | ||
184 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible | ||
185 | mov 64, %l3 | ||
186 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size | ||
187 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
188 | call %l7 | ||
189 | add %sp, (2047 + 128), %o0 ! argument array | ||
190 | |||
133 | mov (1b - prom_finddev_name), %l1 | 191 | mov (1b - prom_finddev_name), %l1 |
134 | mov (1b - prom_chosen_path), %l2 | 192 | mov (1b - prom_chosen_path), %l2 |
135 | mov (1b - prom_boot_mapped_pc), %l3 | 193 | mov (1b - prom_boot_mapped_pc), %l3 |
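Every PROM call in this boot path builds the same IEEE 1275 client-interface argument array on the stack at %sp + 2047 + 128 (stack bias plus register save area) and calls through the client-interface entry point held in %l7. A hedged sketch of the cell layout implied by the 0x00/0x08/0x10/0x18... offsets and the "! service", "! num_args" annotations above (struct name illustrative; each cell is one 64-bit word):

struct p1275_cells {
        unsigned long service;      /* 0x00: service name, e.g. "peer"  */
        unsigned long num_args;     /* 0x08: count of input cells       */
        unsigned long num_rets;     /* 0x10: count of return cells      */
        unsigned long cells[5];     /* 0x18+: inputs, then return slots */
};

So the prom_getproperty(prom_root_node, "compatible", &prom_root_compatible, 64) call above is four inputs and one return, with the property value written through the buffer passed as arg3.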
@@ -238,6 +296,27 @@ prom_boot_mapping_phys_low: | |||
238 | add %sp, (192 + 128), %sp | 296 | add %sp, (192 + 128), %sp |
239 | 297 | ||
240 | sparc64_boot_after_remap: | 298 | sparc64_boot_after_remap: |
299 | sethi %hi(prom_root_compatible), %g1 | ||
300 | or %g1, %lo(prom_root_compatible), %g1 | ||
301 | sethi %hi(prom_sun4v_name), %g7 | ||
302 | or %g7, %lo(prom_sun4v_name), %g7 | ||
303 | mov 5, %g3 | ||
304 | 1: ldub [%g7], %g2 | ||
305 | ldub [%g1], %g4 | ||
306 | cmp %g2, %g4 | ||
307 | bne,pn %icc, 2f | ||
308 | add %g7, 1, %g7 | ||
309 | subcc %g3, 1, %g3 | ||
310 | bne,pt %xcc, 1b | ||
311 | add %g1, 1, %g1 | ||
312 | |||
313 | sethi %hi(is_sun4v), %g1 | ||
314 | or %g1, %lo(is_sun4v), %g1 | ||
315 | mov 1, %g7 | ||
316 | stw %g7, [%g1] | ||
317 | |||
318 | 2: | ||
319 | BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) | ||
241 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) | 320 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) |
242 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) | 321 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) |
243 | ba,pt %xcc, spitfire_boot | 322 | ba,pt %xcc, spitfire_boot |
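The byte loop above is just a bounded string compare of the root node's "compatible" property against "sun4v", setting the is_sun4v flag only when all five bytes match. A hedged C equivalent:

#include <string.h>

/* Sketch: nonzero when the root "compatible" property names sun4v. */
static int detect_sun4v(const char *prom_root_compatible)
{
        return strncmp(prom_root_compatible, "sun4v", 5) == 0;
}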
@@ -301,20 +380,58 @@ jump_to_sun4u_init: | |||
301 | nop | 380 | nop |
302 | 381 | ||
303 | sun4u_init: | 382 | sun4u_init: |
383 | BRANCH_IF_SUN4V(g1, sun4v_init) | ||
384 | |||
304 | /* Set ctx 0 */ | 385 | /* Set ctx 0 */ |
305 | mov PRIMARY_CONTEXT, %g7 | 386 | mov PRIMARY_CONTEXT, %g7 |
306 | stxa %g0, [%g7] ASI_DMMU | 387 | stxa %g0, [%g7] ASI_DMMU |
307 | membar #Sync | 388 | membar #Sync |
308 | 389 | ||
309 | mov SECONDARY_CONTEXT, %g7 | 390 | mov SECONDARY_CONTEXT, %g7 |
310 | stxa %g0, [%g7] ASI_DMMU | 391 | stxa %g0, [%g7] ASI_DMMU |
311 | membar #Sync | 392 | membar #Sync |
312 | 393 | ||
313 | BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) | 394 | ba,pt %xcc, sun4u_continue |
395 | nop | ||
396 | |||
397 | sun4v_init: | ||
398 | /* Set ctx 0 */ | ||
399 | mov PRIMARY_CONTEXT, %g7 | ||
400 | stxa %g0, [%g7] ASI_MMU | ||
401 | membar #Sync | ||
402 | |||
403 | mov SECONDARY_CONTEXT, %g7 | ||
404 | stxa %g0, [%g7] ASI_MMU | ||
405 | membar #Sync | ||
406 | ba,pt %xcc, niagara_tlb_fixup | ||
407 | nop | ||
408 | |||
409 | sun4u_continue: | ||
410 | BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) | ||
314 | 411 | ||
315 | ba,pt %xcc, spitfire_tlb_fixup | 412 | ba,pt %xcc, spitfire_tlb_fixup |
316 | nop | 413 | nop |
317 | 414 | ||
415 | niagara_tlb_fixup: | ||
416 | mov 3, %g2 /* Set TLB type to hypervisor. */ | ||
417 | sethi %hi(tlb_type), %g1 | ||
418 | stw %g2, [%g1 + %lo(tlb_type)] | ||
419 | |||
420 | /* Patch copy/clear ops. */ | ||
421 | call niagara_patch_copyops | ||
422 | nop | ||
423 | call niagara_patch_bzero | ||
424 | nop | ||
425 | call niagara_patch_pageops | ||
426 | nop | ||
427 | |||
428 | /* Patch TLB/cache ops. */ | ||
429 | call hypervisor_patch_cachetlbops | ||
430 | nop | ||
431 | |||
432 | ba,pt %xcc, tlb_fixup_done | ||
433 | nop | ||
434 | |||
318 | cheetah_tlb_fixup: | 435 | cheetah_tlb_fixup: |
319 | mov 2, %g2 /* Set TLB type to cheetah+. */ | 436 | mov 2, %g2 /* Set TLB type to cheetah+. */ |
320 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) | 437 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) |
@@ -411,85 +528,55 @@ setup_trap_table: | |||
411 | wrpr %g0, 15, %pil | 528 | wrpr %g0, 15, %pil |
412 | 529 | ||
413 | /* Make the firmware call to jump over to the Linux trap table. */ | 530 | /* Make the firmware call to jump over to the Linux trap table. */ |
414 | call prom_set_trap_table | 531 | sethi %hi(is_sun4v), %o0 |
415 | sethi %hi(sparc64_ttable_tl0), %o0 | 532 | lduw [%o0 + %lo(is_sun4v)], %o0 |
533 | brz,pt %o0, 1f | ||
534 | nop | ||
416 | 535 | ||
417 | /* Start using proper page size encodings in ctx register. */ | 536 | TRAP_LOAD_TRAP_BLOCK(%g2, %g3) |
418 | sethi %hi(sparc64_kern_pri_context), %g3 | 537 | add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 |
419 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 | 538 | stxa %g2, [%g0] ASI_SCRATCHPAD |
420 | mov PRIMARY_CONTEXT, %g1 | ||
421 | stxa %g2, [%g1] ASI_DMMU | ||
422 | membar #Sync | ||
423 | 539 | ||
424 | /* The Linux trap handlers expect various trap global registers | 540 | /* Compute physical address: |
425 | * to be setup with some fixed values. So here we set these | ||
426 | * up very carefully. These globals are: | ||
427 | * | ||
428 | * Alternate Globals (PSTATE_AG): | ||
429 | * | ||
430 | * %g6 --> current_thread_info() | ||
431 | * | ||
432 | * MMU Globals (PSTATE_MG): | ||
433 | * | ||
434 | * %g1 --> TLB_SFSR | ||
435 | * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB | | ||
436 | * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
437 | * ^ 0xfffff80000000000) | ||
438 | * (this %g2 value is used for computing the PAGE_OFFSET kernel | ||
439 | * TLB entries quickly, the virtual address of the fault XOR'd | ||
440 | * with this %g2 value is the PTE to load into the TLB) | ||
441 | * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE | ||
442 | * | 541 | * |
443 | * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): | 542 | * paddr = kern_base + (mmfsa_vaddr - KERNBASE) |
444 | * | ||
445 | * %g6 --> __irq_work[smp_processor_id()] | ||
446 | */ | 543 | */ |
544 | sethi %hi(KERNBASE), %g3 | ||
545 | sub %g2, %g3, %g2 | ||
546 | sethi %hi(kern_base), %g3 | ||
547 | ldx [%g3 + %lo(kern_base)], %g3 | ||
548 | add %g2, %g3, %o1 | ||
447 | 549 | ||
448 | rdpr %pstate, %o1 | 550 | call prom_set_trap_table_sun4v |
449 | mov %g6, %o2 | 551 | sethi %hi(sparc64_ttable_tl0), %o0 |
450 | wrpr %o1, PSTATE_AG, %pstate | 552 | |
451 | mov %o2, %g6 | 553 | ba,pt %xcc, 2f |
452 | |||
453 | #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) | ||
454 | #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
455 | wrpr %o1, PSTATE_MG, %pstate | ||
456 | mov TSB_REG, %g1 | ||
457 | stxa %g0, [%g1] ASI_DMMU | ||
458 | membar #Sync | ||
459 | stxa %g0, [%g1] ASI_IMMU | ||
460 | membar #Sync | ||
461 | mov TLB_SFSR, %g1 | ||
462 | sethi %uhi(KERN_HIGHBITS), %g2 | ||
463 | or %g2, %ulo(KERN_HIGHBITS), %g2 | ||
464 | sllx %g2, 32, %g2 | ||
465 | or %g2, KERN_LOWBITS, %g2 | ||
466 | |||
467 | BRANCH_IF_ANY_CHEETAH(g3,g7,8f) | ||
468 | ba,pt %xcc, 9f | ||
469 | nop | 554 | nop |
470 | 555 | ||
471 | 8: | 556 | 1: call prom_set_trap_table |
472 | sethi %uhi(VPTE_BASE_CHEETAH), %g3 | 557 | sethi %hi(sparc64_ttable_tl0), %o0 |
473 | or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 | ||
474 | ba,pt %xcc, 2f | ||
475 | sllx %g3, 32, %g3 | ||
476 | 558 | ||
477 | 9: | 559 | /* Start using proper page size encodings in ctx register. */ |
478 | sethi %uhi(VPTE_BASE_SPITFIRE), %g3 | 560 | 2: sethi %hi(sparc64_kern_pri_context), %g3 |
479 | or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 | 561 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
480 | sllx %g3, 32, %g3 | ||
481 | 562 | ||
482 | 2: | 563 | mov PRIMARY_CONTEXT, %g1 |
483 | clr %g7 | 564 | |
484 | #undef KERN_HIGHBITS | 565 | 661: stxa %g2, [%g1] ASI_DMMU |
485 | #undef KERN_LOWBITS | 566 | .section .sun4v_1insn_patch, "ax" |
567 | .word 661b | ||
568 | stxa %g2, [%g1] ASI_MMU | ||
569 | .previous | ||
570 | |||
571 | membar #Sync | ||
486 | 572 | ||
487 | /* Kill PROM timer */ | 573 | /* Kill PROM timer */ |
488 | sethi %hi(0x80000000), %o2 | 574 | sethi %hi(0x80000000), %o2 |
489 | sllx %o2, 32, %o2 | 575 | sllx %o2, 32, %o2 |
490 | wr %o2, 0, %tick_cmpr | 576 | wr %o2, 0, %tick_cmpr |
491 | 577 | ||
492 | BRANCH_IF_ANY_CHEETAH(o2,o3,1f) | 578 | BRANCH_IF_SUN4V(o2, 1f) |
579 | BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) | ||
493 | 580 | ||
494 | ba,pt %xcc, 2f | 581 | ba,pt %xcc, 2f |
495 | nop | 582 | nop |
@@ -502,7 +589,6 @@ setup_trap_table: | |||
502 | 589 | ||
503 | 2: | 590 | 2: |
504 | wrpr %g0, %g0, %wstate | 591 | wrpr %g0, %g0, %wstate |
505 | wrpr %o1, 0x0, %pstate | ||
506 | 592 | ||
507 | call init_irqwork_curcpu | 593 | call init_irqwork_curcpu |
508 | nop | 594 | nop |
@@ -517,7 +603,7 @@ setup_trap_table: | |||
517 | restore | 603 | restore |
518 | 604 | ||
519 | .globl setup_tba | 605 | .globl setup_tba |
520 | setup_tba: /* i0 = is_starfire */ | 606 | setup_tba: |
521 | save %sp, -192, %sp | 607 | save %sp, -192, %sp |
522 | 608 | ||
523 | /* The boot processor is the only cpu which invokes this | 609 | /* The boot processor is the only cpu which invokes this |
@@ -536,31 +622,35 @@ setup_tba: /* i0 = is_starfire */ | |||
536 | restore | 622 | restore |
537 | sparc64_boot_end: | 623 | sparc64_boot_end: |
538 | 624 | ||
539 | #include "systbls.S" | ||
540 | #include "ktlb.S" | 625 | #include "ktlb.S" |
626 | #include "tsb.S" | ||
541 | #include "etrap.S" | 627 | #include "etrap.S" |
542 | #include "rtrap.S" | 628 | #include "rtrap.S" |
543 | #include "winfixup.S" | 629 | #include "winfixup.S" |
544 | #include "entry.S" | 630 | #include "entry.S" |
631 | #include "sun4v_tlb_miss.S" | ||
632 | #include "sun4v_ivec.S" | ||
545 | 633 | ||
546 | /* | 634 | /* |
547 | * The following skip makes sure the trap table in ttable.S is aligned | 635 | * The following skip makes sure the trap table in ttable.S is aligned |
548 | * on a 32K boundary as required by the v9 specs for TBA register. | 636 | * on a 32K boundary as required by the v9 specs for TBA register. |
637 | * | ||
638 | * We align to a 32K boundary, then we have the 32K kernel TSB, | ||
639 | * then the 32K aligned trap table. | ||
549 | */ | 640 | */ |
550 | 1: | 641 | 1: |
551 | .skip 0x4000 + _start - 1b | 642 | .skip 0x4000 + _start - 1b |
552 | 643 | ||
553 | #ifdef CONFIG_SBUS | 644 | .globl swapper_tsb |
554 | /* This is just a hack to fool make depend config.h discovering | 645 | swapper_tsb: |
555 | strategy: As the .S files below need config.h, but | 646 | .skip (32 * 1024) |
556 | make depend does not find it for them, we include config.h | ||
557 | in head.S */ | ||
558 | #endif | ||
559 | 647 | ||
560 | ! 0x0000000000408000 | 648 | ! 0x0000000000408000 |
561 | 649 | ||
562 | #include "ttable.S" | 650 | #include "ttable.S" |
563 | 651 | ||
652 | #include "systbls.S" | ||
653 | |||
564 | .data | 654 | .data |
565 | .align 8 | 655 | .align 8 |
566 | .globl prom_tba, tlb_type | 656 | .globl prom_tba, tlb_type |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 233526ba3abe..8c93ba655b33 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/proc_fs.h> | 22 | #include <linux/proc_fs.h> |
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include <linux/bootmem.h> | ||
24 | 25 | ||
25 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/cache.h> | 40 | #include <asm/cache.h> |
40 | #include <asm/cpudata.h> | 41 | #include <asm/cpudata.h> |
41 | #include <asm/auxio.h> | 42 | #include <asm/auxio.h> |
43 | #include <asm/head.h> | ||
42 | 44 | ||
43 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
44 | static void distribute_irqs(void); | 46 | static void distribute_irqs(void); |
@@ -136,12 +138,48 @@ out_unlock: | |||
136 | return 0; | 138 | return 0; |
137 | } | 139 | } |
138 | 140 | ||
141 | extern unsigned long real_hard_smp_processor_id(void); | ||
142 | |||
143 | static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) | ||
144 | { | ||
145 | unsigned int tid; | ||
146 | |||
147 | if (this_is_starfire) { | ||
148 | tid = starfire_translate(imap, cpuid); | ||
149 | tid <<= IMAP_TID_SHIFT; | ||
150 | tid &= IMAP_TID_UPA; | ||
151 | } else { | ||
152 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
153 | unsigned long ver; | ||
154 | |||
155 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
156 | if ((ver >> 32UL) == __JALAPENO_ID || | ||
157 | (ver >> 32UL) == __SERRANO_ID) { | ||
158 | tid = cpuid << IMAP_TID_SHIFT; | ||
159 | tid &= IMAP_TID_JBUS; | ||
160 | } else { | ||
161 | unsigned int a = cpuid & 0x1f; | ||
162 | unsigned int n = (cpuid >> 5) & 0x1f; | ||
163 | |||
164 | tid = ((a << IMAP_AID_SHIFT) | | ||
165 | (n << IMAP_NID_SHIFT)); | ||
166 | tid &= (IMAP_AID_SAFARI | | ||
167 | IMAP_NID_SAFARI); | ||
168 | } | ||
169 | } else { | ||
170 | tid = cpuid << IMAP_TID_SHIFT; | ||
171 | tid &= IMAP_TID_UPA; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | return tid; | ||
176 | } | ||
177 | |||
139 | /* Now these are always passed a true fully specified sun4u INO. */ | 178 | /* Now these are always passed a true fully specified sun4u INO. */ |
140 | void enable_irq(unsigned int irq) | 179 | void enable_irq(unsigned int irq) |
141 | { | 180 | { |
142 | struct ino_bucket *bucket = __bucket(irq); | 181 | struct ino_bucket *bucket = __bucket(irq); |
143 | unsigned long imap; | 182 | unsigned long imap, cpuid; |
144 | unsigned long tid; | ||
145 | 183 | ||
146 | imap = bucket->imap; | 184 | imap = bucket->imap; |
147 | if (imap == 0UL) | 185 | if (imap == 0UL) |
@@ -149,47 +187,38 @@ void enable_irq(unsigned int irq) | |||
149 | 187 | ||
150 | preempt_disable(); | 188 | preempt_disable(); |
151 | 189 | ||
152 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 190 | /* This gets the physical processor ID, even on uniprocessor, |
153 | unsigned long ver; | 191 | * so we can always program the interrupt target correctly. |
154 | 192 | */ | |
155 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 193 | cpuid = real_hard_smp_processor_id(); |
156 | if ((ver >> 32) == 0x003e0016) { | 194 | |
157 | /* We set it to our JBUS ID. */ | 195 | if (tlb_type == hypervisor) { |
158 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | 196 | unsigned int ino = __irq_ino(irq); |
159 | : "=r" (tid) | 197 | int err; |
160 | : "i" (ASI_JBUS_CONFIG)); | 198 | |
161 | tid = ((tid & (0x1fUL<<17)) << 9); | 199 | err = sun4v_intr_settarget(ino, cpuid); |
162 | tid &= IMAP_TID_JBUS; | 200 | if (err != HV_EOK) |
163 | } else { | 201 | printk("sun4v_intr_settarget(%x,%lu): err(%d)\n", |
164 | /* We set it to our Safari AID. */ | 202 | ino, cpuid, err); |
165 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | 203 | err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); |
166 | : "=r" (tid) | 204 | if (err != HV_EOK) |
167 | : "i" (ASI_SAFARI_CONFIG)); | 205 | printk("sun4v_intr_setenabled(%x): err(%d)\n", |
168 | tid = ((tid & (0x3ffUL<<17)) << 9); | 206 | ino, err); |
169 | tid &= IMAP_AID_SAFARI; | ||
170 | } | ||
171 | } else if (this_is_starfire == 0) { | ||
172 | /* We set it to our UPA MID. */ | ||
173 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
174 | : "=r" (tid) | ||
175 | : "i" (ASI_UPA_CONFIG)); | ||
176 | tid = ((tid & UPA_CONFIG_MID) << 9); | ||
177 | tid &= IMAP_TID_UPA; | ||
178 | } else { | 207 | } else { |
179 | tid = (starfire_translate(imap, smp_processor_id()) << 26); | 208 | unsigned int tid = sun4u_compute_tid(imap, cpuid); |
180 | tid &= IMAP_TID_UPA; | 209 | |
210 | /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product | ||
211 | * of this SYSIO's preconfigured IGN in the SYSIO Control | ||
212 | * Register, the hardware just mirrors that value here. | ||
213 | * However for Graphics and UPA Slave devices the full | ||
214 | * IMAP_INR field can be set by the programmer here. | ||
215 | * | ||
216 | * Things like FFB can now be handled via the new IRQ | ||
217 | * mechanism. | ||
218 | */ | ||
219 | upa_writel(tid | IMAP_VALID, imap); | ||
181 | } | 220 | } |
182 | 221 | ||
183 | /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product | ||
184 | * of this SYSIO's preconfigured IGN in the SYSIO Control | ||
185 | * Register, the hardware just mirrors that value here. | ||
186 | * However for Graphics and UPA Slave devices the full | ||
187 | * IMAP_INR field can be set by the programmer here. | ||
188 | * | ||
189 | * Things like FFB can now be handled via the new IRQ mechanism. | ||
190 | */ | ||
191 | upa_writel(tid | IMAP_VALID, imap); | ||
192 | |||
193 | preempt_enable(); | 222 | preempt_enable(); |
194 | } | 223 | } |
195 | 224 | ||
@@ -201,16 +230,26 @@ void disable_irq(unsigned int irq) | |||
201 | 230 | ||
202 | imap = bucket->imap; | 231 | imap = bucket->imap; |
203 | if (imap != 0UL) { | 232 | if (imap != 0UL) { |
204 | u32 tmp; | 233 | if (tlb_type == hypervisor) { |
234 | unsigned int ino = __irq_ino(irq); | ||
235 | int err; | ||
236 | |||
237 | err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); | ||
238 | if (err != HV_EOK) | ||
239 | printk("sun4v_intr_setenabled(%x): " | ||
240 | "err(%d)\n", ino, err); | ||
241 | } else { | ||
242 | u32 tmp; | ||
205 | 243 | ||
206 | /* NOTE: We do not want to futz with the IRQ clear registers | 244 | /* NOTE: We do not want to futz with the IRQ clear registers |
207 | * and move the state to IDLE, the SCSI code does call | 245 | * and move the state to IDLE, the SCSI code does call |
208 | * disable_irq() to assure atomicity in the queue cmd | 246 | * disable_irq() to assure atomicity in the queue cmd |
209 | * SCSI adapter driver code. Thus we'd lose interrupts. | 247 | * SCSI adapter driver code. Thus we'd lose interrupts. |
210 | */ | 248 | */ |
211 | tmp = upa_readl(imap); | 249 | tmp = upa_readl(imap); |
212 | tmp &= ~IMAP_VALID; | 250 | tmp &= ~IMAP_VALID; |
213 | upa_writel(tmp, imap); | 251 | upa_writel(tmp, imap); |
252 | } | ||
214 | } | 253 | } |
215 | } | 254 | } |
216 | 255 | ||
@@ -248,6 +287,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long | |||
248 | return __irq(&pil0_dummy_bucket); | 287 | return __irq(&pil0_dummy_bucket); |
249 | } | 288 | } |
250 | 289 | ||
290 | BUG_ON(tlb_type == hypervisor); | ||
291 | |||
251 | /* RULE: Both must be specified in all other cases. */ | 292 | /* RULE: Both must be specified in all other cases. */ |
252 | if (iclr == 0UL || imap == 0UL) { | 293 | if (iclr == 0UL || imap == 0UL) { |
253 | prom_printf("Invalid build_irq %d %d %016lx %016lx\n", | 294 | prom_printf("Invalid build_irq %d %d %016lx %016lx\n", |
@@ -275,12 +316,11 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long | |||
275 | goto out; | 316 | goto out; |
276 | } | 317 | } |
277 | 318 | ||
278 | bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); | 319 | bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); |
279 | if (!bucket->irq_info) { | 320 | if (!bucket->irq_info) { |
280 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); | 321 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); |
281 | prom_halt(); | 322 | prom_halt(); |
282 | } | 323 | } |
283 | memset(bucket->irq_info, 0, sizeof(struct irq_desc)); | ||
284 | 324 | ||
285 | /* Ok, looks good, set it up. Don't touch the irq_chain or | 325 | /* Ok, looks good, set it up. Don't touch the irq_chain or |
286 | * the pending flag. | 326 | * the pending flag. |
@@ -294,6 +334,37 @@ out: | |||
294 | return __irq(bucket); | 334 | return __irq(bucket); |
295 | } | 335 | } |
296 | 336 | ||
337 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags) | ||
338 | { | ||
339 | struct ino_bucket *bucket; | ||
340 | unsigned long sysino; | ||
341 | |||
342 | sysino = sun4v_devino_to_sysino(devhandle, devino); | ||
343 | |||
344 | bucket = &ivector_table[sysino]; | ||
345 | |||
346 | /* Catch accidental accesses to these things. IMAP/ICLR handling | ||
347 | * is done by hypervisor calls on sun4v platforms, not by direct | ||
348 | * register accesses. | ||
349 | * | ||
350 | * But we need to make them look unique for the disable_irq() logic | ||
351 | * in free_irq(). | ||
352 | */ | ||
353 | bucket->imap = ~0UL - sysino; | ||
354 | bucket->iclr = ~0UL - sysino; | ||
355 | |||
356 | bucket->pil = pil; | ||
357 | bucket->flags = flags; | ||
358 | |||
359 | bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); | ||
360 | if (!bucket->irq_info) { | ||
361 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); | ||
362 | prom_halt(); | ||
363 | } | ||
364 | |||
365 | return __irq(bucket); | ||
366 | } | ||
367 | |||
297 | static void atomic_bucket_insert(struct ino_bucket *bucket) | 368 | static void atomic_bucket_insert(struct ino_bucket *bucket) |
298 | { | 369 | { |
299 | unsigned long pstate; | 370 | unsigned long pstate; |
@@ -482,7 +553,6 @@ void free_irq(unsigned int irq, void *dev_id) | |||
482 | bucket = __bucket(irq); | 553 | bucket = __bucket(irq); |
483 | if (bucket != &pil0_dummy_bucket) { | 554 | if (bucket != &pil0_dummy_bucket) { |
484 | struct irq_desc *desc = bucket->irq_info; | 555 | struct irq_desc *desc = bucket->irq_info; |
485 | unsigned long imap = bucket->imap; | ||
486 | int ent, i; | 556 | int ent, i; |
487 | 557 | ||
488 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { | 558 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { |
@@ -495,6 +565,8 @@ void free_irq(unsigned int irq, void *dev_id) | |||
495 | } | 565 | } |
496 | 566 | ||
497 | if (!desc->action_active_mask) { | 567 | if (!desc->action_active_mask) { |
568 | unsigned long imap = bucket->imap; | ||
569 | |||
498 | /* This unique interrupt source is now inactive. */ | 570 | /* This unique interrupt source is now inactive. */ |
499 | bucket->flags &= ~IBF_ACTIVE; | 571 | bucket->flags &= ~IBF_ACTIVE; |
500 | 572 | ||
@@ -592,7 +664,18 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) | |||
592 | break; | 664 | break; |
593 | } | 665 | } |
594 | if (bp->pil != 0) { | 666 | if (bp->pil != 0) { |
595 | upa_writel(ICLR_IDLE, bp->iclr); | 667 | if (tlb_type == hypervisor) { |
668 | unsigned int ino = __irq_ino(bp); | ||
669 | int err; | ||
670 | |||
671 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | ||
672 | if (err != HV_EOK) | ||
673 | printk("sun4v_intr_setstate(%x): " | ||
674 | "err(%d)\n", ino, err); | ||
675 | } else { | ||
676 | upa_writel(ICLR_IDLE, bp->iclr); | ||
677 | } | ||
678 | |||
596 | /* Test and add entropy */ | 679 | /* Test and add entropy */ |
597 | if (random & SA_SAMPLE_RANDOM) | 680 | if (random & SA_SAMPLE_RANDOM) |
598 | add_interrupt_randomness(irq); | 681 | add_interrupt_randomness(irq); |
@@ -694,7 +777,7 @@ irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) | |||
694 | val = readb(auxio_register); | 777 | val = readb(auxio_register); |
695 | val |= AUXIO_AUX1_FTCNT; | 778 | val |= AUXIO_AUX1_FTCNT; |
696 | writeb(val, auxio_register); | 779 | writeb(val, auxio_register); |
697 | val &= AUXIO_AUX1_FTCNT; | 780 | val &= ~AUXIO_AUX1_FTCNT; |
698 | writeb(val, auxio_register); | 781 | writeb(val, auxio_register); |
699 | 782 | ||
700 | doing_pdma = 0; | 783 | doing_pdma = 0; |
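The one-line floppy change is a mask fix: the intent is to pulse AUXIO_AUX1_FTCNT high and then low, so the second write must clear that bit rather than keep only it (which is what the old "val &= AUXIO_AUX1_FTCNT" did). The corrected sequence as a hedged standalone sketch:

static void pulse_ftcnt(void __iomem *auxio_register)
{
        u8 val = readb(auxio_register);

        val |= AUXIO_AUX1_FTCNT;        /* strobe the terminal-count bit high */
        writeb(val, auxio_register);
        val &= ~AUXIO_AUX1_FTCNT;       /* ...and drop it low again */
        writeb(val, auxio_register);
}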
@@ -727,25 +810,23 @@ EXPORT_SYMBOL(probe_irq_off); | |||
727 | static int retarget_one_irq(struct irqaction *p, int goal_cpu) | 810 | static int retarget_one_irq(struct irqaction *p, int goal_cpu) |
728 | { | 811 | { |
729 | struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; | 812 | struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; |
730 | unsigned long imap = bucket->imap; | ||
731 | unsigned int tid; | ||
732 | 813 | ||
733 | while (!cpu_online(goal_cpu)) { | 814 | while (!cpu_online(goal_cpu)) { |
734 | if (++goal_cpu >= NR_CPUS) | 815 | if (++goal_cpu >= NR_CPUS) |
735 | goal_cpu = 0; | 816 | goal_cpu = 0; |
736 | } | 817 | } |
737 | 818 | ||
738 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 819 | if (tlb_type == hypervisor) { |
739 | tid = goal_cpu << 26; | 820 | unsigned int ino = __irq_ino(bucket); |
740 | tid &= IMAP_AID_SAFARI; | 821 | |
741 | } else if (this_is_starfire == 0) { | 822 | sun4v_intr_settarget(ino, goal_cpu); |
742 | tid = goal_cpu << 26; | 823 | sun4v_intr_setenabled(ino, HV_INTR_ENABLED); |
743 | tid &= IMAP_TID_UPA; | ||
744 | } else { | 824 | } else { |
745 | tid = (starfire_translate(imap, goal_cpu) << 26); | 825 | unsigned long imap = bucket->imap; |
746 | tid &= IMAP_TID_UPA; | 826 | unsigned int tid = sun4u_compute_tid(imap, goal_cpu); |
827 | |||
828 | upa_writel(tid | IMAP_VALID, imap); | ||
747 | } | 829 | } |
748 | upa_writel(tid | IMAP_VALID, imap); | ||
749 | 830 | ||
750 | do { | 831 | do { |
751 | if (++goal_cpu >= NR_CPUS) | 832 | if (++goal_cpu >= NR_CPUS) |
@@ -848,33 +929,114 @@ static void kill_prom_timer(void) | |||
848 | 929 | ||
849 | void init_irqwork_curcpu(void) | 930 | void init_irqwork_curcpu(void) |
850 | { | 931 | { |
851 | register struct irq_work_struct *workp asm("o2"); | ||
852 | register unsigned long tmp asm("o3"); | ||
853 | int cpu = hard_smp_processor_id(); | 932 | int cpu = hard_smp_processor_id(); |
854 | 933 | ||
855 | memset(__irq_work + cpu, 0, sizeof(*workp)); | 934 | memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct)); |
856 | 935 | } | |
857 | /* Make sure we are called with PSTATE_IE disabled. */ | 936 | |
858 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | 937 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) |
859 | : "=r" (tmp)); | 938 | { |
860 | if (tmp & PSTATE_IE) { | 939 | unsigned long num_entries = 128; |
861 | prom_printf("BUG: init_irqwork_curcpu() called with " | 940 | unsigned long status; |
862 | "PSTATE_IE enabled, bailing.\n"); | 941 | |
863 | __asm__ __volatile__("mov %%i7, %0\n\t" | 942 | status = sun4v_cpu_qconf(type, paddr, num_entries); |
864 | : "=r" (tmp)); | 943 | if (status != HV_EOK) { |
865 | prom_printf("BUG: Called from %lx\n", tmp); | 944 | prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " |
945 | "err %lu\n", type, paddr, num_entries, status); | ||
866 | prom_halt(); | 946 | prom_halt(); |
867 | } | 947 | } |
948 | } | ||
868 | 949 | ||
869 | /* Set interrupt globals. */ | 950 | static void __cpuinit sun4v_register_mondo_queues(int this_cpu) |
870 | workp = &__irq_work[cpu]; | 951 | { |
871 | __asm__ __volatile__( | 952 | struct trap_per_cpu *tb = &trap_block[this_cpu]; |
872 | "rdpr %%pstate, %0\n\t" | 953 | |
873 | "wrpr %0, %1, %%pstate\n\t" | 954 | register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); |
874 | "mov %2, %%g6\n\t" | 955 | register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); |
875 | "wrpr %0, 0x0, %%pstate\n\t" | 956 | register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); |
876 | : "=&r" (tmp) | 957 | register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); |
877 | : "i" (PSTATE_IG), "r" (workp)); | 958 | } |
959 | |||
960 | static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) | ||
961 | { | ||
962 | void *page; | ||
963 | |||
964 | if (use_bootmem) | ||
965 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
966 | else | ||
967 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
968 | |||
969 | if (!page) { | ||
970 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); | ||
971 | prom_halt(); | ||
972 | } | ||
973 | |||
974 | *pa_ptr = __pa(page); | ||
975 | } | ||
976 | |||
977 | static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) | ||
978 | { | ||
979 | void *page; | ||
980 | |||
981 | if (use_bootmem) | ||
982 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
983 | else | ||
984 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
985 | |||
986 | if (!page) { | ||
987 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); | ||
988 | prom_halt(); | ||
989 | } | ||
990 | |||
991 | *pa_ptr = __pa(page); | ||
992 | } | ||
993 | |||
994 | static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) | ||
995 | { | ||
996 | #ifdef CONFIG_SMP | ||
997 | void *page; | ||
998 | |||
999 | BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); | ||
1000 | |||
1001 | if (use_bootmem) | ||
1002 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
1003 | else | ||
1004 | page = (void *) get_zeroed_page(GFP_ATOMIC); | ||
1005 | |||
1006 | if (!page) { | ||
1007 | prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); | ||
1008 | prom_halt(); | ||
1009 | } | ||
1010 | |||
1011 | tb->cpu_mondo_block_pa = __pa(page); | ||
1012 | tb->cpu_list_pa = __pa(page + 64); | ||
1013 | #endif | ||
1014 | } | ||
1015 | |||
1016 | /* Allocate and register the mondo and error queues for this cpu. */ | ||
1017 | void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load) | ||
1018 | { | ||
1019 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
1020 | |||
1021 | if (alloc) { | ||
1022 | alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); | ||
1023 | alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); | ||
1024 | alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); | ||
1025 | alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); | ||
1026 | alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); | ||
1027 | alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); | ||
1028 | |||
1029 | init_cpu_send_mondo_info(tb, use_bootmem); | ||
1030 | } | ||
1031 | |||
1032 | if (load) { | ||
1033 | if (cpu != hard_smp_processor_id()) { | ||
1034 | prom_printf("SUN4V: init mondo on cpu %d not %d\n", | ||
1035 | cpu, hard_smp_processor_id()); | ||
1036 | prom_halt(); | ||
1037 | } | ||
1038 | sun4v_register_mondo_queues(cpu); | ||
1039 | } | ||
878 | } | 1040 | } |
879 | 1041 | ||
880 | /* Only invoked on boot processor. */ | 1042 | /* Only invoked on boot processor. */ |
@@ -884,6 +1046,9 @@ void __init init_IRQ(void) | |||
884 | kill_prom_timer(); | 1046 | kill_prom_timer(); |
885 | memset(&ivector_table[0], 0, sizeof(ivector_table)); | 1047 | memset(&ivector_table[0], 0, sizeof(ivector_table)); |
886 | 1048 | ||
1049 | if (tlb_type == hypervisor) | ||
1050 | sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1); | ||
1051 | |||
887 | /* We need to clear any IRQ's pending in the soft interrupt | 1052 | /* We need to clear any IRQ's pending in the soft interrupt |
888 | * registers, a spurious one could be left around from the | 1053 | * registers, a spurious one could be left around from the |
889 | * PROM timer which we just disabled. | 1054 | * PROM timer which we just disabled. |
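For orientation, the mondo-queue code above fills in these per-cpu trap-block members (a hedged summary; the real struct trap_per_cpu lives in asm/cpudata.h and holds more than is shown here, and every value below is a physical address handed to the hypervisor):

struct trap_per_cpu_fields_used {
        unsigned long cpu_mondo_pa;            /* HV_CPU_QUEUE_CPU_MONDO    */
        unsigned long dev_mondo_pa;            /* HV_CPU_QUEUE_DEVICE_MONDO */
        unsigned long resum_mondo_pa;          /* HV_CPU_QUEUE_RES_ERROR    */
        unsigned long resum_kernel_buf_pa;     /* resumable-error kbuf      */
        unsigned long nonresum_mondo_pa;       /* HV_CPU_QUEUE_NONRES_ERROR */
        unsigned long nonresum_kernel_buf_pa;  /* non-resumable-error kbuf  */
        unsigned long cpu_mondo_block_pa;      /* SMP: mondo data block     */
        unsigned long cpu_list_pa;             /* SMP: target-cpu list      */
};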
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S deleted file mode 100644 index 4951ff8f6877..000000000000 --- a/arch/sparc64/kernel/itlb_base.S +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $ | ||
2 | * itlb_base.S: Front end to ITLB miss replacement strategy. | ||
3 | * This is included directly into the trap table. | ||
4 | * | ||
5 | * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) | ||
6 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
7 | */ | ||
8 | |||
9 | #if PAGE_SHIFT == 13 | ||
10 | /* | ||
11 | * To compute vpte offset, we need to do ((addr >> 13) << 3), | ||
12 | * which can be optimized to (addr >> 10) if bits 10/11/12 can | ||
13 | * be guaranteed to be 0 ... mmu_context.h does guarantee this | ||
14 | * by only using 10 bits in the hwcontext value. | ||
15 | */ | ||
16 | #define CREATE_VPTE_OFFSET1(r1, r2) \ | ||
17 | srax r1, 10, r2 | ||
18 | #define CREATE_VPTE_OFFSET2(r1, r2) nop | ||
19 | #else /* PAGE_SHIFT */ | ||
20 | #define CREATE_VPTE_OFFSET1(r1, r2) \ | ||
21 | srax r1, PAGE_SHIFT, r2 | ||
22 | #define CREATE_VPTE_OFFSET2(r1, r2) \ | ||
23 | sllx r2, 3, r2 | ||
24 | #endif /* PAGE_SHIFT */ | ||
25 | |||
26 | |||
27 | /* Ways we can get here: | ||
28 | * | ||
29 | * 1) Nucleus instruction misses from module code. | ||
30 | * 2) All user instruction misses. | ||
31 | * | ||
32 | * All real page faults merge their code paths to the | ||
33 | * sparc64_realfault_common label below. | ||
34 | */ | ||
35 | |||
36 | /* ITLB ** ICACHE line 1: Quick user TLB misses */ | ||
37 | mov TLB_SFSR, %g1 | ||
38 | ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS | ||
39 | CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset | ||
40 | CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset | ||
41 | ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE | ||
42 | 1: brgez,pn %g5, 3f ! Not valid, branch out | ||
43 | sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot | ||
44 | andcc %g5, %g4, %g0 ! Executable? | ||
45 | |||
46 | /* ITLB ** ICACHE line 2: Real faults */ | ||
47 | be,pn %xcc, 3f ! Nope, branch. | ||
48 | nop ! Delay-slot | ||
49 | 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB | ||
50 | retry ! Trap return | ||
51 | 3: rdpr %pstate, %g4 ! Move into alt-globals | ||
52 | wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate | ||
53 | rdpr %tpc, %g5 ! And load faulting VA | ||
54 | mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB | ||
55 | |||
56 | /* ITLB ** ICACHE line 3: Finish faults */ | ||
57 | sparc64_realfault_common: ! Called by dtlb_miss | ||
58 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
59 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
60 | ba,pt %xcc, etrap ! Save state | ||
61 | 1: rd %pc, %g7 ! ... | ||
62 | call do_sparc64_fault ! Call fault handler | ||
63 | add %sp, PTREGS_OFF, %o0! Compute pt_regs arg | ||
64 | ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state | ||
65 | nop | ||
66 | |||
67 | /* ITLB ** ICACHE line 4: Window fixups */ | ||
68 | winfix_trampoline: | ||
69 | rdpr %tpc, %g3 ! Prepare winfixup TNPC | ||
70 | or %g3, 0x7c, %g3 ! Compute branch offset | ||
71 | wrpr %g3, %tnpc ! Write it into TNPC | ||
72 | done ! Do it to it | ||
73 | nop | ||
74 | nop | ||
75 | nop | ||
76 | nop | ||
77 | |||
78 | #undef CREATE_VPTE_OFFSET1 | ||
79 | #undef CREATE_VPTE_OFFSET2 | ||
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S new file mode 100644 index 000000000000..ad46e2024f4b --- /dev/null +++ b/arch/sparc64/kernel/itlb_miss.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* ITLB ** ICACHE line 1: Context 0 check and TSB load */ | ||
2 | ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer | ||
3 | ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET | ||
4 | srlx %g6, 48, %g5 ! Get context | ||
5 | sllx %g6, 22, %g6 ! Zero out context | ||
6 | brz,pn %g5, kvmap_itlb ! Context 0 processing | ||
7 | srlx %g6, 22, %g6 ! Delay slot | ||
8 | TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry | ||
9 | cmp %g4, %g6 ! Compare TAG | ||
10 | |||
11 | /* ITLB ** ICACHE line 2: TSB compare and TLB load */ | ||
12 | bne,pn %xcc, tsb_miss_itlb ! Miss | ||
13 | mov FAULT_CODE_ITLB, %g3 | ||
14 | andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? | ||
15 | be,pn %xcc, tsb_do_fault | ||
16 | nop ! Delay slot, fill me | ||
17 | stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB | ||
18 | retry ! Trap done | ||
19 | nop | ||
20 | |||
21 | /* ITLB ** ICACHE line 3: */ | ||
22 | nop | ||
23 | nop | ||
24 | nop | ||
25 | nop | ||
26 | nop | ||
27 | nop | ||
28 | nop | ||
29 | nop | ||
30 | |||
31 | /* ITLB ** ICACHE line 4: */ | ||
32 | nop | ||
33 | nop | ||
34 | nop | ||
35 | nop | ||
36 | nop | ||
37 | nop | ||
38 | nop | ||
39 | nop | ||
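The replacement handler's first cache line splits the IMMU TAG TARGET register into a context number and a page tag, short-circuiting context 0 (kernel/nucleus misses) to kvmap_itlb. A hedged C model of that decomposition, mirroring the shifts above:

static int itlb_miss_is_kernel(unsigned long tag_target)
{
        unsigned long ctx = tag_target >> 48;           /* srlx %g6, 48, %g5 */
        unsigned long tag = (tag_target << 22) >> 22;   /* zero out context  */

        (void) tag;     /* compared against the loaded TSB entry in line 2 */
        return ctx == 0;
}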
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index d9244d3c9f73..31da1e564c95 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S | |||
@@ -4,191 +4,276 @@ | |||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) |
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | 5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) |
6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
10 | #include <asm/head.h> | 10 | #include <asm/head.h> |
11 | #include <asm/asi.h> | 11 | #include <asm/asi.h> |
12 | #include <asm/page.h> | 12 | #include <asm/page.h> |
13 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
14 | #include <asm/tsb.h> | ||
14 | 15 | ||
15 | .text | 16 | .text |
16 | .align 32 | 17 | .align 32 |
17 | 18 | ||
18 | /* | 19 | kvmap_itlb: |
19 | * On a second level vpte miss, check whether the original fault is to the OBP | 20 | /* g6: TAG TARGET */ |
20 | * range (note that this is only possible for instruction miss, data misses to | 21 | mov TLB_TAG_ACCESS, %g4 |
21 | * obp range do not use vpte). If so, go back directly to the faulting address. | 22 | ldxa [%g4] ASI_IMMU, %g4 |
22 | * This is because we want to read the tpc, otherwise we have no way of knowing | 23 | |
23 | * the 8k aligned faulting address if we are using >8k kernel pagesize. This | 24 | /* sun4v_itlb_miss branches here with the missing virtual |
24 | * also ensures no vpte range addresses are dropped into tlb while obp is | 25 | * address already loaded into %g4 |
25 | * executing (see inherit_locked_prom_mappings() rant). | ||
26 | */ | ||
27 | sparc64_vpte_nucleus: | ||
28 | /* Note that kvmap below has verified that the address is | ||
29 | * in the range MODULES_VADDR --> VMALLOC_END already. So | ||
30 | * here we need only check if it is an OBP address or not. | ||
31 | */ | 26 | */ |
27 | kvmap_itlb_4v: | ||
28 | |||
29 | kvmap_itlb_nonlinear: | ||
30 | /* Catch kernel NULL pointer calls. */ | ||
31 | sethi %hi(PAGE_SIZE), %g5 | ||
32 | cmp %g4, %g5 | ||
33 | bleu,pn %xcc, kvmap_dtlb_longpath | ||
34 | nop | ||
35 | |||
36 | KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) | ||
37 | |||
38 | kvmap_itlb_tsb_miss: | ||
32 | sethi %hi(LOW_OBP_ADDRESS), %g5 | 39 | sethi %hi(LOW_OBP_ADDRESS), %g5 |
33 | cmp %g4, %g5 | 40 | cmp %g4, %g5 |
34 | blu,pn %xcc, kern_vpte | 41 | blu,pn %xcc, kvmap_itlb_vmalloc_addr |
35 | mov 0x1, %g5 | 42 | mov 0x1, %g5 |
36 | sllx %g5, 32, %g5 | 43 | sllx %g5, 32, %g5 |
37 | cmp %g4, %g5 | 44 | cmp %g4, %g5 |
38 | blu,pn %xcc, vpte_insn_obp | 45 | blu,pn %xcc, kvmap_itlb_obp |
39 | nop | 46 | nop |
40 | 47 | ||
41 | /* These two instructions are patched by paging_init(). */ | 48 | kvmap_itlb_vmalloc_addr: |
42 | kern_vpte: | 49 | KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) |
43 | sethi %hi(swapper_pgd_zero), %g5 | ||
44 | lduw [%g5 + %lo(swapper_pgd_zero)], %g5 | ||
45 | 50 | ||
46 | /* With kernel PGD in %g5, branch back into dtlb_backend. */ | 51 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
47 | ba,pt %xcc, sparc64_kpte_continue | ||
48 | andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ | ||
49 | 52 | ||
50 | vpte_noent: | 53 | /* Load and check PTE. */ |
51 | /* Restore previous TAG_ACCESS, %g5 is zero, and we will | 54 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
52 | * skip over the trap instruction so that the top level | 55 | mov 1, %g7 |
53 | * TLB miss handler will think this %g5 value is just an | 56 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
54 | * invalid PTE, thus branching to full fault processing. | 57 | brgez,a,pn %g5, kvmap_itlb_longpath |
55 | */ | 58 | KTSB_STORE(%g1, %g7) |
56 | mov TLB_SFSR, %g1 | 59 | |
57 | stxa %g4, [%g1 + %g1] ASI_DMMU | 60 | KTSB_WRITE(%g1, %g5, %g6) |
58 | done | 61 | |
59 | 62 | /* fallthrough to TLB load */ | |
60 | vpte_insn_obp: | ||
61 | /* Behave as if we are at TL0. */ | ||
62 | wrpr %g0, 1, %tl | ||
63 | rdpr %tpc, %g4 /* Find original faulting iaddr */ | ||
64 | srlx %g4, 13, %g4 /* Throw out context bits */ | ||
65 | sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ | ||
66 | |||
67 | /* Restore previous TAG_ACCESS. */ | ||
68 | mov TLB_SFSR, %g1 | ||
69 | stxa %g4, [%g1 + %g1] ASI_IMMU | ||
70 | |||
71 | sethi %hi(prom_trans), %g5 | ||
72 | or %g5, %lo(prom_trans), %g5 | ||
73 | |||
74 | 1: ldx [%g5 + 0x00], %g6 ! base | ||
75 | brz,a,pn %g6, longpath ! no more entries, fail | ||
76 | mov TLB_SFSR, %g1 ! and restore %g1 | ||
77 | ldx [%g5 + 0x08], %g1 ! len | ||
78 | add %g6, %g1, %g1 ! end | ||
79 | cmp %g6, %g4 | ||
80 | bgu,pt %xcc, 2f | ||
81 | cmp %g4, %g1 | ||
82 | bgeu,pt %xcc, 2f | ||
83 | ldx [%g5 + 0x10], %g1 ! PTE | ||
84 | |||
85 | /* TLB load, restore %g1, and return from trap. */ | ||
86 | sub %g4, %g6, %g6 | ||
87 | add %g1, %g6, %g5 | ||
88 | mov TLB_SFSR, %g1 | ||
89 | stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
90 | retry | ||
91 | 63 | ||
92 | 2: ba,pt %xcc, 1b | 64 | kvmap_itlb_load: |
93 | add %g5, (3 * 8), %g5 ! next entry | 65 | |
94 | 66 | 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN | |
95 | kvmap_do_obp: | ||
96 | sethi %hi(prom_trans), %g5 | ||
97 | or %g5, %lo(prom_trans), %g5 | ||
98 | srlx %g4, 13, %g4 | ||
99 | sllx %g4, 13, %g4 | ||
100 | |||
101 | 1: ldx [%g5 + 0x00], %g6 ! base | ||
102 | brz,a,pn %g6, longpath ! no more entries, fail | ||
103 | mov TLB_SFSR, %g1 ! and restore %g1 | ||
104 | ldx [%g5 + 0x08], %g1 ! len | ||
105 | add %g6, %g1, %g1 ! end | ||
106 | cmp %g6, %g4 | ||
107 | bgu,pt %xcc, 2f | ||
108 | cmp %g4, %g1 | ||
109 | bgeu,pt %xcc, 2f | ||
110 | ldx [%g5 + 0x10], %g1 ! PTE | ||
111 | |||
112 | /* TLB load, restore %g1, and return from trap. */ | ||
113 | sub %g4, %g6, %g6 | ||
114 | add %g1, %g6, %g5 | ||
115 | mov TLB_SFSR, %g1 | ||
116 | stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
117 | retry | 67 | retry |
68 | .section .sun4v_2insn_patch, "ax" | ||
69 | .word 661b | ||
70 | nop | ||
71 | nop | ||
72 | .previous | ||
73 | |||
74 | /* For sun4v the ASI_ITLB_DATA_IN store and the retry | ||
75 | * instruction get nop'd out and we get here to branch | ||
76 | * to the sun4v tlb load code. The registers are setup | ||
77 | * as follows: | ||
78 | * | ||
79 | * %g4: vaddr | ||
80 | * %g5: PTE | ||
81 | * %g6: TAG | ||
82 | * | ||
83 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
84 | * up here. | ||
85 | */ | ||
86 | ba,pt %xcc, sun4v_itlb_load | ||
87 | mov %g5, %g3 | ||
118 | 88 | ||
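Each .sun4v_2insn_patch record above couples the address of the two sun4u instructions at a 661: label with the two sun4v replacements that boot code writes over them when tlb_type is hypervisor. A minimal C sketch of such a record and its application (field and function names are assumptions for illustration, not the kernel's definitions):

	/* Mirrors the ".word 661b" plus two instruction words emitted
	 * into the patch section; the layout is an assumption.
	 */
	struct sun4v_2insn_patch_entry {
		unsigned int	addr;		/* address of the 661: label */
		unsigned int	insns[2];	/* two replacement opcodes   */
	};

	static void apply_2insn_patch(struct sun4v_2insn_patch_entry *p)
	{
		unsigned int *insn = (unsigned int *)(unsigned long)p->addr;

		insn[0] = p->insns[0];		/* overwrite in place */
		insn[1] = p->insns[1];
		/* an I-cache flush of both words is assumed to follow */
	}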
119 | 2: ba,pt %xcc, 1b | 89 | kvmap_itlb_longpath: |
120 | add %g5, (3 * 8), %g5 ! next entry | 90 | |
91 | 661: rdpr %pstate, %g5 | ||
92 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
93 | .section .sun4v_2insn_patch, "ax" | ||
94 | .word 661b | ||
95 | SET_GL(1) | ||
96 | nop | ||
97 | .previous | ||
98 | |||
99 | rdpr %tpc, %g5 | ||
100 | ba,pt %xcc, sparc64_realfault_common | ||
101 | mov FAULT_CODE_ITLB, %g4 | ||
102 | |||
103 | kvmap_itlb_obp: | ||
104 | OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) | ||
105 | |||
106 | KTSB_LOCK_TAG(%g1, %g2, %g7) | ||
107 | |||
108 | KTSB_WRITE(%g1, %g5, %g6) | ||
109 | |||
110 | ba,pt %xcc, kvmap_itlb_load | ||
111 | nop | ||
112 | |||
113 | kvmap_dtlb_obp: | ||
114 | OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) | ||
115 | |||
116 | KTSB_LOCK_TAG(%g1, %g2, %g7) | ||
117 | |||
118 | KTSB_WRITE(%g1, %g5, %g6) | ||
119 | |||
120 | ba,pt %xcc, kvmap_dtlb_load | ||
121 | nop | ||
121 | 122 | ||
122 | /* | ||
123 | * On a first level data miss, check whether this is to the OBP range (note | ||
124 | * that such accesses can be made by prom, as well as by kernel using | ||
125 | * prom_getproperty on "address"), and if so, do not use vpte access ... | ||
126 | * rather, use information saved during inherit_prom_mappings() using 8k | ||
127 | * pagesize. | ||
128 | */ | ||
129 | .align 32 | 123 | .align 32 |
130 | kvmap: | 124 | kvmap_dtlb_tsb4m_load: |
131 | brgez,pn %g4, kvmap_nonlinear | 125 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
126 | KTSB_WRITE(%g1, %g5, %g6) | ||
127 | ba,pt %xcc, kvmap_dtlb_load | ||
132 | nop | 128 | nop |
133 | 129 | ||
134 | #ifdef CONFIG_DEBUG_PAGEALLOC | 130 | kvmap_dtlb: |
131 | /* %g6: TAG TARGET */ | ||
132 | mov TLB_TAG_ACCESS, %g4 | ||
133 | ldxa [%g4] ASI_DMMU, %g4 | ||
134 | |||
135 | /* sun4v_dtlb_miss branches here with the missing virtual | ||
136 | * address already loaded into %g4 | ||
137 | */ | ||
138 | kvmap_dtlb_4v: | ||
139 | brgez,pn %g4, kvmap_dtlb_nonlinear | ||
140 | nop | ||
141 | |||
142 | /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ | ||
143 | KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) | ||
144 | |||
145 | /* TSB entry address left in %g1, lookup linear PTE. | ||
146 | * Must preserve %g1 and %g6 (TAG). | ||
147 | */ | ||
148 | kvmap_dtlb_tsb4m_miss: | ||
149 | sethi %hi(kpte_linear_bitmap), %g2 | ||
150 | or %g2, %lo(kpte_linear_bitmap), %g2 | ||
151 | |||
152 | /* Clear the PAGE_OFFSET top virtual bits, then shift | ||
153 | * down to get a 256MB physical address index. | ||
154 | */ | ||
155 | sllx %g4, 21, %g5 | ||
156 | mov 1, %g7 | ||
157 | srlx %g5, 21 + 28, %g5 | ||
158 | |||
159 | /* Don't try this at home kids... this depends upon srlx | ||
160 | * only taking the low 6 bits of the shift count in %g5. | ||
161 | */ | ||
162 | sllx %g7, %g5, %g7 | ||
163 | |||
164 | /* Divide by 64 to get the offset into the bitmask. */ | ||
165 | srlx %g5, 6, %g5 | ||
166 | sllx %g5, 3, %g5 | ||
167 | |||
168 | /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ | ||
169 | ldx [%g2 + %g5], %g2 | ||
170 | andcc %g2, %g7, %g0 | ||
171 | sethi %hi(kern_linear_pte_xor), %g5 | ||
172 | or %g5, %lo(kern_linear_pte_xor), %g5 | ||
173 | bne,a,pt %xcc, 1f | ||
174 | add %g5, 8, %g5 | ||
175 | |||
176 | 1: ldx [%g5], %g2 | ||
177 | |||
135 | .globl kvmap_linear_patch | 178 | .globl kvmap_linear_patch |
136 | kvmap_linear_patch: | 179 | kvmap_linear_patch: |
137 | #endif | 180 | ba,pt %xcc, kvmap_dtlb_tsb4m_load |
138 | ba,pt %xcc, kvmap_load | ||
139 | xor %g2, %g4, %g5 | 181 | xor %g2, %g4, %g5 |
140 | 182 | ||
141 | #ifdef CONFIG_DEBUG_PAGEALLOC | 183 | kvmap_dtlb_vmalloc_addr: |
142 | sethi %hi(swapper_pg_dir), %g5 | 184 | KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) |
143 | or %g5, %lo(swapper_pg_dir), %g5 | 185 | |
144 | sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 | 186 | KTSB_LOCK_TAG(%g1, %g2, %g7) |
145 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 187 | |
146 | andn %g6, 0x3, %g6 | 188 | /* Load and check PTE. */ |
147 | lduw [%g5 + %g6], %g5 | 189 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 |
148 | brz,pn %g5, longpath | 190 | mov 1, %g7 |
149 | sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 | 191 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 |
150 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 192 | brgez,a,pn %g5, kvmap_dtlb_longpath |
151 | sllx %g5, 11, %g5 | 193 | KTSB_STORE(%g1, %g7) |
152 | andn %g6, 0x3, %g6 | 194 | |
153 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | 195 | KTSB_WRITE(%g1, %g5, %g6) |
154 | brz,pn %g5, longpath | 196 | |
155 | sllx %g4, 64 - PMD_SHIFT, %g6 | 197 | /* fallthrough to TLB load */ |
156 | srlx %g6, 64 - PAGE_SHIFT, %g6 | 198 | |
157 | sllx %g5, 11, %g5 | 199 | kvmap_dtlb_load: |
158 | andn %g6, 0x7, %g6 | 200 | |
159 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | 201 | 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB |
160 | brz,pn %g5, longpath | 202 | retry |
203 | .section .sun4v_2insn_patch, "ax" | ||
204 | .word 661b | ||
205 | nop | ||
206 | nop | ||
207 | .previous | ||
208 | |||
209 | /* For sun4v the ASI_DTLB_DATA_IN store and the retry | ||
210 | * instruction get nop'd out and we get here to branch | ||
211 | * to the sun4v tlb load code. The registers are setup | ||
212 | * as follows: | ||
213 | * | ||
214 | * %g4: vaddr | ||
215 | * %g5: PTE | ||
216 | * %g6: TAG | ||
217 | * | ||
218 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
219 | * up here. | ||
220 | */ | ||
221 | ba,pt %xcc, sun4v_dtlb_load | ||
222 | mov %g5, %g3 | ||
223 | |||
224 | kvmap_dtlb_nonlinear: | ||
225 | /* Catch kernel NULL pointer derefs. */ | ||
226 | sethi %hi(PAGE_SIZE), %g5 | ||
227 | cmp %g4, %g5 | ||
228 | bleu,pn %xcc, kvmap_dtlb_longpath | ||
161 | nop | 229 | nop |
162 | ba,a,pt %xcc, kvmap_load | ||
163 | #endif | ||
164 | 230 | ||
165 | kvmap_nonlinear: | 231 | KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) |
232 | |||
233 | kvmap_dtlb_tsbmiss: | ||
166 | sethi %hi(MODULES_VADDR), %g5 | 234 | sethi %hi(MODULES_VADDR), %g5 |
167 | cmp %g4, %g5 | 235 | cmp %g4, %g5 |
168 | blu,pn %xcc, longpath | 236 | blu,pn %xcc, kvmap_dtlb_longpath |
169 | mov (VMALLOC_END >> 24), %g5 | 237 | mov (VMALLOC_END >> 24), %g5 |
170 | sllx %g5, 24, %g5 | 238 | sllx %g5, 24, %g5 |
171 | cmp %g4, %g5 | 239 | cmp %g4, %g5 |
172 | bgeu,pn %xcc, longpath | 240 | bgeu,pn %xcc, kvmap_dtlb_longpath |
173 | nop | 241 | nop |
174 | 242 | ||
175 | kvmap_check_obp: | 243 | kvmap_check_obp: |
176 | sethi %hi(LOW_OBP_ADDRESS), %g5 | 244 | sethi %hi(LOW_OBP_ADDRESS), %g5 |
177 | cmp %g4, %g5 | 245 | cmp %g4, %g5 |
178 | blu,pn %xcc, kvmap_vmalloc_addr | 246 | blu,pn %xcc, kvmap_dtlb_vmalloc_addr |
179 | mov 0x1, %g5 | 247 | mov 0x1, %g5 |
180 | sllx %g5, 32, %g5 | 248 | sllx %g5, 32, %g5 |
181 | cmp %g4, %g5 | 249 | cmp %g4, %g5 |
182 | blu,pn %xcc, kvmap_do_obp | 250 | blu,pn %xcc, kvmap_dtlb_obp |
183 | nop | 251 | nop |
184 | 252 | ba,pt %xcc, kvmap_dtlb_vmalloc_addr | |
185 | kvmap_vmalloc_addr: | ||
186 | /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ | ||
187 | ldxa [%g3 + %g6] ASI_N, %g5 | ||
188 | brgez,pn %g5, longpath | ||
189 | nop | 253 | nop |
190 | 254 | ||
191 | kvmap_load: | 255 | kvmap_dtlb_longpath: |
192 | /* PTE is valid, load into TLB and return from trap. */ | 256 | |
193 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | 257 | 661: rdpr %pstate, %g5 |
194 | retry | 258 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate |
259 | .section .sun4v_2insn_patch, "ax" | ||
260 | .word 661b | ||
261 | SET_GL(1) | ||
262 | ldxa [%g0] ASI_SCRATCHPAD, %g5 | ||
263 | .previous | ||
264 | |||
265 | rdpr %tl, %g3 | ||
266 | cmp %g3, 1 | ||
267 | |||
268 | 661: mov TLB_TAG_ACCESS, %g4 | ||
269 | ldxa [%g4] ASI_DMMU, %g5 | ||
270 | .section .sun4v_2insn_patch, "ax" | ||
271 | .word 661b | ||
272 | ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
273 | nop | ||
274 | .previous | ||
275 | |||
276 | be,pt %xcc, sparc64_realfault_common | ||
277 | mov FAULT_CODE_DTLB, %g4 | ||
278 | ba,pt %xcc, winfix_trampoline | ||
279 | nop | ||
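The kvmap_dtlb_tsb4m_miss sequence above boils down to: compute which 256MB region of the linear mapping the address falls in, test that region's bit in kpte_linear_bitmap, and pick kern_linear_pte_xor[0] or [1] accordingly; the chosen value is XORed with the virtual address at kvmap_linear_patch to form the PTE. A rough C equivalent of the index arithmetic (illustrative only; the extern declarations are assumptions, and where C needs an explicit "& 63" the assembly leans on srlx/sllx truncating shift counts to six bits):

	extern unsigned long kpte_linear_bitmap[];
	extern unsigned long kern_linear_pte_xor[2];

	static unsigned long kpte_xor_for(unsigned long vaddr)
	{
		unsigned long idx, word, bit;

		idx  = (vaddr << 21) >> (21 + 28);   /* strip top 21 bits,
						      * index by 256MB      */
		word = kpte_linear_bitmap[idx >> 6]; /* 64 regions per word */
		bit  = 1UL << (idx & 63);

		return kern_linear_pte_xor[(word & bit) ? 1 : 0];
	}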
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 2ff7c32ab0ce..95ffa9418620 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c | |||
@@ -188,6 +188,7 @@ extern void psycho_init(int, char *); | |||
188 | extern void schizo_init(int, char *); | 188 | extern void schizo_init(int, char *); |
189 | extern void schizo_plus_init(int, char *); | 189 | extern void schizo_plus_init(int, char *); |
190 | extern void tomatillo_init(int, char *); | 190 | extern void tomatillo_init(int, char *); |
191 | extern void sun4v_pci_init(int, char *); | ||
191 | 192 | ||
192 | static struct { | 193 | static struct { |
193 | char *model_name; | 194 | char *model_name; |
@@ -204,6 +205,7 @@ static struct { | |||
204 | { "pci108e,8002", schizo_plus_init }, | 205 | { "pci108e,8002", schizo_plus_init }, |
205 | { "SUNW,tomatillo", tomatillo_init }, | 206 | { "SUNW,tomatillo", tomatillo_init }, |
206 | { "pci108e,a801", tomatillo_init }, | 207 | { "pci108e,a801", tomatillo_init }, |
208 | { "SUNW,sun4v-pci", sun4v_pci_init }, | ||
207 | }; | 209 | }; |
208 | #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ | 210 | #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ |
209 | sizeof(pci_controller_table[0])) | 211 | sizeof(pci_controller_table[0])) |
@@ -283,6 +285,12 @@ int __init pcic_present(void) | |||
283 | return pci_controller_scan(pci_is_controller); | 285 | return pci_controller_scan(pci_is_controller); |
284 | } | 286 | } |
285 | 287 | ||
288 | struct pci_iommu_ops *pci_iommu_ops; | ||
289 | EXPORT_SYMBOL(pci_iommu_ops); | ||
290 | |||
291 | extern struct pci_iommu_ops pci_sun4u_iommu_ops, | ||
292 | pci_sun4v_iommu_ops; | ||
293 | |||
286 | /* Find each controller in the system, attach and initialize | 294 | /* Find each controller in the system, attach and initialize |
287 | * software state structure for each and link into the | 295 | * software state structure for each and link into the |
288 | * pci_controller_root. Setup the controller enough such | 296 | * pci_controller_root. Setup the controller enough such |
@@ -290,6 +298,11 @@ int __init pcic_present(void) | |||
290 | */ | 298 | */ |
291 | static void __init pci_controller_probe(void) | 299 | static void __init pci_controller_probe(void) |
292 | { | 300 | { |
301 | if (tlb_type == hypervisor) | ||
302 | pci_iommu_ops = &pci_sun4v_iommu_ops; | ||
303 | else | ||
304 | pci_iommu_ops = &pci_sun4u_iommu_ops; | ||
305 | |||
293 | printk("PCI: Probing for controllers.\n"); | 306 | printk("PCI: Probing for controllers.\n"); |
294 | 307 | ||
295 | pci_controller_scan(pci_controller_init); | 308 | pci_controller_scan(pci_controller_init); |
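With pci_iommu_ops selected once at probe time from tlb_type, the former global pci_* DMA routines can become thin dispatchers over the table. A hedged sketch of what such a wrapper might look like (the real wrappers are introduced elsewhere in this patch series, so treat the body as an assumption):

	static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
						size_t sz, int direction)
	{
		/* dispatch through the table chosen in pci_controller_probe() */
		return pci_iommu_ops->map_single(pdev, ptr, sz, direction);
	}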
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 58310aacea28..33dedb1aacd4 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c | |||
@@ -39,6 +39,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, | |||
39 | { | 39 | { |
40 | int node; | 40 | int node; |
41 | 41 | ||
42 | *nregs = 0; | ||
43 | |||
42 | /* | 44 | /* |
43 | * Return the PBM's PROM node in case we are it's PCI device, | 45 | * Return the PBM's PROM node in case we are it's PCI device, |
44 | * as the PBM's reg property is different to standard PCI reg | 46 | * as the PBM's reg property is different to standard PCI reg |
@@ -51,10 +53,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, | |||
51 | pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || | 53 | pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || |
52 | pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || | 54 | pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || |
53 | pdev->device == PCI_DEVICE_ID_SUN_SABRE || | 55 | pdev->device == PCI_DEVICE_ID_SUN_SABRE || |
54 | pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) { | 56 | pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) |
55 | *nregs = 0; | ||
56 | return bus_prom_node; | 57 | return bus_prom_node; |
57 | } | ||
58 | 58 | ||
59 | node = prom_getchild(bus_prom_node); | 59 | node = prom_getchild(bus_prom_node); |
60 | while (node != 0) { | 60 | while (node != 0) { |
@@ -541,135 +541,183 @@ void __init pci_assign_unassigned(struct pci_pbm_info *pbm, | |||
541 | pci_assign_unassigned(pbm, bus); | 541 | pci_assign_unassigned(pbm, bus); |
542 | } | 542 | } |
543 | 543 | ||
544 | static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) | 544 | static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm, |
545 | struct pci_dev *toplevel_pdev, | ||
546 | struct pci_dev *pdev, | ||
547 | unsigned int interrupt) | ||
545 | { | 548 | { |
546 | struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap; | 549 | unsigned int ret; |
547 | struct linux_prom_pci_intmask bridge_local_intmask, *intmask; | ||
548 | struct pcidev_cookie *dev_pcp = pdev->sysdata; | ||
549 | struct pci_pbm_info *pbm = dev_pcp->pbm; | ||
550 | struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs; | ||
551 | unsigned int hi, mid, lo, irq; | ||
552 | int i, num_intmap, map_slot; | ||
553 | 550 | ||
554 | intmap = &pbm->pbm_intmap[0]; | 551 | if (unlikely(interrupt < 1 || interrupt > 4)) { |
555 | intmask = &pbm->pbm_intmask; | 552 | printk("%s: Device %s interrupt value of %u is strange.\n", |
556 | num_intmap = pbm->num_pbm_intmap; | 553 | pbm->name, pci_name(pdev), interrupt); |
557 | map_slot = 0; | 554 | return interrupt; |
555 | } | ||
558 | 556 | ||
559 | /* If we are underneath a PCI bridge, use PROM register | 557 | ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1; |
560 | * property of the parent bridge which is closest to | 558 | |
561 | * the PBM. | 559 | printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n", |
562 | * | 560 | pbm->name, pci_name(toplevel_pdev), pci_name(pdev), |
563 | * However if that parent bridge has interrupt map/mask | 561 | interrupt, PCI_SLOT(pdev->devfn), ret); |
564 | * properties of its own we use the PROM register property | 562 | |
565 | * of the next child device on the path to PDEV. | 563 | return ret; |
566 | * | 564 | } |
567 | * In detail the two cases are (note that the 'X' below is the | 565 | |
568 | * 'next child on the path to PDEV' mentioned above): | 566 | static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm, |
569 | * | 567 | struct pci_dev *toplevel_pdev, |
570 | * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV | 568 | struct pci_dev *pbus, |
571 | * | 569 | struct pci_dev *pdev, |
572 | * Here we use regs of 'PCI bus' device. | 570 | unsigned int interrupt, |
573 | * | 571 | unsigned int *cnode) |
574 | * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV | 572 | { |
575 | * | 573 | struct linux_prom_pci_intmap imap[PROM_PCIIMAP_MAX]; |
576 | * Here we use regs of 'X'. Note that X can be PDEV. | 574 | struct linux_prom_pci_intmask imask; |
577 | */ | 575 | struct pcidev_cookie *pbus_pcp = pbus->sysdata; |
578 | if (pdev->bus->number != pbm->pci_first_busno) { | 576 | struct pcidev_cookie *pdev_pcp = pdev->sysdata; |
579 | struct pcidev_cookie *bus_pcp, *regs_pcp; | 577 | struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs; |
580 | struct pci_dev *bus_dev, *regs_dev; | 578 | int plen, num_imap, i; |
581 | int plen; | 579 | unsigned int hi, mid, lo, irq, orig_interrupt; |
580 | |||
581 | *cnode = pbus_pcp->prom_node; | ||
582 | |||
583 | plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map", | ||
584 | (char *) &imap[0], sizeof(imap)); | ||
585 | if (plen <= 0 || | ||
586 | (plen % sizeof(struct linux_prom_pci_intmap)) != 0) { | ||
587 | printk("%s: Device %s interrupt-map has bad len %d\n", | ||
588 | pbm->name, pci_name(pbus), plen); | ||
589 | goto no_intmap; | ||
590 | } | ||
591 | num_imap = plen / sizeof(struct linux_prom_pci_intmap); | ||
592 | |||
593 | plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map-mask", | ||
594 | (char *) &imask, sizeof(imask)); | ||
595 | if (plen <= 0 || | ||
596 | (plen % sizeof(struct linux_prom_pci_intmask)) != 0) { | ||
597 | printk("%s: Device %s interrupt-map-mask has bad len %d\n", | ||
598 | pbm->name, pci_name(pbus), plen); | ||
599 | goto no_intmap; | ||
600 | } | ||
601 | |||
602 | orig_interrupt = interrupt; | ||
582 | 603 | ||
583 | bus_dev = pdev->bus->self; | 604 | hi = pregs->phys_hi & imask.phys_hi; |
584 | regs_dev = pdev; | 605 | mid = pregs->phys_mid & imask.phys_mid; |
606 | lo = pregs->phys_lo & imask.phys_lo; | ||
607 | irq = interrupt & imask.interrupt; | ||
585 | 608 | ||
586 | while (bus_dev->bus && | 609 | for (i = 0; i < num_imap; i++) { |
587 | bus_dev->bus->number != pbm->pci_first_busno) { | 610 | if (imap[i].phys_hi == hi && |
588 | regs_dev = bus_dev; | 611 | imap[i].phys_mid == mid && |
589 | bus_dev = bus_dev->bus->self; | 612 | imap[i].phys_lo == lo && |
613 | imap[i].interrupt == irq) { | ||
614 | *cnode = imap[i].cnode; | ||
615 | interrupt = imap[i].cinterrupt; | ||
590 | } | 616 | } |
617 | } | ||
591 | 618 | ||
592 | regs_pcp = regs_dev->sysdata; | 619 | printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n", |
593 | pregs = regs_pcp->prom_regs; | 620 | pbm->name, pci_name(toplevel_pdev), |
621 | pci_name(pbus), pci_name(pdev), | ||
622 | orig_interrupt, interrupt); | ||
594 | 623 | ||
595 | bus_pcp = bus_dev->sysdata; | 624 | no_intmap: |
625 | return interrupt; | ||
626 | } | ||
596 | 627 | ||
597 | /* But if the PCI bridge has its own interrupt map | 628 | /* For each PCI bus on the way to the root: |
598 | * and mask properties, use that and the regs of the | 629 | * 1) If it has an interrupt-map property, apply it. |
599 | * PCI entity at the next level down on the path to the | 630 | * 2) Else, swivel the interrupt number based upon the PCI device number. |
600 | * device. | 631 | * |
601 | */ | 632 | * Return the "IRQ controller" node. If this is the PBM's device node, |
602 | plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map", | 633 | * all interrupt translations are complete, else we should use that node's |
603 | (char *) &bridge_local_intmap[0], | 634 | * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt. |
604 | sizeof(bridge_local_intmap)); | 635 | */ |
605 | if (plen != -1) { | 636 | static unsigned int __init pci_intmap_match_to_root(struct pci_pbm_info *pbm, |
606 | intmap = &bridge_local_intmap[0]; | 637 | struct pci_dev *pdev, |
607 | num_intmap = plen / sizeof(struct linux_prom_pci_intmap); | 638 | unsigned int *interrupt) |
608 | plen = prom_getproperty(bus_pcp->prom_node, | 639 | { |
609 | "interrupt-map-mask", | 640 | struct pci_dev *toplevel_pdev = pdev; |
610 | (char *) &bridge_local_intmask, | 641 | struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata; |
611 | sizeof(bridge_local_intmask)); | 642 | unsigned int cnode = toplevel_pcp->prom_node; |
612 | if (plen == -1) { | 643 | |
613 | printk("pci_intmap_match: Warning! Bridge has intmap " | 644 | while (pdev->bus->number != pbm->pci_first_busno) { |
614 | "but no intmask.\n"); | 645 | struct pci_dev *pbus = pdev->bus->self; |
615 | printk("pci_intmap_match: Trying to recover.\n"); | 646 | struct pcidev_cookie *pcp = pbus->sysdata; |
616 | return 0; | 647 | int plen; |
617 | } | ||
618 | 648 | ||
619 | if (pdev->bus->self != bus_dev) | 649 | plen = prom_getproplen(pcp->prom_node, "interrupt-map"); |
620 | map_slot = 1; | 650 | if (plen <= 0) { |
651 | *interrupt = pci_slot_swivel(pbm, toplevel_pdev, | ||
652 | pdev, *interrupt); | ||
653 | cnode = pcp->prom_node; | ||
621 | } else { | 654 | } else { |
622 | pregs = bus_pcp->prom_regs; | 655 | *interrupt = pci_apply_intmap(pbm, toplevel_pdev, |
623 | map_slot = 1; | 656 | pbus, pdev, |
657 | *interrupt, &cnode); | ||
658 | |||
659 | while (pcp->prom_node != cnode && | ||
660 | pbus->bus->number != pbm->pci_first_busno) { | ||
661 | pbus = pbus->bus->self; | ||
662 | pcp = pbus->sysdata; | ||
663 | } | ||
624 | } | 664 | } |
625 | } | 665 | pdev = pbus; |
626 | 666 | ||
627 | if (map_slot) { | 667 | if (cnode == pbm->prom_node) |
628 | *interrupt = ((*interrupt | 668 | break; |
629 | - 1 | ||
630 | + PCI_SLOT(pdev->devfn)) & 0x3) + 1; | ||
631 | } | 669 | } |
632 | 670 | ||
633 | hi = pregs->phys_hi & intmask->phys_hi; | 671 | return cnode; |
634 | mid = pregs->phys_mid & intmask->phys_mid; | 672 | } |
635 | lo = pregs->phys_lo & intmask->phys_lo; | 673 | |
636 | irq = *interrupt & intmask->interrupt; | 674 | static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) |
637 | 675 | { | |
638 | for (i = 0; i < num_intmap; i++) { | 676 | struct pcidev_cookie *dev_pcp = pdev->sysdata; |
639 | if (intmap[i].phys_hi == hi && | 677 | struct pci_pbm_info *pbm = dev_pcp->pbm; |
640 | intmap[i].phys_mid == mid && | 678 | struct linux_prom_pci_registers reg[PROMREG_MAX]; |
641 | intmap[i].phys_lo == lo && | 679 | unsigned int hi, mid, lo, irq; |
642 | intmap[i].interrupt == irq) { | 680 | int i, cnode, plen; |
643 | *interrupt = intmap[i].cinterrupt; | 681 | |
644 | printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n", | 682 | cnode = pci_intmap_match_to_root(pbm, pdev, interrupt); |
645 | pdev->bus->number, PCI_SLOT(pdev->devfn), | 683 | if (cnode == pbm->prom_node) |
646 | map_slot, *interrupt); | 684 | goto success; |
647 | return 1; | 685 | |
648 | } | 686 | plen = prom_getproperty(cnode, "reg", (char *) reg, sizeof(reg)); |
687 | if (plen <= 0 || | ||
688 | (plen % sizeof(struct linux_prom_pci_registers)) != 0) { | ||
689 | printk("%s: OBP node %x reg property has bad len %d\n", | ||
690 | pbm->name, cnode, plen); | ||
691 | goto fail; | ||
649 | } | 692 | } |
650 | 693 | ||
651 | /* We will run this code even if pbm->num_pbm_intmap is zero, just so | 694 | hi = reg[0].phys_hi & pbm->pbm_intmask.phys_hi; |
652 | * we can apply the slot mapping to the PROM interrupt property value. | 695 | mid = reg[0].phys_mid & pbm->pbm_intmask.phys_mid; |
653 | * So do not spit out these warnings in that case. | 696 | lo = reg[0].phys_lo & pbm->pbm_intmask.phys_lo; |
654 | */ | 697 | irq = *interrupt & pbm->pbm_intmask.interrupt; |
655 | if (num_intmap != 0) { | 698 | |
656 | /* Print it both to OBP console and kernel one so that if bootup | 699 | for (i = 0; i < pbm->num_pbm_intmap; i++) { |
657 | * hangs here the user has the information to report. | 700 | struct linux_prom_pci_intmap *intmap; |
658 | */ | 701 | |
659 | prom_printf("pci_intmap_match: bus %02x, devfn %02x: ", | 702 | intmap = &pbm->pbm_intmap[i]; |
660 | pdev->bus->number, pdev->devfn); | 703 | |
661 | prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", | 704 | if (intmap->phys_hi == hi && |
662 | pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); | 705 | intmap->phys_mid == mid && |
663 | prom_printf("Please email this information to davem@redhat.com\n"); | 706 | intmap->phys_lo == lo && |
664 | 707 | intmap->interrupt == irq) { | |
665 | printk("pci_intmap_match: bus %02x, devfn %02x: ", | 708 | *interrupt = intmap->cinterrupt; |
666 | pdev->bus->number, pdev->devfn); | 709 | goto success; |
667 | printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", | 710 | } |
668 | pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); | ||
669 | printk("Please email this information to davem@redhat.com\n"); | ||
670 | } | 711 | } |
671 | 712 | ||
713 | fail: | ||
672 | return 0; | 714 | return 0; |
715 | |||
716 | success: | ||
717 | printk("PCI-IRQ: Routing bus[%2x] slot[%2x] to INO[%02x]\n", | ||
718 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
719 | *interrupt); | ||
720 | return 1; | ||
673 | } | 721 | } |
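The rotation applied by pci_slot_swivel() is the standard PCI bridge interrupt swizzle: pin INTx from slot s emerges as INT(((x - 1 + s) mod 4) + 1) at the parent bus. A small worked example (swivel() is a stand-in for the static helper above):

	static unsigned int swivel(unsigned int pin, unsigned int slot)
	{
		return ((pin - 1 + (slot & 3)) & 3) + 1;
	}

	/* swivel(1, 0) == 1	INTA behind slot 0 stays INTA
	 * swivel(1, 2) == 3	INTA behind slot 2 becomes INTC
	 * swivel(4, 1) == 1	INTD behind slot 1 wraps to INTA
	 */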
674 | 722 | ||
675 | static void __init pdev_fixup_irq(struct pci_dev *pdev) | 723 | static void __init pdev_fixup_irq(struct pci_dev *pdev) |
@@ -703,16 +751,18 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev) | |||
703 | return; | 751 | return; |
704 | } | 752 | } |
705 | 753 | ||
706 | /* Fully specified already? */ | 754 | if (tlb_type != hypervisor) { |
707 | if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { | 755 | /* Fully specified already? */ |
708 | pdev->irq = p->irq_build(pbm, pdev, prom_irq); | 756 | if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { |
709 | goto have_irq; | 757 | pdev->irq = p->irq_build(pbm, pdev, prom_irq); |
710 | } | 758 | goto have_irq; |
759 | } | ||
711 | 760 | ||
712 | /* An onboard device? (bit 5 set) */ | 761 | /* An onboard device? (bit 5 set) */ |
713 | if ((prom_irq & PCI_IRQ_INO) & 0x20) { | 762 | if ((prom_irq & PCI_IRQ_INO) & 0x20) { |
714 | pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); | 763 | pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); |
715 | goto have_irq; | 764 | goto have_irq; |
765 | } | ||
716 | } | 766 | } |
717 | 767 | ||
718 | /* Can we find a matching entry in the interrupt-map? */ | 768 | /* Can we find a matching entry in the interrupt-map? */ |
@@ -927,33 +977,30 @@ void pci_register_legacy_regions(struct resource *io_res, | |||
927 | struct resource *p; | 977 | struct resource *p; |
928 | 978 | ||
929 | /* VGA Video RAM. */ | 979 | /* VGA Video RAM. */ |
930 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 980 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
931 | if (!p) | 981 | if (!p) |
932 | return; | 982 | return; |
933 | 983 | ||
934 | memset(p, 0, sizeof(*p)); | ||
935 | p->name = "Video RAM area"; | 984 | p->name = "Video RAM area"; |
936 | p->start = mem_res->start + 0xa0000UL; | 985 | p->start = mem_res->start + 0xa0000UL; |
937 | p->end = p->start + 0x1ffffUL; | 986 | p->end = p->start + 0x1ffffUL; |
938 | p->flags = IORESOURCE_BUSY; | 987 | p->flags = IORESOURCE_BUSY; |
939 | request_resource(mem_res, p); | 988 | request_resource(mem_res, p); |
940 | 989 | ||
941 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 990 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
942 | if (!p) | 991 | if (!p) |
943 | return; | 992 | return; |
944 | 993 | ||
945 | memset(p, 0, sizeof(*p)); | ||
946 | p->name = "System ROM"; | 994 | p->name = "System ROM"; |
947 | p->start = mem_res->start + 0xf0000UL; | 995 | p->start = mem_res->start + 0xf0000UL; |
948 | p->end = p->start + 0xffffUL; | 996 | p->end = p->start + 0xffffUL; |
949 | p->flags = IORESOURCE_BUSY; | 997 | p->flags = IORESOURCE_BUSY; |
950 | request_resource(mem_res, p); | 998 | request_resource(mem_res, p); |
951 | 999 | ||
952 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 1000 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
953 | if (!p) | 1001 | if (!p) |
954 | return; | 1002 | return; |
955 | 1003 | ||
956 | memset(p, 0, sizeof(*p)); | ||
957 | p->name = "Video ROM"; | 1004 | p->name = "Video ROM"; |
958 | p->start = mem_res->start + 0xc0000UL; | 1005 | p->start = mem_res->start + 0xc0000UL; |
959 | p->end = p->start + 0x7fffUL; | 1006 | p->end = p->start + 0x7fffUL; |
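The kmalloc()+memset() to kzalloc() conversions repeated through this patch are behavior-preserving, since kzalloc() hands back already-zeroed memory:

	/* before */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));

	/* after, equivalent */
	p = kzalloc(sizeof(*p), GFP_KERNEL);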
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index a11910be1013..8efbc139769d 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c | |||
@@ -139,12 +139,11 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, | |||
139 | /* Allocate and initialize the free area map. */ | 139 | /* Allocate and initialize the free area map. */ |
140 | sz = num_tsb_entries / 8; | 140 | sz = num_tsb_entries / 8; |
141 | sz = (sz + 7UL) & ~7UL; | 141 | sz = (sz + 7UL) & ~7UL; |
142 | iommu->arena.map = kmalloc(sz, GFP_KERNEL); | 142 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); |
143 | if (!iommu->arena.map) { | 143 | if (!iommu->arena.map) { |
144 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | 144 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); |
145 | prom_halt(); | 145 | prom_halt(); |
146 | } | 146 | } |
147 | memset(iommu->arena.map, 0, sz); | ||
148 | iommu->arena.limit = num_tsb_entries; | 147 | iommu->arena.limit = num_tsb_entries; |
149 | 148 | ||
150 | /* Allocate and initialize the dummy page which we | 149 | /* Allocate and initialize the dummy page which we |
@@ -219,7 +218,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) | |||
219 | * DMA for PCI device PDEV. Return non-NULL cpu-side address if | 218 | * DMA for PCI device PDEV. Return non-NULL cpu-side address if |
220 | * successful and set *DMA_ADDRP to the PCI side dma address. | 219 | * successful and set *DMA_ADDRP to the PCI side dma address. |
221 | */ | 220 | */ |
222 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) | 221 | static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) |
223 | { | 222 | { |
224 | struct pcidev_cookie *pcp; | 223 | struct pcidev_cookie *pcp; |
225 | struct pci_iommu *iommu; | 224 | struct pci_iommu *iommu; |
@@ -267,7 +266,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad | |||
267 | } | 266 | } |
268 | 267 | ||
269 | /* Free and unmap a consistent DMA translation. */ | 268 | /* Free and unmap a consistent DMA translation. */ |
270 | void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | 269 | static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) |
271 | { | 270 | { |
272 | struct pcidev_cookie *pcp; | 271 | struct pcidev_cookie *pcp; |
273 | struct pci_iommu *iommu; | 272 | struct pci_iommu *iommu; |
@@ -294,7 +293,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_ | |||
294 | /* Map a single buffer at PTR of SZ bytes for PCI DMA | 293 | /* Map a single buffer at PTR of SZ bytes for PCI DMA |
295 | * in streaming mode. | 294 | * in streaming mode. |
296 | */ | 295 | */ |
297 | dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | 296 | static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) |
298 | { | 297 | { |
299 | struct pcidev_cookie *pcp; | 298 | struct pcidev_cookie *pcp; |
300 | struct pci_iommu *iommu; | 299 | struct pci_iommu *iommu; |
@@ -415,7 +414,7 @@ do_flush_sync: | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* Unmap a single streaming mode DMA translation. */ | 416 | /* Unmap a single streaming mode DMA translation. */ |
418 | void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 417 | static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
419 | { | 418 | { |
420 | struct pcidev_cookie *pcp; | 419 | struct pcidev_cookie *pcp; |
421 | struct pci_iommu *iommu; | 420 | struct pci_iommu *iommu; |
@@ -548,7 +547,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
548 | * When making changes here, inspect the assembly output. I was having | 547 | * When making changes here, inspect the assembly output. I was having |
549 | * a hard time keeping this routine from using stack slots for variables. | 548 | * a hard time keeping this routine from using stack slots for variables. |
550 | */ | 549 | */ |
551 | int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 550 | static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
552 | { | 551 | { |
553 | struct pcidev_cookie *pcp; | 552 | struct pcidev_cookie *pcp; |
554 | struct pci_iommu *iommu; | 553 | struct pci_iommu *iommu; |
@@ -562,9 +561,9 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int | |||
562 | /* Fast path single entry scatterlists. */ | 561 | /* Fast path single entry scatterlists. */ |
563 | if (nelems == 1) { | 562 | if (nelems == 1) { |
564 | sglist->dma_address = | 563 | sglist->dma_address = |
565 | pci_map_single(pdev, | 564 | pci_4u_map_single(pdev, |
566 | (page_address(sglist->page) + sglist->offset), | 565 | (page_address(sglist->page) + sglist->offset), |
567 | sglist->length, direction); | 566 | sglist->length, direction); |
568 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) | 567 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) |
569 | return 0; | 568 | return 0; |
570 | sglist->dma_length = sglist->length; | 569 | sglist->dma_length = sglist->length; |
@@ -635,7 +634,7 @@ bad_no_ctx: | |||
635 | } | 634 | } |
636 | 635 | ||
637 | /* Unmap a set of streaming mode DMA translations. */ | 636 | /* Unmap a set of streaming mode DMA translations. */ |
638 | void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 637 | static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
639 | { | 638 | { |
640 | struct pcidev_cookie *pcp; | 639 | struct pcidev_cookie *pcp; |
641 | struct pci_iommu *iommu; | 640 | struct pci_iommu *iommu; |
@@ -695,7 +694,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, | |||
695 | /* Make physical memory consistent for a single | 694 | /* Make physical memory consistent for a single |
696 | * streaming mode DMA translation after a transfer. | 695 | * streaming mode DMA translation after a transfer. |
697 | */ | 696 | */ |
698 | void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 697 | static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
699 | { | 698 | { |
700 | struct pcidev_cookie *pcp; | 699 | struct pcidev_cookie *pcp; |
701 | struct pci_iommu *iommu; | 700 | struct pci_iommu *iommu; |
@@ -735,7 +734,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size | |||
735 | /* Make physical memory consistent for a set of streaming | 734 | /* Make physical memory consistent for a set of streaming |
736 | * mode DMA translations after a transfer. | 735 | * mode DMA translations after a transfer. |
737 | */ | 736 | */ |
738 | void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 737 | static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
739 | { | 738 | { |
740 | struct pcidev_cookie *pcp; | 739 | struct pcidev_cookie *pcp; |
741 | struct pci_iommu *iommu; | 740 | struct pci_iommu *iommu; |
@@ -776,6 +775,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i | |||
776 | spin_unlock_irqrestore(&iommu->lock, flags); | 775 | spin_unlock_irqrestore(&iommu->lock, flags); |
777 | } | 776 | } |
778 | 777 | ||
778 | struct pci_iommu_ops pci_sun4u_iommu_ops = { | ||
779 | .alloc_consistent = pci_4u_alloc_consistent, | ||
780 | .free_consistent = pci_4u_free_consistent, | ||
781 | .map_single = pci_4u_map_single, | ||
782 | .unmap_single = pci_4u_unmap_single, | ||
783 | .map_sg = pci_4u_map_sg, | ||
784 | .unmap_sg = pci_4u_unmap_sg, | ||
785 | .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu, | ||
786 | .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu, | ||
787 | }; | ||
788 | |||
779 | static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | 789 | static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) |
780 | { | 790 | { |
781 | struct pci_dev *ali_isa_bridge; | 791 | struct pci_dev *ali_isa_bridge; |
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index c03ed5f49d31..d17878b145c2 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c | |||
@@ -286,17 +286,17 @@ static unsigned char psycho_pil_table[] = { | |||
286 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ | 286 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ |
287 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ | 287 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ |
288 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ | 288 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ |
289 | /*0x20*/4, /* SCSI */ | 289 | /*0x20*/5, /* SCSI */ |
290 | /*0x21*/5, /* Ethernet */ | 290 | /*0x21*/5, /* Ethernet */ |
291 | /*0x22*/8, /* Parallel Port */ | 291 | /*0x22*/8, /* Parallel Port */ |
292 | /*0x23*/13, /* Audio Record */ | 292 | /*0x23*/13, /* Audio Record */ |
293 | /*0x24*/14, /* Audio Playback */ | 293 | /*0x24*/14, /* Audio Playback */ |
294 | /*0x25*/15, /* PowerFail */ | 294 | /*0x25*/15, /* PowerFail */ |
295 | /*0x26*/4, /* second SCSI */ | 295 | /*0x26*/5, /* second SCSI */ |
296 | /*0x27*/11, /* Floppy */ | 296 | /*0x27*/11, /* Floppy */ |
297 | /*0x28*/4, /* Spare Hardware */ | 297 | /*0x28*/5, /* Spare Hardware */ |
298 | /*0x29*/9, /* Keyboard */ | 298 | /*0x29*/9, /* Keyboard */ |
299 | /*0x2a*/4, /* Mouse */ | 299 | /*0x2a*/5, /* Mouse */ |
300 | /*0x2b*/12, /* Serial */ | 300 | /*0x2b*/12, /* Serial */ |
301 | /*0x2c*/10, /* Timer 0 */ | 301 | /*0x2c*/10, /* Timer 0 */ |
302 | /*0x2d*/11, /* Timer 1 */ | 302 | /*0x2d*/11, /* Timer 1 */ |
@@ -313,11 +313,11 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
313 | 313 | ||
314 | ret = psycho_pil_table[ino]; | 314 | ret = psycho_pil_table[ino]; |
315 | if (ret == 0 && pdev == NULL) { | 315 | if (ret == 0 && pdev == NULL) { |
316 | ret = 4; | 316 | ret = 5; |
317 | } else if (ret == 0) { | 317 | } else if (ret == 0) { |
318 | switch ((pdev->class >> 16) & 0xff) { | 318 | switch ((pdev->class >> 16) & 0xff) { |
319 | case PCI_BASE_CLASS_STORAGE: | 319 | case PCI_BASE_CLASS_STORAGE: |
320 | ret = 4; | 320 | ret = 5; |
321 | break; | 321 | break; |
322 | 322 | ||
323 | case PCI_BASE_CLASS_NETWORK: | 323 | case PCI_BASE_CLASS_NETWORK: |
@@ -336,7 +336,7 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
336 | break; | 336 | break; |
337 | 337 | ||
338 | default: | 338 | default: |
339 | ret = 4; | 339 | ret = 5; |
340 | break; | 340 | break; |
341 | }; | 341 | }; |
342 | } | 342 | } |
@@ -1164,7 +1164,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
1164 | static void pbm_scan_bus(struct pci_controller_info *p, | 1164 | static void pbm_scan_bus(struct pci_controller_info *p, |
1165 | struct pci_pbm_info *pbm) | 1165 | struct pci_pbm_info *pbm) |
1166 | { | 1166 | { |
1167 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1167 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1168 | 1168 | ||
1169 | if (!cookie) { | 1169 | if (!cookie) { |
1170 | prom_printf("PSYCHO: Critical allocation failure.\n"); | 1170 | prom_printf("PSYCHO: Critical allocation failure.\n"); |
@@ -1172,7 +1172,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, | |||
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | /* All we care about is the PBM. */ | 1174 | /* All we care about is the PBM. */ |
1175 | memset(cookie, 0, sizeof(*cookie)); | ||
1176 | cookie->pbm = pbm; | 1175 | cookie->pbm = pbm; |
1177 | 1176 | ||
1178 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | 1177 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, |
@@ -1465,18 +1464,16 @@ void psycho_init(int node, char *model_name) | |||
1465 | } | 1464 | } |
1466 | } | 1465 | } |
1467 | 1466 | ||
1468 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 1467 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
1469 | if (!p) { | 1468 | if (!p) { |
1470 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1469 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1471 | prom_halt(); | 1470 | prom_halt(); |
1472 | } | 1471 | } |
1473 | memset(p, 0, sizeof(*p)); | 1472 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
1474 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1475 | if (!iommu) { | 1473 | if (!iommu) { |
1476 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1474 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1477 | prom_halt(); | 1475 | prom_halt(); |
1478 | } | 1476 | } |
1479 | memset(iommu, 0, sizeof(*iommu)); | ||
1480 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; | 1477 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; |
1481 | 1478 | ||
1482 | p->next = pci_controller_root; | 1479 | p->next = pci_controller_root; |
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index da8e1364194f..f67bb7f078cf 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c | |||
@@ -533,17 +533,17 @@ static unsigned char sabre_pil_table[] = { | |||
533 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ | 533 | /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ |
534 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ | 534 | /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ |
535 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ | 535 | /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ |
536 | /*0x20*/4, /* SCSI */ | 536 | /*0x20*/5, /* SCSI */ |
537 | /*0x21*/5, /* Ethernet */ | 537 | /*0x21*/5, /* Ethernet */ |
538 | /*0x22*/8, /* Parallel Port */ | 538 | /*0x22*/8, /* Parallel Port */ |
539 | /*0x23*/13, /* Audio Record */ | 539 | /*0x23*/13, /* Audio Record */ |
540 | /*0x24*/14, /* Audio Playback */ | 540 | /*0x24*/14, /* Audio Playback */ |
541 | /*0x25*/15, /* PowerFail */ | 541 | /*0x25*/15, /* PowerFail */ |
542 | /*0x26*/4, /* second SCSI */ | 542 | /*0x26*/5, /* second SCSI */ |
543 | /*0x27*/11, /* Floppy */ | 543 | /*0x27*/11, /* Floppy */ |
544 | /*0x28*/4, /* Spare Hardware */ | 544 | /*0x28*/5, /* Spare Hardware */ |
545 | /*0x29*/9, /* Keyboard */ | 545 | /*0x29*/9, /* Keyboard */ |
546 | /*0x2a*/4, /* Mouse */ | 546 | /*0x2a*/5, /* Mouse */ |
547 | /*0x2b*/12, /* Serial */ | 547 | /*0x2b*/12, /* Serial */ |
548 | /*0x2c*/10, /* Timer 0 */ | 548 | /*0x2c*/10, /* Timer 0 */ |
549 | /*0x2d*/11, /* Timer 1 */ | 549 | /*0x2d*/11, /* Timer 1 */ |
@@ -565,11 +565,11 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
565 | 565 | ||
566 | ret = sabre_pil_table[ino]; | 566 | ret = sabre_pil_table[ino]; |
567 | if (ret == 0 && pdev == NULL) { | 567 | if (ret == 0 && pdev == NULL) { |
568 | ret = 4; | 568 | ret = 5; |
569 | } else if (ret == 0) { | 569 | } else if (ret == 0) { |
570 | switch ((pdev->class >> 16) & 0xff) { | 570 | switch ((pdev->class >> 16) & 0xff) { |
571 | case PCI_BASE_CLASS_STORAGE: | 571 | case PCI_BASE_CLASS_STORAGE: |
572 | ret = 4; | 572 | ret = 5; |
573 | break; | 573 | break; |
574 | 574 | ||
575 | case PCI_BASE_CLASS_NETWORK: | 575 | case PCI_BASE_CLASS_NETWORK: |
@@ -588,7 +588,7 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
588 | break; | 588 | break; |
589 | 589 | ||
590 | default: | 590 | default: |
591 | ret = 4; | 591 | ret = 5; |
592 | break; | 592 | break; |
593 | }; | 593 | }; |
594 | } | 594 | } |
@@ -1167,7 +1167,7 @@ static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) | |||
1167 | 1167 | ||
1168 | static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) | 1168 | static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) |
1169 | { | 1169 | { |
1170 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1170 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1171 | 1171 | ||
1172 | if (!cookie) { | 1172 | if (!cookie) { |
1173 | prom_printf("SABRE: Critical allocation failure.\n"); | 1173 | prom_printf("SABRE: Critical allocation failure.\n"); |
@@ -1175,7 +1175,6 @@ static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) | |||
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | /* All we care about is the PBM. */ | 1177 | /* All we care about is the PBM. */ |
1178 | memset(cookie, 0, sizeof(*cookie)); | ||
1179 | cookie->pbm = pbm; | 1178 | cookie->pbm = pbm; |
1180 | 1179 | ||
1181 | return cookie; | 1180 | return cookie; |
@@ -1556,19 +1555,17 @@ void sabre_init(int pnode, char *model_name) | |||
1556 | } | 1555 | } |
1557 | } | 1556 | } |
1558 | 1557 | ||
1559 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | 1558 | p = kzalloc(sizeof(*p), GFP_ATOMIC); |
1560 | if (!p) { | 1559 | if (!p) { |
1561 | prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); | 1560 | prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); |
1562 | prom_halt(); | 1561 | prom_halt(); |
1563 | } | 1562 | } |
1564 | memset(p, 0, sizeof(*p)); | ||
1565 | 1563 | ||
1566 | iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC); | 1564 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); |
1567 | if (!iommu) { | 1565 | if (!iommu) { |
1568 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); | 1566 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); |
1569 | prom_halt(); | 1567 | prom_halt(); |
1570 | } | 1568 | } |
1571 | memset(iommu, 0, sizeof(*iommu)); | ||
1572 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; | 1569 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; |
1573 | 1570 | ||
1574 | upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); | 1571 | upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); |
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index d8c4e0919b4e..7fe4de03ac2e 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c | |||
@@ -243,8 +243,8 @@ static unsigned char schizo_pil_table[] = { | |||
243 | /*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ | 243 | /*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ |
244 | /*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ | 244 | /*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ |
245 | /*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ | 245 | /*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ |
246 | /*0x18*/4, /* SCSI */ | 246 | /*0x18*/5, /* SCSI */ |
247 | /*0x19*/4, /* second SCSI */ | 247 | /*0x19*/5, /* second SCSI */ |
248 | /*0x1a*/0, /* UNKNOWN */ | 248 | /*0x1a*/0, /* UNKNOWN */ |
249 | /*0x1b*/0, /* UNKNOWN */ | 249 | /*0x1b*/0, /* UNKNOWN */ |
250 | /*0x1c*/8, /* Parallel */ | 250 | /*0x1c*/8, /* Parallel */ |
@@ -254,7 +254,7 @@ static unsigned char schizo_pil_table[] = { | |||
254 | /*0x20*/13, /* Audio Record */ | 254 | /*0x20*/13, /* Audio Record */ |
255 | /*0x21*/14, /* Audio Playback */ | 255 | /*0x21*/14, /* Audio Playback */ |
256 | /*0x22*/12, /* Serial */ | 256 | /*0x22*/12, /* Serial */ |
257 | /*0x23*/4, /* EBUS I2C */ | 257 | /*0x23*/5, /* EBUS I2C */ |
258 | /*0x24*/10, /* RTC Clock */ | 258 | /*0x24*/10, /* RTC Clock */ |
259 | /*0x25*/11, /* Floppy */ | 259 | /*0x25*/11, /* Floppy */ |
260 | /*0x26*/0, /* UNKNOWN */ | 260 | /*0x26*/0, /* UNKNOWN */ |
@@ -296,11 +296,11 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
296 | 296 | ||
297 | ret = schizo_pil_table[ino]; | 297 | ret = schizo_pil_table[ino]; |
298 | if (ret == 0 && pdev == NULL) { | 298 | if (ret == 0 && pdev == NULL) { |
299 | ret = 4; | 299 | ret = 5; |
300 | } else if (ret == 0) { | 300 | } else if (ret == 0) { |
301 | switch ((pdev->class >> 16) & 0xff) { | 301 | switch ((pdev->class >> 16) & 0xff) { |
302 | case PCI_BASE_CLASS_STORAGE: | 302 | case PCI_BASE_CLASS_STORAGE: |
303 | ret = 4; | 303 | ret = 5; |
304 | break; | 304 | break; |
305 | 305 | ||
306 | case PCI_BASE_CLASS_NETWORK: | 306 | case PCI_BASE_CLASS_NETWORK: |
@@ -319,7 +319,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
319 | break; | 319 | break; |
320 | 320 | ||
321 | default: | 321 | default: |
322 | ret = 4; | 322 | ret = 5; |
323 | break; | 323 | break; |
324 | }; | 324 | }; |
325 | } | 325 | } |
@@ -1525,7 +1525,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
1525 | static void pbm_scan_bus(struct pci_controller_info *p, | 1525 | static void pbm_scan_bus(struct pci_controller_info *p, |
1526 | struct pci_pbm_info *pbm) | 1526 | struct pci_pbm_info *pbm) |
1527 | { | 1527 | { |
1528 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | 1528 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
1529 | 1529 | ||
1530 | if (!cookie) { | 1530 | if (!cookie) { |
1531 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | 1531 | prom_printf("%s: Critical allocation failure.\n", pbm->name); |
@@ -1533,7 +1533,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, | |||
1533 | } | 1533 | } |
1534 | 1534 | ||
1535 | /* All we care about is the PBM. */ | 1535 | /* All we care about is the PBM. */ |
1536 | memset(cookie, 0, sizeof(*cookie)); | ||
1537 | cookie->pbm = pbm; | 1536 | cookie->pbm = pbm; |
1538 | 1537 | ||
1539 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | 1538 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, |
@@ -2120,27 +2119,24 @@ static void __schizo_init(int node, char *model_name, int chip_type) | |||
2120 | } | 2119 | } |
2121 | } | 2120 | } |
2122 | 2121 | ||
2123 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 2122 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
2124 | if (!p) { | 2123 | if (!p) { |
2125 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2124 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2126 | prom_halt(); | 2125 | prom_halt(); |
2127 | } | 2126 | } |
2128 | memset(p, 0, sizeof(*p)); | ||
2129 | 2127 | ||
2130 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 2128 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
2131 | if (!iommu) { | 2129 | if (!iommu) { |
2132 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2130 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2133 | prom_halt(); | 2131 | prom_halt(); |
2134 | } | 2132 | } |
2135 | memset(iommu, 0, sizeof(*iommu)); | ||
2136 | p->pbm_A.iommu = iommu; | 2133 | p->pbm_A.iommu = iommu; |
2137 | 2134 | ||
2138 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 2135 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
2139 | if (!iommu) { | 2136 | if (!iommu) { |
2140 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 2137 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); |
2141 | prom_halt(); | 2138 | prom_halt(); |
2142 | } | 2139 | } |
2143 | memset(iommu, 0, sizeof(*iommu)); | ||
2144 | p->pbm_B.iommu = iommu; | 2140 | p->pbm_B.iommu = iommu; |
2145 | 2141 | ||
2146 | p->next = pci_controller_root; | 2142 | p->next = pci_controller_root; |
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c new file mode 100644 index 000000000000..9372d4f376d5 --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -0,0 +1,1147 @@ | |||
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | |||
14 | #include <asm/pbm.h> | ||
15 | #include <asm/iommu.h> | ||
16 | #include <asm/irq.h> | ||
17 | #include <asm/upa.h> | ||
18 | #include <asm/pstate.h> | ||
19 | #include <asm/oplib.h> | ||
20 | #include <asm/hypervisor.h> | ||
21 | |||
22 | #include "pci_impl.h" | ||
23 | #include "iommu_common.h" | ||
24 | |||
25 | #include "pci_sun4v.h" | ||
26 | |||
27 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) | ||
28 | |||
29 | struct pci_iommu_batch { | ||
30 | struct pci_dev *pdev; /* Device the mapping is for. */ |||
31 | unsigned long prot; /* IOMMU page protections */ | ||
32 | unsigned long entry; /* Index into IOTSB. */ | ||
33 | u64 *pglist; /* List of physical pages */ | ||
34 | unsigned long npages; /* Number of pages in list. */ | ||
35 | }; | ||
36 | |||
37 | static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); | ||
38 | |||
39 | /* Interrupts must be disabled. */ | ||
40 | static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) | ||
41 | { | ||
42 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
43 | |||
44 | p->pdev = pdev; | ||
45 | p->prot = prot; | ||
46 | p->entry = entry; | ||
47 | p->npages = 0; | ||
48 | } | ||
49 | |||
50 | /* Interrupts must be disabled. */ | ||
51 | static long pci_iommu_batch_flush(struct pci_iommu_batch *p) | ||
52 | { | ||
53 | struct pcidev_cookie *pcp = p->pdev->sysdata; | ||
54 | unsigned long devhandle = pcp->pbm->devhandle; | ||
55 | unsigned long prot = p->prot; | ||
56 | unsigned long entry = p->entry; | ||
57 | u64 *pglist = p->pglist; | ||
58 | unsigned long npages = p->npages; | ||
59 | |||
60 | while (npages != 0) { | ||
61 | long num; | ||
62 | |||
63 | num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), | ||
64 | npages, prot, __pa(pglist)); | ||
65 | if (unlikely(num < 0)) { | ||
66 | if (printk_ratelimit()) | ||
67 | printk("pci_iommu_batch_flush: IOMMU map of " | ||
68 | "[%08lx:%08lx:%lx:%lx:%lx] failed with " | ||
69 | "status %ld\n", | ||
70 | devhandle, HV_PCI_TSBID(0, entry), | ||
71 | npages, prot, __pa(pglist), num); | ||
72 | return -1; | ||
73 | } | ||
74 | |||
75 | entry += num; | ||
76 | npages -= num; | ||
77 | pglist += num; | ||
78 | } | ||
79 | |||
80 | p->entry = entry; | ||
81 | p->npages = 0; | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | /* Interrupts must be disabled. */ | ||
87 | static inline long pci_iommu_batch_add(u64 phys_page) | ||
88 | { | ||
89 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
90 | |||
91 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
92 | |||
93 | p->pglist[p->npages++] = phys_page; | ||
94 | if (p->npages == PGLIST_NENTS) | ||
95 | return pci_iommu_batch_flush(p); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | /* Interrupts must be disabled. */ | ||
101 | static inline long pci_iommu_batch_end(void) | ||
102 | { | ||
103 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | ||
104 | |||
105 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
106 | |||
107 | return pci_iommu_batch_flush(p); | ||
108 | } | ||
109 | |||
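These batch helpers exist to amortize hypervisor traps: callers queue up to PGLIST_NENTS physical page addresses (1024 with 8K pages) and each flush installs them through as few pci_sun4v_iommu_map() calls as the hypervisor permits. A condensed usage sketch under the same locking rules (interrupts disabled; pci_4v_alloc_consistent() below is a real caller):

	local_irq_save(flags);
	pci_iommu_batch_start(pdev,
			      HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE,
			      entry);
	for (i = 0; i < npages; i++) {
		if (pci_iommu_batch_add(base_paddr + (i * PAGE_SIZE)) < 0)
			goto iommu_map_fail;	/* a flush inside add failed */
	}
	if (pci_iommu_batch_end() < 0)
		goto iommu_map_fail;
	local_irq_restore(flags);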
110 | static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) | ||
111 | { | ||
112 | unsigned long n, i, start, end, limit; | ||
113 | int pass; | ||
114 | |||
115 | limit = arena->limit; | ||
116 | start = arena->hint; | ||
117 | pass = 0; | ||
118 | |||
119 | again: | ||
120 | n = find_next_zero_bit(arena->map, limit, start); | ||
121 | end = n + npages; | ||
122 | if (unlikely(end >= limit)) { | ||
123 | if (likely(pass < 1)) { | ||
124 | limit = start; | ||
125 | start = 0; | ||
126 | pass++; | ||
127 | goto again; | ||
128 | } else { | ||
129 | /* Scanned the whole thing, give up. */ | ||
130 | return -1; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | for (i = n; i < end; i++) { | ||
135 | if (test_bit(i, arena->map)) { | ||
136 | start = i + 1; | ||
137 | goto again; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | for (i = n; i < end; i++) | ||
142 | __set_bit(i, arena->map); | ||
143 | |||
144 | arena->hint = end; | ||
145 | |||
146 | return n; | ||
147 | } | ||
148 | |||
149 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | ||
150 | { | ||
151 | unsigned long i; | ||
152 | |||
153 | for (i = base; i < (base + npages); i++) | ||
154 | __clear_bit(i, arena->map); | ||
155 | } | ||
156 | |||
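pci_arena_alloc() below is a first-fit bitmap search with a moving hint: pass one scans from the previous allocation point to the end of the map, and on failure a single second pass rescans from zero up to the old hint before giving up. A slightly simplified, userspace-runnable model of the same two-pass idea (not the kernel code itself):

	#include <stdio.h>

	/* Toy model: map[] is one byte per page, 1 = in use. */
	static int toy_alloc(unsigned char *map, int limit, int *hint,
			     int npages)
	{
		int start = *hint, pass = 0, n, i;
	again:
		for (n = start; n + npages <= limit; n++) {
			for (i = 0; i < npages; i++)
				if (map[n + i])
					break;
			if (i == npages) {	/* found a free run */
				for (i = 0; i < npages; i++)
					map[n + i] = 1;
				*hint = n + npages;
				return n;
			}
		}
		if (pass++ < 1) {		/* wrap around exactly once */
			limit = start;
			start = 0;
			goto again;
		}
		return -1;			/* arena exhausted */
	}

	int main(void)
	{
		unsigned char map[16] = { 0 };
		int hint = 12;

		printf("%d\n", toy_alloc(map, 16, &hint, 8));	/* wraps: 0 */
		printf("%d\n", toy_alloc(map, 16, &hint, 4));	/* 8 */
		return 0;
	}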
157 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) | ||
158 | { | ||
159 | struct pcidev_cookie *pcp; | ||
160 | struct pci_iommu *iommu; | ||
161 | unsigned long flags, order, first_page, npages, n; | ||
162 | void *ret; | ||
163 | long entry; | ||
164 | |||
165 | size = IO_PAGE_ALIGN(size); | ||
166 | order = get_order(size); | ||
167 | if (unlikely(order >= MAX_ORDER)) | ||
168 | return NULL; | ||
169 | |||
170 | npages = size >> IO_PAGE_SHIFT; | ||
171 | |||
172 | first_page = __get_free_pages(GFP_ATOMIC, order); | ||
173 | if (unlikely(first_page == 0UL)) | ||
174 | return NULL; | ||
175 | |||
176 | memset((char *)first_page, 0, PAGE_SIZE << order); | ||
177 | |||
178 | pcp = pdev->sysdata; | ||
179 | iommu = pcp->pbm->iommu; | ||
180 | |||
181 | spin_lock_irqsave(&iommu->lock, flags); | ||
182 | entry = pci_arena_alloc(&iommu->arena, npages); | ||
183 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
184 | |||
185 | if (unlikely(entry < 0L)) | ||
186 | goto arena_alloc_fail; | ||
187 | |||
188 | *dma_addrp = (iommu->page_table_map_base + | ||
189 | (entry << IO_PAGE_SHIFT)); | ||
190 | ret = (void *) first_page; | ||
191 | first_page = __pa(first_page); | ||
192 | |||
193 | local_irq_save(flags); | ||
194 | |||
195 | pci_iommu_batch_start(pdev, | ||
196 | (HV_PCI_MAP_ATTR_READ | | ||
197 | HV_PCI_MAP_ATTR_WRITE), | ||
198 | entry); | ||
199 | |||
200 | for (n = 0; n < npages; n++) { | ||
201 | long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); | ||
202 | if (unlikely(err < 0L)) | ||
203 | goto iommu_map_fail; | ||
204 | } | ||
205 | |||
206 | if (unlikely(pci_iommu_batch_end() < 0L)) | ||
207 | goto iommu_map_fail; | ||
208 | |||
209 | local_irq_restore(flags); | ||
210 | |||
211 | return ret; | ||
212 | |||
213 | iommu_map_fail: | ||
214 | /* Interrupts are disabled. */ | ||
215 | spin_lock(&iommu->lock); | ||
216 | pci_arena_free(&iommu->arena, entry, npages); | ||
217 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
218 | |||
219 | arena_alloc_fail: | ||
220 | free_pages(first_page, order); | ||
221 | return NULL; | ||
222 | } | ||
223 | |||
224 | static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | ||
225 | { | ||
226 | struct pcidev_cookie *pcp; | ||
227 | struct pci_iommu *iommu; | ||
228 | unsigned long flags, order, npages, entry; | ||
229 | u32 devhandle; | ||
230 | |||
231 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | ||
232 | pcp = pdev->sysdata; | ||
233 | iommu = pcp->pbm->iommu; | ||
234 | devhandle = pcp->pbm->devhandle; | ||
235 | entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
236 | |||
237 | spin_lock_irqsave(&iommu->lock, flags); | ||
238 | |||
239 | pci_arena_free(&iommu->arena, entry, npages); | ||
240 | |||
241 | do { | ||
242 | unsigned long num; | ||
243 | |||
244 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | ||
245 | npages); | ||
246 | entry += num; | ||
247 | npages -= num; | ||
248 | } while (npages != 0); | ||
249 | |||
250 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
251 | |||
252 | order = get_order(size); | ||
253 | if (order < 10) | ||
254 | free_pages((unsigned long)cpu, order); | ||
255 | } | ||
256 | |||
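The demap loop above exists because the pci_sun4v_iommu_demap() hypervisor call (see pci_sun4v_asm.S later in this patch) may tear down fewer TTEs than requested, returning how many it actually handled; the caller advances and retries until everything is unmapped. The same pattern recurs in pci_4v_unmap_single() and pci_4v_unmap_sg() below, so a hedged helper shape would be (the helper name is invented, not part of the patch):

	/* Sketch: retry-until-done demap, mirroring the loop above. */
	static void demap_all_sketch(u32 devhandle, unsigned long entry,
				     unsigned long npages)
	{
		while (npages != 0) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}
	}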
257 | static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | ||
258 | { | ||
259 | struct pcidev_cookie *pcp; | ||
260 | struct pci_iommu *iommu; | ||
261 | unsigned long flags, npages, oaddr; | ||
262 | unsigned long i, base_paddr; | ||
263 | u32 bus_addr, ret; | ||
264 | unsigned long prot; | ||
265 | long entry; | ||
266 | |||
267 | pcp = pdev->sysdata; | ||
268 | iommu = pcp->pbm->iommu; | ||
269 | |||
270 | if (unlikely(direction == PCI_DMA_NONE)) | ||
271 | goto bad; | ||
272 | |||
273 | oaddr = (unsigned long)ptr; | ||
274 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); | ||
275 | npages >>= IO_PAGE_SHIFT; | ||
276 | |||
277 | spin_lock_irqsave(&iommu->lock, flags); | ||
278 | entry = pci_arena_alloc(&iommu->arena, npages); | ||
279 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
280 | |||
281 | if (unlikely(entry < 0L)) | ||
282 | goto bad; | ||
283 | |||
284 | bus_addr = (iommu->page_table_map_base + | ||
285 | (entry << IO_PAGE_SHIFT)); | ||
286 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); | ||
287 | base_paddr = __pa(oaddr & IO_PAGE_MASK); | ||
288 | prot = HV_PCI_MAP_ATTR_READ; | ||
289 | if (direction != PCI_DMA_TODEVICE) | ||
290 | prot |= HV_PCI_MAP_ATTR_WRITE; | ||
291 | |||
292 | local_irq_save(flags); | ||
293 | |||
294 | pci_iommu_batch_start(pdev, prot, entry); | ||
295 | |||
296 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { | ||
297 | long err = pci_iommu_batch_add(base_paddr); | ||
298 | if (unlikely(err < 0L)) | ||
299 | goto iommu_map_fail; | ||
300 | } | ||
301 | if (unlikely(pci_iommu_batch_end() < 0L)) | ||
302 | goto iommu_map_fail; | ||
303 | |||
304 | local_irq_restore(flags); | ||
305 | |||
306 | return ret; | ||
307 | |||
308 | bad: | ||
309 | if (printk_ratelimit()) | ||
310 | WARN_ON(1); | ||
311 | return PCI_DMA_ERROR_CODE; | ||
312 | |||
313 | iommu_map_fail: | ||
314 | /* Interrupts are disabled. */ | ||
315 | spin_lock(&iommu->lock); | ||
316 | pci_arena_free(&iommu->arena, entry, npages); | ||
317 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
318 | |||
319 | return PCI_DMA_ERROR_CODE; | ||
320 | } | ||
321 | |||
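The npages computation in pci_4v_map_single() above rounds the buffer out to whole IO pages: it takes the distance from the IO-page-aligned start of the buffer to the IO-page-aligned end and divides by the IO page size, so an unaligned buffer is handled correctly. A runnable arithmetic check, assuming the usual sparc64 value IO_PAGE_SHIFT == 13 (8KB IO pages):

	#include <stdio.h>

	#define IO_PAGE_SHIFT	13
	#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
	#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1UL))
	#define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

	int main(void)
	{
		unsigned long oaddr = 0x10001f00UL;	/* unaligned buffer */
		unsigned long sz    = 0x3000UL;		/* 12KB */
		unsigned long npages;

		npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
		npages >>= IO_PAGE_SHIFT;
		/* Bytes 0x10001f00..0x10004eff touch three 8KB IO pages. */
		printf("npages = %lu\n", npages);	/* prints 3 */
		return 0;
	}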
322 | static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | ||
323 | { | ||
324 | struct pcidev_cookie *pcp; | ||
325 | struct pci_iommu *iommu; | ||
326 | unsigned long flags, npages; | ||
327 | long entry; | ||
328 | u32 devhandle; | ||
329 | |||
330 | if (unlikely(direction == PCI_DMA_NONE)) { | ||
331 | if (printk_ratelimit()) | ||
332 | WARN_ON(1); | ||
333 | return; | ||
334 | } | ||
335 | |||
336 | pcp = pdev->sysdata; | ||
337 | iommu = pcp->pbm->iommu; | ||
338 | devhandle = pcp->pbm->devhandle; | ||
339 | |||
340 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | ||
341 | npages >>= IO_PAGE_SHIFT; | ||
342 | bus_addr &= IO_PAGE_MASK; | ||
343 | |||
344 | spin_lock_irqsave(&iommu->lock, flags); | ||
345 | |||
346 | entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; | ||
347 | pci_arena_free(&iommu->arena, entry, npages); | ||
348 | |||
349 | do { | ||
350 | unsigned long num; | ||
351 | |||
352 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | ||
353 | npages); | ||
354 | entry += num; | ||
355 | npages -= num; | ||
356 | } while (npages != 0); | ||
357 | |||
358 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
359 | } | ||
360 | |||
361 | #define SG_ENT_PHYS_ADDRESS(SG) \ | ||
362 | (__pa(page_address((SG)->page)) + (SG)->offset) | ||
363 | |||
364 | static inline long fill_sg(long entry, struct pci_dev *pdev, | ||
365 | struct scatterlist *sg, | ||
366 | int nused, int nelems, unsigned long prot) | ||
367 | { | ||
368 | struct scatterlist *dma_sg = sg; | ||
369 | struct scatterlist *sg_end = sg + nelems; | ||
370 | unsigned long flags; | ||
371 | int i; | ||
372 | |||
373 | local_irq_save(flags); | ||
374 | |||
375 | pci_iommu_batch_start(pdev, prot, entry); | ||
376 | |||
377 | for (i = 0; i < nused; i++) { | ||
378 | unsigned long pteval = ~0UL; | ||
379 | u32 dma_npages; | ||
380 | |||
381 | dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) + | ||
382 | dma_sg->dma_length + | ||
383 | ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT; | ||
384 | do { | ||
385 | unsigned long offset; | ||
386 | signed int len; | ||
387 | |||
388 | /* If we are here, we know we have at least one | ||
389 | * more page to map. So walk forward until we | ||
390 | * hit a page crossing, and begin creating new | ||
391 | * mappings from that spot. | ||
392 | */ | ||
393 | for (;;) { | ||
394 | unsigned long tmp; | ||
395 | |||
396 | tmp = SG_ENT_PHYS_ADDRESS(sg); | ||
397 | len = sg->length; | ||
398 | if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { | ||
399 | pteval = tmp & IO_PAGE_MASK; | ||
400 | offset = tmp & (IO_PAGE_SIZE - 1UL); | ||
401 | break; | ||
402 | } | ||
403 | if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) { | ||
404 | pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK; | ||
405 | offset = 0UL; | ||
406 | len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); | ||
407 | break; | ||
408 | } | ||
409 | sg++; | ||
410 | } | ||
411 | |||
412 | pteval = (pteval & IOPTE_PAGE); | ||
413 | while (len > 0) { | ||
414 | long err; | ||
415 | |||
416 | err = pci_iommu_batch_add(pteval); | ||
417 | if (unlikely(err < 0L)) | ||
418 | goto iommu_map_failed; | ||
419 | |||
420 | pteval += IO_PAGE_SIZE; | ||
421 | len -= (IO_PAGE_SIZE - offset); | ||
422 | offset = 0; | ||
423 | dma_npages--; | ||
424 | } | ||
425 | |||
426 | pteval = (pteval & IOPTE_PAGE) + len; | ||
427 | sg++; | ||
428 | |||
429 | /* Skip over any tail mappings we've fully mapped, | ||
430 | * adjusting pteval along the way. Stop when we | ||
431 | * detect a page crossing event. | ||
432 | */ | ||
433 | while (sg < sg_end && | ||
434 | (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | ||
435 | (pteval == SG_ENT_PHYS_ADDRESS(sg)) && | ||
436 | ((pteval ^ | ||
437 | (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { | ||
438 | pteval += sg->length; | ||
439 | sg++; | ||
440 | } | ||
441 | if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) | ||
442 | pteval = ~0UL; | ||
443 | } while (dma_npages != 0); | ||
444 | dma_sg++; | ||
445 | } | ||
446 | |||
447 | if (unlikely(pci_iommu_batch_end() < 0L)) | ||
448 | goto iommu_map_failed; | ||
449 | |||
450 | local_irq_restore(flags); | ||
451 | return 0; | ||
452 | |||
453 | iommu_map_failed: | ||
454 | local_irq_restore(flags); | ||
455 | return -1L; | ||
456 | } | ||
457 | |||
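Much of fill_sg() leans on one idiom: ((a ^ b) >> IO_PAGE_SHIFT) == 0 holds exactly when addresses a and b fall on the same IO page, because XOR clears every bit the two addresses agree on, and the page number is just the bits above IO_PAGE_SHIFT. A runnable check under the same 8KB-page assumption as before:

	#include <stdio.h>

	int main(void)
	{
		unsigned long shift = 13;	/* assumed IO_PAGE_SHIFT */

		/* 0x4000 and 0x5fff are both in IO page 2: prints 1. */
		printf("%d\n", ((0x4000UL ^ 0x5fffUL) >> shift) == 0UL);
		/* 0x5fff and 0x6000 straddle pages 2 and 3: prints 0. */
		printf("%d\n", ((0x5fffUL ^ 0x6000UL) >> shift) == 0UL);
		return 0;
	}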
458 | static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | ||
459 | { | ||
460 | struct pcidev_cookie *pcp; | ||
461 | struct pci_iommu *iommu; | ||
462 | unsigned long flags, npages, prot; | ||
463 | u32 dma_base; | ||
464 | struct scatterlist *sgtmp; | ||
465 | long entry, err; | ||
466 | int used; | ||
467 | |||
468 | /* Fast path single entry scatterlists. */ | ||
469 | if (nelems == 1) { | ||
470 | sglist->dma_address = | ||
471 | pci_4v_map_single(pdev, | ||
472 | (page_address(sglist->page) + sglist->offset), | ||
473 | sglist->length, direction); | ||
474 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) | ||
475 | return 0; | ||
476 | sglist->dma_length = sglist->length; | ||
477 | return 1; | ||
478 | } | ||
479 | |||
480 | pcp = pdev->sysdata; | ||
481 | iommu = pcp->pbm->iommu; | ||
482 | |||
483 | if (unlikely(direction == PCI_DMA_NONE)) | ||
484 | goto bad; | ||
485 | |||
486 | /* Step 1: Prepare scatter list. */ | ||
487 | npages = prepare_sg(sglist, nelems); | ||
488 | |||
489 | /* Step 2: Allocate a cluster and context, if necessary. */ | ||
490 | spin_lock_irqsave(&iommu->lock, flags); | ||
491 | entry = pci_arena_alloc(&iommu->arena, npages); | ||
492 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
493 | |||
494 | if (unlikely(entry < 0L)) | ||
495 | goto bad; | ||
496 | |||
497 | dma_base = iommu->page_table_map_base + | ||
498 | (entry << IO_PAGE_SHIFT); | ||
499 | |||
500 | /* Step 3: Normalize DMA addresses. */ | ||
501 | used = nelems; | ||
502 | |||
503 | sgtmp = sglist; | ||
504 | while (used && sgtmp->dma_length) { | ||
505 | sgtmp->dma_address += dma_base; | ||
506 | sgtmp++; | ||
507 | used--; | ||
508 | } | ||
509 | used = nelems - used; | ||
510 | |||
511 | /* Step 4: Create the mappings. */ | ||
512 | prot = HV_PCI_MAP_ATTR_READ; | ||
513 | if (direction != PCI_DMA_TODEVICE) | ||
514 | prot |= HV_PCI_MAP_ATTR_WRITE; | ||
515 | |||
516 | err = fill_sg(entry, pdev, sglist, used, nelems, prot); | ||
517 | if (unlikely(err < 0L)) | ||
518 | goto iommu_map_failed; | ||
519 | |||
520 | return used; | ||
521 | |||
522 | bad: | ||
523 | if (printk_ratelimit()) | ||
524 | WARN_ON(1); | ||
525 | return 0; | ||
526 | |||
527 | iommu_map_failed: | ||
528 | spin_lock_irqsave(&iommu->lock, flags); | ||
529 | pci_arena_free(&iommu->arena, entry, npages); | ||
530 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
531 | |||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | ||
536 | { | ||
537 | struct pcidev_cookie *pcp; | ||
538 | struct pci_iommu *iommu; | ||
539 | unsigned long flags, i, npages; | ||
540 | long entry; | ||
541 | u32 devhandle, bus_addr; | ||
542 | |||
543 | if (unlikely(direction == PCI_DMA_NONE)) { | ||
544 | if (printk_ratelimit()) | ||
545 | WARN_ON(1); | ||
546 | } | ||
547 | |||
548 | pcp = pdev->sysdata; | ||
549 | iommu = pcp->pbm->iommu; | ||
550 | devhandle = pcp->pbm->devhandle; | ||
551 | |||
552 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | ||
553 | |||
554 | for (i = 1; i < nelems; i++) | ||
555 | if (sglist[i].dma_length == 0) | ||
556 | break; | ||
557 | i--; | ||
558 | npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - | ||
559 | bus_addr) >> IO_PAGE_SHIFT; | ||
560 | |||
561 | entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
562 | |||
563 | spin_lock_irqsave(&iommu->lock, flags); | ||
564 | |||
565 | pci_arena_free(&iommu->arena, entry, npages); | ||
566 | |||
567 | do { | ||
568 | unsigned long num; | ||
569 | |||
570 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | ||
571 | npages); | ||
572 | entry += num; | ||
573 | npages -= num; | ||
574 | } while (npages != 0); | ||
575 | |||
576 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
577 | } | ||
578 | |||
579 | static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | ||
580 | { | ||
581 | /* Nothing to do... */ | ||
582 | } | ||
583 | |||
584 | static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | ||
585 | { | ||
586 | /* Nothing to do... */ | ||
587 | } | ||
588 | |||
589 | struct pci_iommu_ops pci_sun4v_iommu_ops = { | ||
590 | .alloc_consistent = pci_4v_alloc_consistent, | ||
591 | .free_consistent = pci_4v_free_consistent, | ||
592 | .map_single = pci_4v_map_single, | ||
593 | .unmap_single = pci_4v_unmap_single, | ||
594 | .map_sg = pci_4v_map_sg, | ||
595 | .unmap_sg = pci_4v_unmap_sg, | ||
596 | .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu, | ||
597 | .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, | ||
598 | }; | ||
599 | |||
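This ops table is how the sun4v code plugs into the rest of the sparc64 PCI layer: drivers keep calling the generic DMA entry points, which dispatch through the controller's pci_iommu_ops at run time. A hedged sketch of the dispatch shape (the wrapper below is assumed surrounding context, not part of this patch):

	/* Assumed shape of a generic wrapper elsewhere in the tree. */
	extern struct pci_iommu_ops *pci_iommu_ops;

	dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
				  size_t sz, int direction)
	{
		return pci_iommu_ops->map_single(pdev, ptr, sz, direction);
	}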
600 | /* SUN4V PCI configuration space accessors. */ | ||
601 | |||
602 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) | ||
603 | { | ||
604 | if (bus == pbm->pci_first_busno) { | ||
605 | if (device == 0 && func == 0) | ||
606 | return 0; | ||
607 | return 1; | ||
608 | } | ||
609 | |||
610 | if (bus < pbm->pci_first_busno || | ||
611 | bus > pbm->pci_last_busno) | ||
612 | return 1; | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
617 | int where, int size, u32 *value) | ||
618 | { | ||
619 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
620 | u32 devhandle = pbm->devhandle; | ||
621 | unsigned int bus = bus_dev->number; | ||
622 | unsigned int device = PCI_SLOT(devfn); | ||
623 | unsigned int func = PCI_FUNC(devfn); | ||
624 | unsigned long ret; | ||
625 | |||
626 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | ||
627 | ret = ~0UL; | ||
628 | } else { | ||
629 | ret = pci_sun4v_config_get(devhandle, | ||
630 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
631 | where, size); | ||
632 | #if 0 | ||
633 | printk("rcfg: [%x:%x:%x:%d]=[%lx]\n", | ||
634 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), | ||
635 | where, size, ret); | ||
636 | #endif | ||
637 | } | ||
638 | switch (size) { | ||
639 | case 1: | ||
640 | *value = ret & 0xff; | ||
641 | break; | ||
642 | case 2: | ||
643 | *value = ret & 0xffff; | ||
644 | break; | ||
645 | case 4: | ||
646 | *value = ret & 0xffffffff; | ||
647 | break; | ||
648 | } | ||
649 | |||
650 | |||
651 | return PCIBIOS_SUCCESSFUL; | ||
652 | } | ||
653 | |||
654 | static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
655 | int where, int size, u32 value) | ||
656 | { | ||
657 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
658 | u32 devhandle = pbm->devhandle; | ||
659 | unsigned int bus = bus_dev->number; | ||
660 | unsigned int device = PCI_SLOT(devfn); | ||
661 | unsigned int func = PCI_FUNC(devfn); | ||
662 | unsigned long ret; | ||
663 | |||
664 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | ||
665 | /* Do nothing. */ | ||
666 | } else { | ||
667 | ret = pci_sun4v_config_put(devhandle, | ||
668 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
669 | where, size, value); | ||
670 | #if 0 | ||
671 | printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n", | ||
672 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), | ||
673 | where, size, value, ret); | ||
674 | #endif | ||
675 | } | ||
676 | return PCIBIOS_SUCCESSFUL; | ||
677 | } | ||
678 | |||
679 | static struct pci_ops pci_sun4v_ops = { | ||
680 | .read = pci_sun4v_read_pci_cfg, | ||
681 | .write = pci_sun4v_write_pci_cfg, | ||
682 | }; | ||
683 | |||
684 | |||
685 | static void pbm_scan_bus(struct pci_controller_info *p, | ||
686 | struct pci_pbm_info *pbm) | ||
687 | { | ||
688 | struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); | ||
689 | |||
690 | if (!cookie) { | ||
691 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | ||
692 | prom_halt(); | ||
693 | } | ||
694 | |||
695 | /* All we care about is the PBM. */ | ||
696 | memset(cookie, 0, sizeof(*cookie)); | ||
697 | cookie->pbm = pbm; | ||
698 | |||
699 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); | ||
700 | #if 0 | ||
701 | pci_fixup_host_bridge_self(pbm->pci_bus); | ||
702 | pbm->pci_bus->self->sysdata = cookie; | ||
703 | #endif | ||
704 | pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, | ||
705 | pbm->prom_node); | ||
706 | pci_record_assignments(pbm, pbm->pci_bus); | ||
707 | pci_assign_unassigned(pbm, pbm->pci_bus); | ||
708 | pci_fixup_irq(pbm, pbm->pci_bus); | ||
709 | pci_determine_66mhz_disposition(pbm, pbm->pci_bus); | ||
710 | pci_setup_busmastering(pbm, pbm->pci_bus); | ||
711 | } | ||
712 | |||
713 | static void pci_sun4v_scan_bus(struct pci_controller_info *p) | ||
714 | { | ||
715 | if (p->pbm_A.prom_node) { | ||
716 | p->pbm_A.is_66mhz_capable = | ||
717 | prom_getbool(p->pbm_A.prom_node, "66mhz-capable"); | ||
718 | |||
719 | pbm_scan_bus(p, &p->pbm_A); | ||
720 | } | ||
721 | if (p->pbm_B.prom_node) { | ||
722 | p->pbm_B.is_66mhz_capable = | ||
723 | prom_getbool(p->pbm_B.prom_node, "66mhz-capable"); | ||
724 | |||
725 | pbm_scan_bus(p, &p->pbm_B); | ||
726 | } | ||
727 | |||
728 | /* XXX register error interrupt handlers XXX */ | ||
729 | } | ||
730 | |||
731 | static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, | ||
732 | struct pci_dev *pdev, | ||
733 | unsigned int devino) | ||
734 | { | ||
735 | u32 devhandle = pbm->devhandle; | ||
736 | int pil; | ||
737 | |||
738 | pil = 5; | ||
739 | if (pdev) { | ||
740 | switch ((pdev->class >> 16) & 0xff) { | ||
741 | case PCI_BASE_CLASS_STORAGE: | ||
742 | pil = 5; | ||
743 | break; | ||
744 | |||
745 | case PCI_BASE_CLASS_NETWORK: | ||
746 | pil = 6; | ||
747 | break; | ||
748 | |||
749 | case PCI_BASE_CLASS_DISPLAY: | ||
750 | pil = 9; | ||
751 | break; | ||
752 | |||
753 | case PCI_BASE_CLASS_MULTIMEDIA: | ||
754 | case PCI_BASE_CLASS_MEMORY: | ||
755 | case PCI_BASE_CLASS_BRIDGE: | ||
756 | case PCI_BASE_CLASS_SERIAL: | ||
757 | pil = 10; | ||
758 | break; | ||
759 | |||
760 | default: | ||
761 | pil = 5; | ||
762 | break; | ||
763 | } | ||
764 | } | ||
765 | BUG_ON(PIL_RESERVED(pil)); | ||
766 | |||
767 | return sun4v_build_irq(devhandle, devino, pil, IBF_PCI); | ||
768 | } | ||
769 | |||
770 | static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) | ||
771 | { | ||
772 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
773 | struct pci_pbm_info *pbm = pcp->pbm; | ||
774 | struct resource *res, *root; | ||
775 | u32 reg; | ||
776 | int where, size, is_64bit; | ||
777 | |||
778 | res = &pdev->resource[resource]; | ||
779 | if (resource < 6) { | ||
780 | where = PCI_BASE_ADDRESS_0 + (resource * 4); | ||
781 | } else if (resource == PCI_ROM_RESOURCE) { | ||
782 | where = pdev->rom_base_reg; | ||
783 | } else { | ||
784 | /* Somebody might have asked for allocation of a non-standard resource */ | ||
785 | return; | ||
786 | } | ||
787 | |||
788 | /* XXX 64-bit MEM handling is not 100% correct... XXX */ | ||
789 | is_64bit = 0; | ||
790 | if (res->flags & IORESOURCE_IO) | ||
791 | root = &pbm->io_space; | ||
792 | else { | ||
793 | root = &pbm->mem_space; | ||
794 | if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
795 | == PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
796 | is_64bit = 1; | ||
797 | } | ||
798 | |||
799 | size = res->end - res->start; | ||
800 | pci_read_config_dword(pdev, where, ®); | ||
801 | reg = ((reg & size) | | ||
802 | (((u32)(res->start - root->start)) & ~size)); | ||
803 | if (resource == PCI_ROM_RESOURCE) { | ||
804 | reg |= PCI_ROM_ADDRESS_ENABLE; | ||
805 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
806 | } | ||
807 | pci_write_config_dword(pdev, where, reg); | ||
808 | |||
809 | /* This knows that the upper 32-bits of the address | ||
810 | * must be zero. Our PCI common layer enforces this. | ||
811 | */ | ||
812 | if (is_64bit) | ||
813 | pci_write_config_dword(pdev, where + 4, 0); | ||
814 | } | ||
815 | |||
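The read-modify-write in pci_sun4v_base_address_update() relies on BARs being power-of-two sized and naturally aligned: size (end - start) is a mask of the offset bits, so reg & size preserves the BAR's low read-only flag bits while (res->start - root->start) & ~size supplies the new bus-relative base. A runnable worked example for a 64KB memory BAR (values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg  = 0x0000000c;	/* old BAR: 64-bit, prefetchable */
		unsigned int size = 0x0000ffff;	/* 64KB region: end - start */
		unsigned int base = 0x00100000;	/* res->start - root->start */

		reg = (reg & size) | (base & ~size);
		printf("new BAR = %#010x\n", reg);	/* 0x0010000c */
		return 0;
	}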
816 | static void pci_sun4v_resource_adjust(struct pci_dev *pdev, | ||
817 | struct resource *res, | ||
818 | struct resource *root) | ||
819 | { | ||
820 | res->start += root->start; | ||
821 | res->end += root->start; | ||
822 | } | ||
823 | |||
824 | /* Use the ranges property to determine where PCI MEM, I/O, and Config | ||
825 | * space are for this PCI bus module. | ||
826 | */ | ||
827 | static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) | ||
828 | { | ||
829 | int i, saw_mem, saw_io; | ||
830 | |||
831 | saw_mem = saw_io = 0; | ||
832 | for (i = 0; i < pbm->num_pbm_ranges; i++) { | ||
833 | struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; | ||
834 | unsigned long a; | ||
835 | int type; | ||
836 | |||
837 | type = (pr->child_phys_hi >> 24) & 0x3; | ||
838 | a = (((unsigned long)pr->parent_phys_hi << 32UL) | | ||
839 | ((unsigned long)pr->parent_phys_lo << 0UL)); | ||
840 | |||
841 | switch (type) { | ||
842 | case 1: | ||
843 | /* 16-bit IO space, 16MB */ | ||
844 | pbm->io_space.start = a; | ||
845 | pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); | ||
846 | pbm->io_space.flags = IORESOURCE_IO; | ||
847 | saw_io = 1; | ||
848 | break; | ||
849 | |||
850 | case 2: | ||
851 | /* 32-bit MEM space, 2GB */ | ||
852 | pbm->mem_space.start = a; | ||
853 | pbm->mem_space.end = a + (0x80000000UL - 1UL); | ||
854 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
855 | saw_mem = 1; | ||
856 | break; | ||
857 | |||
858 | case 3: | ||
859 | /* XXX 64-bit MEM handling XXX */ | ||
860 | |||
861 | default: | ||
862 | break; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | if (!saw_io || !saw_mem) { | ||
867 | prom_printf("%s: Fatal error, missing %s PBM range.\n", | ||
868 | pbm->name, | ||
869 | (!saw_io ? "IO" : "MEM")); | ||
870 | prom_halt(); | ||
871 | } | ||
872 | |||
873 | printk("%s: PCI IO[%lx] MEM[%lx]\n", | ||
874 | pbm->name, | ||
875 | pbm->io_space.start, | ||
876 | pbm->mem_space.start); | ||
877 | } | ||
878 | |||
879 | static void pbm_register_toplevel_resources(struct pci_controller_info *p, | ||
880 | struct pci_pbm_info *pbm) | ||
881 | { | ||
882 | pbm->io_space.name = pbm->mem_space.name = pbm->name; | ||
883 | |||
884 | request_resource(&ioport_resource, &pbm->io_space); | ||
885 | request_resource(&iomem_resource, &pbm->mem_space); | ||
886 | pci_register_legacy_regions(&pbm->io_space, | ||
887 | &pbm->mem_space); | ||
888 | } | ||
889 | |||
890 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, | ||
891 | struct pci_iommu *iommu) | ||
892 | { | ||
893 | struct pci_iommu_arena *arena = &iommu->arena; | ||
894 | unsigned long i, cnt = 0; | ||
895 | u32 devhandle; | ||
896 | |||
897 | devhandle = pbm->devhandle; | ||
898 | for (i = 0; i < arena->limit; i++) { | ||
899 | unsigned long ret, io_attrs, ra; | ||
900 | |||
901 | ret = pci_sun4v_iommu_getmap(devhandle, | ||
902 | HV_PCI_TSBID(0, i), | ||
903 | &io_attrs, &ra); | ||
904 | if (ret == HV_EOK) { | ||
905 | cnt++; | ||
906 | __set_bit(i, arena->map); | ||
907 | } | ||
908 | } | ||
909 | |||
910 | return cnt; | ||
911 | } | ||
912 | |||
913 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) | ||
914 | { | ||
915 | struct pci_iommu *iommu = pbm->iommu; | ||
916 | unsigned long num_tsb_entries, sz; | ||
917 | u32 vdma[2], dma_mask, dma_offset; | ||
918 | int err, tsbsize; | ||
919 | |||
920 | err = prom_getproperty(pbm->prom_node, "virtual-dma", | ||
921 | (char *)&vdma[0], sizeof(vdma)); | ||
922 | if (err == 0 || err == -1) { | ||
923 | /* No property, use default values. */ | ||
924 | vdma[0] = 0x80000000; | ||
925 | vdma[1] = 0x80000000; | ||
926 | } | ||
927 | |||
928 | dma_mask = vdma[0]; | ||
929 | switch (vdma[1]) { | ||
930 | case 0x20000000: | ||
931 | dma_mask |= 0x1fffffff; | ||
932 | tsbsize = 64; | ||
933 | break; | ||
934 | |||
935 | case 0x40000000: | ||
936 | dma_mask |= 0x3fffffff; | ||
937 | tsbsize = 128; | ||
938 | break; | ||
939 | |||
940 | case 0x80000000: | ||
941 | dma_mask |= 0x7fffffff; | ||
942 | tsbsize = 256; | ||
943 | break; | ||
944 | |||
945 | default: | ||
946 | prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); | ||
947 | prom_halt(); | ||
948 | } | ||
949 | |||
950 | tsbsize *= (8 * 1024); | ||
951 | |||
952 | num_tsb_entries = tsbsize / sizeof(iopte_t); | ||
953 | |||
954 | dma_offset = vdma[0]; | ||
955 | |||
956 | /* Setup initial software IOMMU state. */ | ||
957 | spin_lock_init(&iommu->lock); | ||
958 | iommu->ctx_lowest_free = 1; | ||
959 | iommu->page_table_map_base = dma_offset; | ||
960 | iommu->dma_addr_mask = dma_mask; | ||
961 | |||
962 | /* Allocate and initialize the free area map. */ | ||
963 | sz = num_tsb_entries / 8; | ||
964 | sz = (sz + 7UL) & ~7UL; | ||
965 | iommu->arena.map = kmalloc(sz, GFP_KERNEL); | ||
966 | if (!iommu->arena.map) { | ||
967 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | ||
968 | prom_halt(); | ||
969 | } | ||
970 | memset(iommu->arena.map, 0, sz); | ||
971 | iommu->arena.limit = num_tsb_entries; | ||
972 | |||
973 | sz = probe_existing_entries(pbm, iommu); | ||
974 | |||
975 | printk("%s: TSB entries [%lu], existing mappings [%lu]\n", | ||
976 | pbm->name, num_tsb_entries, sz); | ||
977 | } | ||
978 | |||
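For the default virtual-dma property (base 0x80000000, size 0x80000000) the sizing arithmetic in pci_sun4v_iommu_init() works out as follows, assuming sizeof(iopte_t) == 8: a tsbsize of 256 scales to 2MB of ioptes, i.e. 262144 entries, and at one 8KB IO page per entry that covers exactly the 2GB DMA window. A runnable check:

	#include <stdio.h>

	int main(void)
	{
		unsigned long vdma0 = 0x80000000UL;	/* DVMA base */
		unsigned long vdma1 = 0x80000000UL;	/* DVMA size */
		unsigned long tsbsize = 256UL * 8UL * 1024UL;
		unsigned long entries = tsbsize / 8;	/* sizeof(iopte_t) */

		printf("dma_mask = %#lx\n", vdma0 | (vdma1 - 1)); /* 0xffffffff */
		printf("entries  = %lu\n", entries);		  /* 262144 */
		printf("coverage = %#lx\n", entries << 13);	  /* 0x80000000 */
		return 0;
	}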
979 | static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) | ||
980 | { | ||
981 | unsigned int busrange[2]; | ||
982 | int prom_node = pbm->prom_node; | ||
983 | int err; | ||
984 | |||
985 | err = prom_getproperty(prom_node, "bus-range", | ||
986 | (char *)&busrange[0], | ||
987 | sizeof(busrange)); | ||
988 | if (err == 0 || err == -1) { | ||
989 | prom_printf("%s: Fatal error, no bus-range.\n", pbm->name); | ||
990 | prom_halt(); | ||
991 | } | ||
992 | |||
993 | pbm->pci_first_busno = busrange[0]; | ||
994 | pbm->pci_last_busno = busrange[1]; | ||
995 | |||
996 | } | ||
997 | |||
998 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle) | ||
999 | { | ||
1000 | struct pci_pbm_info *pbm; | ||
1001 | int err, i; | ||
1002 | |||
1003 | if (devhandle & 0x40) | ||
1004 | pbm = &p->pbm_B; | ||
1005 | else | ||
1006 | pbm = &p->pbm_A; | ||
1007 | |||
1008 | pbm->parent = p; | ||
1009 | pbm->prom_node = prom_node; | ||
1010 | pbm->pci_first_slot = 1; | ||
1011 | |||
1012 | pbm->devhandle = devhandle; | ||
1013 | |||
1014 | sprintf(pbm->name, "SUN4V-PCI%d PBM%c", | ||
1015 | p->index, (pbm == &p->pbm_A ? 'A' : 'B')); | ||
1016 | |||
1017 | printk("%s: devhandle[%x] prom_node[%x:%x]\n", | ||
1018 | pbm->name, pbm->devhandle, | ||
1019 | pbm->prom_node, prom_getchild(pbm->prom_node)); | ||
1020 | |||
1021 | prom_getstring(prom_node, "name", | ||
1022 | pbm->prom_name, sizeof(pbm->prom_name)); | ||
1023 | |||
1024 | err = prom_getproperty(prom_node, "ranges", | ||
1025 | (char *) pbm->pbm_ranges, | ||
1026 | sizeof(pbm->pbm_ranges)); | ||
1027 | if (err == 0 || err == -1) { | ||
1028 | prom_printf("%s: Fatal error, no ranges property.\n", | ||
1029 | pbm->name); | ||
1030 | prom_halt(); | ||
1031 | } | ||
1032 | |||
1033 | pbm->num_pbm_ranges = | ||
1034 | (err / sizeof(struct linux_prom_pci_ranges)); | ||
1035 | |||
1036 | /* Mask out the top 4 bits of each range's parent physical | ||
1037 | * address, leaving the real physical address. | ||
1038 | */ | ||
1039 | for (i = 0; i < pbm->num_pbm_ranges; i++) | ||
1040 | pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff; | ||
1041 | |||
1042 | pci_sun4v_determine_mem_io_space(pbm); | ||
1043 | pbm_register_toplevel_resources(p, pbm); | ||
1044 | |||
1045 | err = prom_getproperty(prom_node, "interrupt-map", | ||
1046 | (char *)pbm->pbm_intmap, | ||
1047 | sizeof(pbm->pbm_intmap)); | ||
1048 | if (err == 0 || err == -1) { | ||
1049 | prom_printf("%s: Fatal error, no interrupt-map property.\n", | ||
1050 | pbm->name); | ||
1051 | prom_halt(); | ||
1052 | } | ||
1053 | |||
1054 | pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap)); | ||
1055 | err = prom_getproperty(prom_node, "interrupt-map-mask", | ||
1056 | (char *)&pbm->pbm_intmask, | ||
1057 | sizeof(pbm->pbm_intmask)); | ||
1058 | if (err == 0 || err == -1) { | ||
1059 | prom_printf("%s: Fatal error, no interrupt-map-mask.\n", | ||
1060 | pbm->name); | ||
1061 | prom_halt(); | ||
1062 | } | ||
1063 | |||
1064 | pci_sun4v_get_bus_range(pbm); | ||
1065 | pci_sun4v_iommu_init(pbm); | ||
1066 | } | ||
1067 | |||
1068 | void sun4v_pci_init(int node, char *model_name) | ||
1069 | { | ||
1070 | struct pci_controller_info *p; | ||
1071 | struct pci_iommu *iommu; | ||
1072 | struct linux_prom64_registers regs; | ||
1073 | u32 devhandle; | ||
1074 | int i; | ||
1075 | |||
1076 | prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); | ||
1077 | devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; | ||
1078 | |||
1079 | for (p = pci_controller_root; p; p = p->next) { | ||
1080 | struct pci_pbm_info *pbm; | ||
1081 | |||
1082 | if (p->pbm_A.prom_node && p->pbm_B.prom_node) | ||
1083 | continue; | ||
1084 | |||
1085 | pbm = (p->pbm_A.prom_node ? | ||
1086 | &p->pbm_A : | ||
1087 | &p->pbm_B); | ||
1088 | |||
1089 | if (pbm->devhandle == (devhandle ^ 0x40)) { | ||
1090 | pci_sun4v_pbm_init(p, node, devhandle); | ||
1091 | return; | ||
1092 | } | ||
1093 | } | ||
1094 | |||
1095 | for_each_cpu(i) { | ||
1096 | unsigned long page = get_zeroed_page(GFP_ATOMIC); | ||
1097 | |||
1098 | if (!page) | ||
1099 | goto fatal_memory_error; | ||
1100 | |||
1101 | per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; | ||
1102 | } | ||
1103 | |||
1104 | p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | ||
1105 | if (!p) | ||
1106 | goto fatal_memory_error; | ||
1107 | |||
1108 | memset(p, 0, sizeof(*p)); | ||
1109 | |||
1110 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1111 | if (!iommu) | ||
1112 | goto fatal_memory_error; | ||
1113 | |||
1114 | memset(iommu, 0, sizeof(*iommu)); | ||
1115 | p->pbm_A.iommu = iommu; | ||
1116 | |||
1117 | iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1118 | if (!iommu) | ||
1119 | goto fatal_memory_error; | ||
1120 | |||
1121 | memset(iommu, 0, sizeof(*iommu)); | ||
1122 | p->pbm_B.iommu = iommu; | ||
1123 | |||
1124 | p->next = pci_controller_root; | ||
1125 | pci_controller_root = p; | ||
1126 | |||
1127 | p->index = pci_num_controllers++; | ||
1128 | p->pbms_same_domain = 0; | ||
1129 | |||
1130 | p->scan_bus = pci_sun4v_scan_bus; | ||
1131 | p->irq_build = pci_sun4v_irq_build; | ||
1132 | p->base_address_update = pci_sun4v_base_address_update; | ||
1133 | p->resource_adjust = pci_sun4v_resource_adjust; | ||
1134 | p->pci_ops = &pci_sun4v_ops; | ||
1135 | |||
1136 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | ||
1137 | * for memory space. | ||
1138 | */ | ||
1139 | pci_memspace_mask = 0x7fffffffUL; | ||
1140 | |||
1141 | pci_sun4v_pbm_init(p, node, devhandle); | ||
1142 | return; | ||
1143 | |||
1144 | fatal_memory_error: | ||
1145 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | ||
1146 | prom_halt(); | ||
1147 | } | ||
diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h new file mode 100644 index 000000000000..884d25f6158d --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* pci_sun4v.h: SUN4V specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _PCI_SUN4V_H | ||
7 | #define _PCI_SUN4V_H | ||
8 | |||
9 | extern long pci_sun4v_iommu_map(unsigned long devhandle, | ||
10 | unsigned long tsbid, | ||
11 | unsigned long num_ttes, | ||
12 | unsigned long io_attributes, | ||
13 | unsigned long io_page_list_pa); | ||
14 | extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, | ||
15 | unsigned long tsbid, | ||
16 | unsigned long num_ttes); | ||
17 | extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, | ||
18 | unsigned long tsbid, | ||
19 | unsigned long *io_attributes, | ||
20 | unsigned long *real_address); | ||
21 | extern unsigned long pci_sun4v_config_get(unsigned long devhandle, | ||
22 | unsigned long pci_device, | ||
23 | unsigned long config_offset, | ||
24 | unsigned long size); | ||
25 | extern int pci_sun4v_config_put(unsigned long devhandle, | ||
26 | unsigned long pci_device, | ||
27 | unsigned long config_offset, | ||
28 | unsigned long size, | ||
29 | unsigned long data); | ||
30 | |||
31 | #endif /* !(_PCI_SUN4V_H) */ | ||
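These prototypes correspond one-for-one to the assembly stubs in pci_sun4v_asm.S below: each stub loads a fast-trap function number into %o5, issues `ta HV_FAST_TRAP`, and returns its result in %o0. A hedged C-side usage sketch (the function and its devhandle/entry arguments are illustrative, not from the patch):

	/* Sketch: map one IO page, then read a 16-bit config register. */
	static int hv_calls_sketch(u32 devhandle, unsigned long entry,
				   u64 *pglist, unsigned int bus,
				   unsigned int dev, unsigned int fn)
	{
		long mapped;
		unsigned long val;

		mapped = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					     1, (HV_PCI_MAP_ATTR_READ |
						 HV_PCI_MAP_ATTR_WRITE),
					     __pa(pglist));
		if (mapped < 0)		/* negated hypervisor status */
			return -1;

		val = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, dev, fn),
					   PCI_VENDOR_ID, 2);
		return (val == ~0UL) ? -1 : 0;	/* all 1's == failed access */
	}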
diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S new file mode 100644 index 000000000000..6604fdbf746c --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v_asm.S | |||
@@ -0,0 +1,95 @@ | |||
1 | /* pci_sun4v_asm: Hypervisor calls for PCI support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/hypervisor.h> | ||
7 | |||
8 | /* %o0: devhandle | ||
9 | * %o1: tsbid | ||
10 | * %o2: num ttes | ||
11 | * %o3: io_attributes | ||
12 | * %o4: io_page_list phys address | ||
13 | * | ||
14 | * returns %o0: -status if status was non-zero, else | ||
15 | * %o0: num pages mapped | ||
16 | */ | ||
17 | .globl pci_sun4v_iommu_map | ||
18 | pci_sun4v_iommu_map: | ||
19 | mov %o5, %g1 | ||
20 | mov HV_FAST_PCI_IOMMU_MAP, %o5 | ||
21 | ta HV_FAST_TRAP | ||
22 | brnz,pn %o0, 1f | ||
23 | sub %g0, %o0, %o0 | ||
24 | mov %o1, %o0 | ||
25 | 1: retl | ||
26 | nop | ||
27 | |||
28 | /* %o0: devhandle | ||
29 | * %o1: tsbid | ||
30 | * %o2: num ttes | ||
31 | * | ||
32 | * returns %o0: num ttes demapped | ||
33 | */ | ||
34 | .globl pci_sun4v_iommu_demap | ||
35 | pci_sun4v_iommu_demap: | ||
36 | mov HV_FAST_PCI_IOMMU_DEMAP, %o5 | ||
37 | ta HV_FAST_TRAP | ||
38 | retl | ||
39 | mov %o1, %o0 | ||
40 | |||
41 | /* %o0: devhandle | ||
42 | * %o1: tsbid | ||
43 | * %o2: &io_attributes | ||
44 | * %o3: &real_address | ||
45 | * | ||
46 | * returns %o0: status | ||
47 | */ | ||
48 | .globl pci_sun4v_iommu_getmap | ||
49 | pci_sun4v_iommu_getmap: | ||
50 | mov %o2, %o4 | ||
51 | mov HV_FAST_PCI_IOMMU_GETMAP, %o5 | ||
52 | ta HV_FAST_TRAP | ||
53 | stx %o1, [%o4] | ||
54 | stx %o2, [%o3] | ||
55 | retl | ||
56 | mov %o0, %o0 | ||
57 | |||
58 | /* %o0: devhandle | ||
59 | * %o1: pci_device | ||
60 | * %o2: pci_config_offset | ||
61 | * %o3: size | ||
62 | * | ||
63 | * returns %o0: data | ||
64 | * | ||
65 | * If there is an error, the data will be returned | ||
66 | * as all 1's. | ||
67 | */ | ||
68 | .globl pci_sun4v_config_get | ||
69 | pci_sun4v_config_get: | ||
70 | mov HV_FAST_PCI_CONFIG_GET, %o5 | ||
71 | ta HV_FAST_TRAP | ||
72 | brnz,a,pn %o1, 1f | ||
73 | mov -1, %o2 | ||
74 | 1: retl | ||
75 | mov %o2, %o0 | ||
76 | |||
77 | /* %o0: devhandle | ||
78 | * %o1: pci_device | ||
79 | * %o2: pci_config_offset | ||
80 | * %o3: size | ||
81 | * %o4: data | ||
82 | * | ||
83 | * returns %o0: status | ||
84 | * | ||
85 | * status will be zero if the operation completed | ||
86 | * successfully, else -1. | ||
87 | */ | ||
88 | .globl pci_sun4v_config_put | ||
89 | pci_sun4v_config_put: | ||
90 | mov HV_FAST_PCI_CONFIG_PUT, %o5 | ||
91 | ta HV_FAST_TRAP | ||
92 | brnz,a,pn %o1, 1f | ||
93 | mov -1, %o1 | ||
94 | 1: retl | ||
95 | mov %o1, %o0 | ||
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 059b0d025224..1c7ca2f712d9 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -44,83 +44,61 @@ | |||
44 | #include <asm/fpumacro.h> | 44 | #include <asm/fpumacro.h> |
45 | #include <asm/head.h> | 45 | #include <asm/head.h> |
46 | #include <asm/cpudata.h> | 46 | #include <asm/cpudata.h> |
47 | #include <asm/mmu_context.h> | ||
47 | #include <asm/unistd.h> | 48 | #include <asm/unistd.h> |
49 | #include <asm/hypervisor.h> | ||
48 | 50 | ||
49 | /* #define VERBOSE_SHOWREGS */ | 51 | /* #define VERBOSE_SHOWREGS */ |
50 | 52 | ||
51 | /* | 53 | static void sparc64_yield(void) |
52 | * Nothing special yet... | ||
53 | */ | ||
54 | void default_idle(void) | ||
55 | { | ||
56 | } | ||
57 | |||
58 | #ifndef CONFIG_SMP | ||
59 | |||
60 | /* | ||
61 | * the idle loop on a Sparc... ;) | ||
62 | */ | ||
63 | void cpu_idle(void) | ||
64 | { | 54 | { |
65 | /* endless idle loop with no priority at all */ | 55 | if (tlb_type != hypervisor) |
66 | for (;;) { | 56 | return; |
67 | /* If current->work.need_resched is zero we should really | ||
68 | * set up for a system wakeup event and execute a shutdown | ||
69 | * instruction. | ||
70 | * | ||
71 | * But this requires writing back the contents of the | ||
72 | * L2 cache etc. so implement this later. -DaveM | ||
73 | */ | ||
74 | while (!need_resched()) | ||
75 | barrier(); | ||
76 | 57 | ||
77 | preempt_enable_no_resched(); | 58 | clear_thread_flag(TIF_POLLING_NRFLAG); |
78 | schedule(); | 59 | smp_mb__after_clear_bit(); |
79 | preempt_disable(); | 60 | |
80 | check_pgt_cache(); | 61 | while (!need_resched()) { |
62 | unsigned long pstate; | ||
63 | |||
64 | /* Disable interrupts. */ | ||
65 | __asm__ __volatile__( | ||
66 | "rdpr %%pstate, %0\n\t" | ||
67 | "andn %0, %1, %0\n\t" | ||
68 | "wrpr %0, %%g0, %%pstate" | ||
69 | : "=&r" (pstate) | ||
70 | : "i" (PSTATE_IE)); | ||
71 | |||
72 | if (!need_resched()) | ||
73 | sun4v_cpu_yield(); | ||
74 | |||
75 | /* Re-enable interrupts. */ | ||
76 | __asm__ __volatile__( | ||
77 | "rdpr %%pstate, %0\n\t" | ||
78 | "or %0, %1, %0\n\t" | ||
79 | "wrpr %0, %%g0, %%pstate" | ||
80 | : "=&r" (pstate) | ||
81 | : "i" (PSTATE_IE)); | ||
81 | } | 82 | } |
82 | } | ||
83 | 83 | ||
84 | #else | 84 | set_thread_flag(TIF_POLLING_NRFLAG); |
85 | } | ||
85 | 86 | ||
86 | /* | 87 | /* The idle loop on sparc64. */ |
87 | * the idle loop on an UltraMultiPenguin... | ||
88 | * | ||
89 | * TIF_POLLING_NRFLAG is set because we do not sleep the cpu | ||
90 | * inside of the idler task, so an interrupt is not needed | ||
91 | * to get a clean fast response. | ||
92 | * | ||
93 | * XXX Reverify this assumption... -DaveM | ||
94 | * | ||
95 | * Addendum: We do want it to do something for the signal | ||
96 | * delivery case, we detect that by just seeing | ||
97 | * if we are trying to send this to an idler or not. | ||
98 | */ | ||
99 | void cpu_idle(void) | 88 | void cpu_idle(void) |
100 | { | 89 | { |
101 | cpuinfo_sparc *cpuinfo = &local_cpu_data(); | ||
102 | set_thread_flag(TIF_POLLING_NRFLAG); | 90 | set_thread_flag(TIF_POLLING_NRFLAG); |
103 | 91 | ||
104 | while(1) { | 92 | while(1) { |
105 | if (need_resched()) { | 93 | if (need_resched()) { |
106 | cpuinfo->idle_volume = 0; | ||
107 | preempt_enable_no_resched(); | 94 | preempt_enable_no_resched(); |
108 | schedule(); | 95 | schedule(); |
109 | preempt_disable(); | 96 | preempt_disable(); |
110 | check_pgt_cache(); | ||
111 | } | 97 | } |
112 | cpuinfo->idle_volume++; | 98 | sparc64_yield(); |
113 | |||
114 | /* The store ordering is so that IRQ handlers on | ||
115 | * other cpus see our increasing idleness for the buddy | ||
116 | * redistribution algorithm. -DaveM | ||
117 | */ | ||
118 | membar_storeload_storestore(); | ||
119 | } | 99 | } |
120 | } | 100 | } |
121 | 101 | ||
122 | #endif | ||
123 | |||
124 | extern char reboot_command []; | 102 | extern char reboot_command []; |
125 | 103 | ||
126 | extern void (*prom_palette)(int); | 104 | extern void (*prom_palette)(int); |
@@ -354,6 +332,7 @@ void show_regs(struct pt_regs *regs) | |||
354 | extern long etrap, etraptl1; | 332 | extern long etrap, etraptl1; |
355 | #endif | 333 | #endif |
356 | __show_regs(regs); | 334 | __show_regs(regs); |
335 | #if 0 | ||
357 | #ifdef CONFIG_SMP | 336 | #ifdef CONFIG_SMP |
358 | { | 337 | { |
359 | extern void smp_report_regs(void); | 338 | extern void smp_report_regs(void); |
@@ -361,6 +340,7 @@ void show_regs(struct pt_regs *regs) | |||
361 | smp_report_regs(); | 340 | smp_report_regs(); |
362 | } | 341 | } |
363 | #endif | 342 | #endif |
343 | #endif | ||
364 | 344 | ||
365 | #ifdef VERBOSE_SHOWREGS | 345 | #ifdef VERBOSE_SHOWREGS |
366 | if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && | 346 | if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && |
@@ -433,30 +413,15 @@ void exit_thread(void) | |||
433 | void flush_thread(void) | 413 | void flush_thread(void) |
434 | { | 414 | { |
435 | struct thread_info *t = current_thread_info(); | 415 | struct thread_info *t = current_thread_info(); |
416 | struct mm_struct *mm; | ||
436 | 417 | ||
437 | if (t->flags & _TIF_ABI_PENDING) | 418 | if (t->flags & _TIF_ABI_PENDING) |
438 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); | 419 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); |
439 | 420 | ||
440 | if (t->task->mm) { | 421 | mm = t->task->mm; |
441 | unsigned long pgd_cache = 0UL; | 422 | if (mm) |
442 | if (test_thread_flag(TIF_32BIT)) { | 423 | tsb_context_switch(mm); |
443 | struct mm_struct *mm = t->task->mm; | ||
444 | pgd_t *pgd0 = &mm->pgd[0]; | ||
445 | pud_t *pud0 = pud_offset(pgd0, 0); | ||
446 | 424 | ||
447 | if (pud_none(*pud0)) { | ||
448 | pmd_t *page = pmd_alloc_one(mm, 0); | ||
449 | pud_set(pud0, page); | ||
450 | } | ||
451 | pgd_cache = get_pgd_cache(pgd0); | ||
452 | } | ||
453 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
454 | "membar #Sync" | ||
455 | : /* no outputs */ | ||
456 | : "r" (pgd_cache), | ||
457 | "r" (TSB_REG), | ||
458 | "i" (ASI_DMMU)); | ||
459 | } | ||
460 | set_thread_wsaved(0); | 425 | set_thread_wsaved(0); |
461 | 426 | ||
462 | /* Turn off performance counters if on. */ | 427 | /* Turn off performance counters if on. */ |
@@ -555,6 +520,18 @@ void synchronize_user_stack(void) | |||
555 | } | 520 | } |
556 | } | 521 | } |
557 | 522 | ||
523 | static void stack_unaligned(unsigned long sp) | ||
524 | { | ||
525 | siginfo_t info; | ||
526 | |||
527 | info.si_signo = SIGBUS; | ||
528 | info.si_errno = 0; | ||
529 | info.si_code = BUS_ADRALN; | ||
530 | info.si_addr = (void __user *) sp; | ||
531 | info.si_trapno = 0; | ||
532 | force_sig_info(SIGBUS, &info, current); | ||
533 | } | ||
534 | |||
558 | void fault_in_user_windows(void) | 535 | void fault_in_user_windows(void) |
559 | { | 536 | { |
560 | struct thread_info *t = current_thread_info(); | 537 | struct thread_info *t = current_thread_info(); |
@@ -570,13 +547,17 @@ void fault_in_user_windows(void) | |||
570 | flush_user_windows(); | 547 | flush_user_windows(); |
571 | window = get_thread_wsaved(); | 548 | window = get_thread_wsaved(); |
572 | 549 | ||
573 | if (window != 0) { | 550 | if (likely(window != 0)) { |
574 | window -= 1; | 551 | window -= 1; |
575 | do { | 552 | do { |
576 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); | 553 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); |
577 | struct reg_window *rwin = &t->reg_window[window]; | 554 | struct reg_window *rwin = &t->reg_window[window]; |
578 | 555 | ||
579 | if (copy_to_user((char __user *)sp, rwin, winsize)) | 556 | if (unlikely(sp & 0x7UL)) |
557 | stack_unaligned(sp); | ||
558 | |||
559 | if (unlikely(copy_to_user((char __user *)sp, | ||
560 | rwin, winsize))) | ||
580 | goto barf; | 561 | goto barf; |
581 | } while (window--); | 562 | } while (window--); |
582 | } | 563 | } |
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 3f9746f856d2..eb93e9c52846 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c | |||
@@ -124,6 +124,9 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
124 | { | 124 | { |
125 | BUG_ON(len > PAGE_SIZE); | 125 | BUG_ON(len > PAGE_SIZE); |
126 | 126 | ||
127 | if (tlb_type == hypervisor) | ||
128 | return; | ||
129 | |||
127 | #ifdef DCACHE_ALIASING_POSSIBLE | 130 | #ifdef DCACHE_ALIASING_POSSIBLE |
128 | /* If bit 13 of the kernel address we used to access the | 131 | /* If bit 13 of the kernel address we used to access the |
129 | * user page is the same as the virtual address that page | 132 | * user page is the same as the virtual address that page |
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index b80eba0081ca..7130e866f935 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S | |||
@@ -223,12 +223,26 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
223 | ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 | 223 | ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 |
224 | ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 | 224 | ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 |
225 | ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 | 225 | ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 |
226 | mov TSB_REG, %g6 | 226 | brz,pt %l3, 1f |
227 | brnz,a,pn %l3, 1f | 227 | mov %g6, %l2 |
228 | ldxa [%g6] ASI_IMMU, %g5 | 228 | |
229 | 1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 | 229 | /* Must do this before thread reg is clobbered below. */ |
230 | LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2) | ||
231 | 1: | ||
232 | ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 | ||
230 | ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 | 233 | ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 |
231 | wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate | 234 | |
235 | /* Normal globals are restored, go to trap globals. */ | ||
236 | 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate | ||
237 | nop | ||
238 | .section .sun4v_2insn_patch, "ax" | ||
239 | .word 661b | ||
240 | wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate | ||
241 | SET_GL(1) | ||
242 | .previous | ||
243 | |||
244 | mov %l2, %g6 | ||
245 | |||
232 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 | 246 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 |
233 | ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 | 247 | ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 |
234 | 248 | ||
@@ -252,27 +266,108 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
252 | 266 | ||
253 | brnz,pn %l3, kern_rtt | 267 | brnz,pn %l3, kern_rtt |
254 | mov PRIMARY_CONTEXT, %l7 | 268 | mov PRIMARY_CONTEXT, %l7 |
255 | ldxa [%l7 + %l7] ASI_DMMU, %l0 | 269 | |
270 | 661: ldxa [%l7 + %l7] ASI_DMMU, %l0 | ||
271 | .section .sun4v_1insn_patch, "ax" | ||
272 | .word 661b | ||
273 | ldxa [%l7 + %l7] ASI_MMU, %l0 | ||
274 | .previous | ||
275 | |||
256 | sethi %hi(sparc64_kern_pri_nuc_bits), %l1 | 276 | sethi %hi(sparc64_kern_pri_nuc_bits), %l1 |
257 | ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 | 277 | ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 |
258 | or %l0, %l1, %l0 | 278 | or %l0, %l1, %l0 |
259 | stxa %l0, [%l7] ASI_DMMU | 279 | |
260 | flush %g6 | 280 | 661: stxa %l0, [%l7] ASI_DMMU |
281 | .section .sun4v_1insn_patch, "ax" | ||
282 | .word 661b | ||
283 | stxa %l0, [%l7] ASI_MMU | ||
284 | .previous | ||
285 | |||
286 | sethi %hi(KERNBASE), %l7 | ||
287 | flush %l7 | ||
261 | rdpr %wstate, %l1 | 288 | rdpr %wstate, %l1 |
262 | rdpr %otherwin, %l2 | 289 | rdpr %otherwin, %l2 |
263 | srl %l1, 3, %l1 | 290 | srl %l1, 3, %l1 |
264 | 291 | ||
265 | wrpr %l2, %g0, %canrestore | 292 | wrpr %l2, %g0, %canrestore |
266 | wrpr %l1, %g0, %wstate | 293 | wrpr %l1, %g0, %wstate |
267 | wrpr %g0, %g0, %otherwin | 294 | brnz,pt %l2, user_rtt_restore |
295 | wrpr %g0, %g0, %otherwin | ||
296 | |||
297 | ldx [%g6 + TI_FLAGS], %g3 | ||
298 | wr %g0, ASI_AIUP, %asi | ||
299 | rdpr %cwp, %g1 | ||
300 | andcc %g3, _TIF_32BIT, %g0 | ||
301 | sub %g1, 1, %g1 | ||
302 | bne,pt %xcc, user_rtt_fill_32bit | ||
303 | wrpr %g1, %cwp | ||
304 | ba,a,pt %xcc, user_rtt_fill_64bit | ||
305 | |||
306 | user_rtt_fill_fixup: | ||
307 | rdpr %cwp, %g1 | ||
308 | add %g1, 1, %g1 | ||
309 | wrpr %g1, 0x0, %cwp | ||
310 | |||
311 | rdpr %wstate, %g2 | ||
312 | sll %g2, 3, %g2 | ||
313 | wrpr %g2, 0x0, %wstate | ||
314 | |||
315 | /* We know %canrestore and %otherwin are both zero. */ | ||
316 | |||
317 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
318 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
319 | mov PRIMARY_CONTEXT, %g1 | ||
320 | |||
321 | 661: stxa %g2, [%g1] ASI_DMMU | ||
322 | .section .sun4v_1insn_patch, "ax" | ||
323 | .word 661b | ||
324 | stxa %g2, [%g1] ASI_MMU | ||
325 | .previous | ||
326 | |||
327 | sethi %hi(KERNBASE), %g1 | ||
328 | flush %g1 | ||
329 | |||
330 | or %g4, FAULT_CODE_WINFIXUP, %g4 | ||
331 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
332 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
333 | |||
334 | mov %g6, %l1 | ||
335 | wrpr %g0, 0x0, %tl | ||
336 | |||
337 | 661: nop | ||
338 | .section .sun4v_1insn_patch, "ax" | ||
339 | .word 661b | ||
340 | SET_GL(0) | ||
341 | .previous | ||
342 | |||
343 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
344 | |||
345 | mov %l1, %g6 | ||
346 | ldx [%g6 + TI_TASK], %g4 | ||
347 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
348 | call do_sparc64_fault | ||
349 | add %sp, PTREGS_OFF, %o0 | ||
350 | ba,pt %xcc, rtrap | ||
351 | nop | ||
352 | |||
353 | user_rtt_pre_restore: | ||
354 | add %g1, 1, %g1 | ||
355 | wrpr %g1, 0x0, %cwp | ||
356 | |||
357 | user_rtt_restore: | ||
268 | restore | 358 | restore |
269 | rdpr %canrestore, %g1 | 359 | rdpr %canrestore, %g1 |
270 | wrpr %g1, 0x0, %cleanwin | 360 | wrpr %g1, 0x0, %cleanwin |
271 | retry | 361 | retry |
272 | nop | 362 | nop |
273 | 363 | ||
274 | kern_rtt: restore | 364 | kern_rtt: rdpr %canrestore, %g1 |
365 | brz,pn %g1, kern_rtt_fill | ||
366 | nop | ||
367 | kern_rtt_restore: | ||
368 | restore | ||
275 | retry | 369 | retry |
370 | |||
276 | to_kernel: | 371 | to_kernel: |
277 | #ifdef CONFIG_PREEMPT | 372 | #ifdef CONFIG_PREEMPT |
278 | ldsw [%g6 + TI_PRE_COUNT], %l5 | 373 | ldsw [%g6 + TI_PRE_COUNT], %l5 |
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index d95a1bcf163d..1d6ffdeabd4c 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c | |||
@@ -693,11 +693,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) | |||
693 | 693 | ||
694 | /* SBUS SYSIO INO number to Sparc PIL level. */ | 694 | /* SBUS SYSIO INO number to Sparc PIL level. */ |
695 | static unsigned char sysio_ino_to_pil[] = { | 695 | static unsigned char sysio_ino_to_pil[] = { |
696 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */ | 696 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 0 */ |
697 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */ | 697 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 1 */ |
698 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */ | 698 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 2 */ |
699 | 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */ | 699 | 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 3 */ |
700 | 4, /* Onboard SCSI */ | 700 | 5, /* Onboard SCSI */ |
701 | 5, /* Onboard Ethernet */ | 701 | 5, /* Onboard Ethernet */ |
702 | /*XXX*/ 8, /* Onboard BPP */ | 702 | /*XXX*/ 8, /* Onboard BPP */ |
703 | 0, /* Bogon */ | 703 | 0, /* Bogon */ |
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 158bd31e15b7..7d0e67c1ce50 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c | |||
@@ -64,12 +64,6 @@ struct screen_info screen_info = { | |||
64 | 16 /* orig-video-points */ | 64 | 16 /* orig-video-points */ |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* Typing sync at the prom prompt calls the function pointed to by | ||
68 | * the sync callback which I set to the following function. | ||
69 | * This should sync all filesystems and return, for now it just | ||
70 | * prints out pretty messages and returns. | ||
71 | */ | ||
72 | |||
73 | void (*prom_palette)(int); | 67 | void (*prom_palette)(int); |
74 | void (*prom_keyboard)(void); | 68 | void (*prom_keyboard)(void); |
75 | 69 | ||
@@ -79,259 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n) | |||
79 | prom_write(s, n); | 73 | prom_write(s, n); |
80 | } | 74 | } |
81 | 75 | ||
82 | static struct console prom_console = { | ||
83 | .name = "prom", | ||
84 | .write = prom_console_write, | ||
85 | .flags = CON_CONSDEV | CON_ENABLED, | ||
86 | .index = -1, | ||
87 | }; | ||
88 | |||
89 | #define PROM_TRUE -1 | ||
90 | #define PROM_FALSE 0 | ||
91 | |||
92 | /* Pretty sick eh? */ | ||
93 | int prom_callback(long *args) | ||
94 | { | ||
95 | struct console *cons, *saved_console = NULL; | ||
96 | unsigned long flags; | ||
97 | char *cmd; | ||
98 | extern spinlock_t prom_entry_lock; | ||
99 | |||
100 | if (!args) | ||
101 | return -1; | ||
102 | if (!(cmd = (char *)args[0])) | ||
103 | return -1; | ||
104 | |||
105 | /* | ||
106 | * The callback can be invoked on the cpu that first dropped | ||
107 | * into prom_cmdline after taking the serial interrupt, or on | ||
108 | * a slave processor that was smp_captured() if the | ||
109 | * administrator has done a switch-cpu inside obp. In either | ||
110 | * case, the cpu is marked as in-interrupt. Drop IRQ locks. | ||
111 | */ | ||
112 | irq_exit(); | ||
113 | |||
114 | /* XXX Revisit the locking here someday. This is a debugging | ||
115 | * XXX feature so it isn't all that critical. -DaveM | ||
116 | */ | ||
117 | local_irq_save(flags); | ||
118 | |||
119 | spin_unlock(&prom_entry_lock); | ||
120 | cons = console_drivers; | ||
121 | while (cons) { | ||
122 | unregister_console(cons); | ||
123 | cons->flags &= ~(CON_PRINTBUFFER); | ||
124 | cons->next = saved_console; | ||
125 | saved_console = cons; | ||
126 | cons = console_drivers; | ||
127 | } | ||
128 | register_console(&prom_console); | ||
129 | if (!strcmp(cmd, "sync")) { | ||
130 | prom_printf("PROM `%s' command...\n", cmd); | ||
131 | show_free_areas(); | ||
132 | if (current->pid != 0) { | ||
133 | local_irq_enable(); | ||
134 | sys_sync(); | ||
135 | local_irq_disable(); | ||
136 | } | ||
137 | args[2] = 0; | ||
138 | args[args[1] + 3] = -1; | ||
139 | prom_printf("Returning to PROM\n"); | ||
140 | } else if (!strcmp(cmd, "va>tte-data")) { | ||
141 | unsigned long ctx, va; | ||
142 | unsigned long tte = 0; | ||
143 | long res = PROM_FALSE; | ||
144 | |||
145 | ctx = args[3]; | ||
146 | va = args[4]; | ||
147 | if (ctx) { | ||
148 | /* | ||
149 | * Find process owning ctx, lookup mapping. | ||
150 | */ | ||
151 | struct task_struct *p; | ||
152 | struct mm_struct *mm = NULL; | ||
153 | pgd_t *pgdp; | ||
154 | pud_t *pudp; | ||
155 | pmd_t *pmdp; | ||
156 | pte_t *ptep; | ||
157 | pte_t pte; | ||
158 | |||
159 | for_each_process(p) { | ||
160 | mm = p->mm; | ||
161 | if (CTX_NRBITS(mm->context) == ctx) | ||
162 | break; | ||
163 | } | ||
164 | if (!mm || | ||
165 | CTX_NRBITS(mm->context) != ctx) | ||
166 | goto done; | ||
167 | |||
168 | pgdp = pgd_offset(mm, va); | ||
169 | if (pgd_none(*pgdp)) | ||
170 | goto done; | ||
171 | pudp = pud_offset(pgdp, va); | ||
172 | if (pud_none(*pudp)) | ||
173 | goto done; | ||
174 | pmdp = pmd_offset(pudp, va); | ||
175 | if (pmd_none(*pmdp)) | ||
176 | goto done; | ||
177 | |||
178 | /* Preemption implicitly disabled by virtue of | ||
179 | * being called from inside OBP. | ||
180 | */ | ||
181 | ptep = pte_offset_map(pmdp, va); | ||
182 | pte = *ptep; | ||
183 | if (pte_present(pte)) { | ||
184 | tte = pte_val(pte); | ||
185 | res = PROM_TRUE; | ||
186 | } | ||
187 | pte_unmap(ptep); | ||
188 | goto done; | ||
189 | } | ||
190 | |||
191 | if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { | ||
192 | extern unsigned long sparc64_kern_pri_context; | ||
193 | |||
194 | /* Spitfire Errata #32 workaround */ | ||
195 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
196 | "flush %%g6" | ||
197 | : /* No outputs */ | ||
198 | : "r" (sparc64_kern_pri_context), | ||
199 | "r" (PRIMARY_CONTEXT), | ||
200 | "i" (ASI_DMMU)); | ||
201 | |||
202 | /* | ||
203 | * Locked down tlb entry. | ||
204 | */ | ||
205 | |||
206 | if (tlb_type == spitfire) | ||
207 | tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT); | ||
208 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
209 | tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT); | ||
210 | |||
211 | res = PROM_TRUE; | ||
212 | goto done; | ||
213 | } | ||
214 | |||
215 | if (va < PGDIR_SIZE) { | ||
216 | /* | ||
217 | * vmalloc or prom_inherited mapping. | ||
218 | */ | ||
219 | pgd_t *pgdp; | ||
220 | pud_t *pudp; | ||
221 | pmd_t *pmdp; | ||
222 | pte_t *ptep; | ||
223 | pte_t pte; | ||
224 | int error; | ||
225 | |||
226 | if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { | ||
227 | tte = prom_virt_to_phys(va, &error); | ||
228 | if (!error) | ||
229 | res = PROM_TRUE; | ||
230 | goto done; | ||
231 | } | ||
232 | pgdp = pgd_offset_k(va); | ||
233 | if (pgd_none(*pgdp)) | ||
234 | goto done; | ||
235 | pudp = pud_offset(pgdp, va); | ||
236 | if (pud_none(*pudp)) | ||
237 | goto done; | ||
238 | pmdp = pmd_offset(pudp, va); | ||
239 | if (pmd_none(*pmdp)) | ||
240 | goto done; | ||
241 | |||
242 | /* Preemption implicitly disabled by virtue of | ||
243 | * being called from inside OBP. | ||
244 | */ | ||
245 | ptep = pte_offset_kernel(pmdp, va); | ||
246 | pte = *ptep; | ||
247 | if (pte_present(pte)) { | ||
248 | tte = pte_val(pte); | ||
249 | res = PROM_TRUE; | ||
250 | } | ||
251 | goto done; | ||
252 | } | ||
253 | |||
254 | if (va < PAGE_OFFSET) { | ||
255 | /* | ||
256 | * No mappings here. | ||
257 | */ | ||
258 | goto done; | ||
259 | } | ||
260 | |||
261 | if (va & (1UL << 40)) { | ||
262 | /* | ||
263 | * I/O page. | ||
264 | */ | ||
265 | |||
266 | tte = (__pa(va) & _PAGE_PADDR) | | ||
267 | _PAGE_VALID | _PAGE_SZ4MB | | ||
268 | _PAGE_E | _PAGE_P | _PAGE_W; | ||
269 | res = PROM_TRUE; | ||
270 | goto done; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Normal page. | ||
275 | */ | ||
276 | tte = (__pa(va) & _PAGE_PADDR) | | ||
277 | _PAGE_VALID | _PAGE_SZ4MB | | ||
278 | _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W; | ||
279 | res = PROM_TRUE; | ||
280 | |||
281 | done: | ||
282 | if (res == PROM_TRUE) { | ||
283 | args[2] = 3; | ||
284 | args[args[1] + 3] = 0; | ||
285 | args[args[1] + 4] = res; | ||
286 | args[args[1] + 5] = tte; | ||
287 | } else { | ||
288 | args[2] = 2; | ||
289 | args[args[1] + 3] = 0; | ||
290 | args[args[1] + 4] = res; | ||
291 | } | ||
292 | } else if (!strcmp(cmd, ".soft1")) { | ||
293 | unsigned long tte; | ||
294 | |||
295 | tte = args[3]; | ||
296 | prom_printf("%lx:\"%s%s%s%s%s\" ", | ||
297 | (tte & _PAGE_SOFT) >> 7, | ||
298 | tte & _PAGE_MODIFIED ? "M" : "-", | ||
299 | tte & _PAGE_ACCESSED ? "A" : "-", | ||
300 | tte & _PAGE_READ ? "R" : "-", | ||
301 | tte & _PAGE_WRITE ? "W" : "-", | ||
302 | tte & _PAGE_PRESENT ? "P" : "-"); | ||
303 | |||
304 | args[2] = 2; | ||
305 | args[args[1] + 3] = 0; | ||
306 | args[args[1] + 4] = PROM_TRUE; | ||
307 | } else if (!strcmp(cmd, ".soft2")) { | ||
308 | unsigned long tte; | ||
309 | |||
310 | tte = args[3]; | ||
311 | prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50); | ||
312 | |||
313 | args[2] = 2; | ||
314 | args[args[1] + 3] = 0; | ||
315 | args[args[1] + 4] = PROM_TRUE; | ||
316 | } else { | ||
317 | prom_printf("unknown PROM `%s' command...\n", cmd); | ||
318 | } | ||
319 | unregister_console(&prom_console); | ||
320 | while (saved_console) { | ||
321 | cons = saved_console; | ||
322 | saved_console = cons->next; | ||
323 | register_console(cons); | ||
324 | } | ||
325 | spin_lock(&prom_entry_lock); | ||
326 | local_irq_restore(flags); | ||
327 | |||
328 | /* | ||
329 | * Restore in-interrupt status for a resume from obp. | ||
330 | */ | ||
331 | irq_enter(); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | unsigned int boot_flags = 0; | 76 | unsigned int boot_flags = 0; |
336 | #define BOOTME_DEBUG 0x1 | 77 | #define BOOTME_DEBUG 0x1 |
337 | #define BOOTME_SINGLE 0x2 | 78 | #define BOOTME_SINGLE 0x2 |
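The prom_callback() removed above serviced OBP's `va>tte-data` word with a software walk of the page tables. A minimal sketch of that four-level walk, reusing the kernel's pgd/pud/pmd/pte helpers exactly as the removed code did (illustrative only, not a drop-in replacement):

    /* Walk mm's page tables and return the TTE (pte value) for va,
     * or 0 if no mapping is present at any level.
     */
    static unsigned long lookup_tte(struct mm_struct *mm, unsigned long va)
    {
            pgd_t *pgdp = pgd_offset(mm, va);
            pud_t *pudp;
            pmd_t *pmdp;
            pte_t *ptep, pte;
            unsigned long tte = 0;

            if (pgd_none(*pgdp))
                    return 0;
            pudp = pud_offset(pgdp, va);
            if (pud_none(*pudp))
                    return 0;
            pmdp = pmd_offset(pudp, va);
            if (pmd_none(*pmdp))
                    return 0;

            ptep = pte_offset_map(pmdp, va);
            pte = *ptep;
            if (pte_present(pte))
                    tte = pte_val(pte);
            pte_unmap(ptep);
            return tte;
    }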
@@ -479,15 +220,99 @@ char reboot_command[COMMAND_LINE_SIZE]; | |||
479 | 220 | ||
480 | static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; | 221 | static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; |
481 | 222 | ||
482 | void register_prom_callbacks(void) | 223 | static void __init per_cpu_patch(void) |
483 | { | 224 | { |
484 | prom_setcallback(prom_callback); | 225 | struct cpuid_patch_entry *p; |
485 | prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; " | 226 | unsigned long ver; |
486 | "' linux-va>tte-data to va>tte-data"); | 227 | int is_jbus; |
487 | prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; " | 228 | |
488 | "' linux-.soft1 to .soft1"); | 229 | if (tlb_type == spitfire && !this_is_starfire) |
489 | prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; " | 230 | return; |
490 | "' linux-.soft2 to .soft2"); | 231 | |
232 | is_jbus = 0; | ||
233 | if (tlb_type != hypervisor) { | ||
234 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
235 | is_jbus = ((ver >> 32UL) == __JALAPENO_ID || | ||
236 | (ver >> 32UL) == __SERRANO_ID); | ||
237 | } | ||
238 | |||
239 | p = &__cpuid_patch; | ||
240 | while (p < &__cpuid_patch_end) { | ||
241 | unsigned long addr = p->addr; | ||
242 | unsigned int *insns; | ||
243 | |||
244 | switch (tlb_type) { | ||
245 | case spitfire: | ||
246 | insns = &p->starfire[0]; | ||
247 | break; | ||
248 | case cheetah: | ||
249 | case cheetah_plus: | ||
250 | if (is_jbus) | ||
251 | insns = &p->cheetah_jbus[0]; | ||
252 | else | ||
253 | insns = &p->cheetah_safari[0]; | ||
254 | break; | ||
255 | case hypervisor: | ||
256 | insns = &p->sun4v[0]; | ||
257 | break; | ||
258 | default: | ||
259 | prom_printf("Unknown cpu type, halting.\n"); | ||
260 | prom_halt(); | ||
261 | }; | ||
262 | |||
263 | *(unsigned int *) (addr + 0) = insns[0]; | ||
264 | wmb(); | ||
265 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
266 | |||
267 | *(unsigned int *) (addr + 4) = insns[1]; | ||
268 | wmb(); | ||
269 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
270 | |||
271 | *(unsigned int *) (addr + 8) = insns[2]; | ||
272 | wmb(); | ||
273 | __asm__ __volatile__("flush %0" : : "r" (addr + 8)); | ||
274 | |||
275 | *(unsigned int *) (addr + 12) = insns[3]; | ||
276 | wmb(); | ||
277 | __asm__ __volatile__("flush %0" : : "r" (addr + 12)); | ||
278 | |||
279 | p++; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static void __init sun4v_patch(void) | ||
284 | { | ||
285 | struct sun4v_1insn_patch_entry *p1; | ||
286 | struct sun4v_2insn_patch_entry *p2; | ||
287 | |||
288 | if (tlb_type != hypervisor) | ||
289 | return; | ||
290 | |||
291 | p1 = &__sun4v_1insn_patch; | ||
292 | while (p1 < &__sun4v_1insn_patch_end) { | ||
293 | unsigned long addr = p1->addr; | ||
294 | |||
295 | *(unsigned int *) (addr + 0) = p1->insn; | ||
296 | wmb(); | ||
297 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
298 | |||
299 | p1++; | ||
300 | } | ||
301 | |||
302 | p2 = &__sun4v_2insn_patch; | ||
303 | while (p2 < &__sun4v_2insn_patch_end) { | ||
304 | unsigned long addr = p2->addr; | ||
305 | |||
306 | *(unsigned int *) (addr + 0) = p2->insns[0]; | ||
307 | wmb(); | ||
308 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
309 | |||
310 | *(unsigned int *) (addr + 4) = p2->insns[1]; | ||
311 | wmb(); | ||
312 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
313 | |||
314 | p2++; | ||
315 | } | ||
491 | } | 316 | } |
492 | 317 | ||
493 | void __init setup_arch(char **cmdline_p) | 318 | void __init setup_arch(char **cmdline_p) |
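Both patch routines added above follow the same store-then-flush discipline: each instruction word is stored into the kernel text, the store is ordered with wmb(), and the affected address is flushed from the instruction cache with the sparc64 `flush` instruction before the next word is written. Factored into a helper (a sketch; the real code open-codes this per word):

    /* Patch 'count' instruction words at 'addr', keeping the
     * instruction cache coherent after each store.
     */
    static void patch_insns(unsigned long addr, const unsigned int *insns,
                            int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    unsigned long ia = addr + i * 4;

                    *(unsigned int *) ia = insns[i];
                    wmb();
                    __asm__ __volatile__("flush %0" : : "r" (ia));
            }
    }

per_cpu_patch() would call this with count == 4 and the tlb_type-selected sequence; sun4v_patch() with count == 1 or 2.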
@@ -496,7 +321,10 @@ void __init setup_arch(char **cmdline_p) | |||
496 | *cmdline_p = prom_getbootargs(); | 321 | *cmdline_p = prom_getbootargs(); |
497 | strcpy(saved_command_line, *cmdline_p); | 322 | strcpy(saved_command_line, *cmdline_p); |
498 | 323 | ||
499 | printk("ARCH: SUN4U\n"); | 324 | if (tlb_type == hypervisor) |
325 | printk("ARCH: SUN4V\n"); | ||
326 | else | ||
327 | printk("ARCH: SUN4U\n"); | ||
500 | 328 | ||
501 | #ifdef CONFIG_DUMMY_CONSOLE | 329 | #ifdef CONFIG_DUMMY_CONSOLE |
502 | conswitchp = &dummy_con; | 330 | conswitchp = &dummy_con; |
@@ -507,6 +335,13 @@ void __init setup_arch(char **cmdline_p) | |||
507 | /* Work out if we are starfire early on */ | 335 | /* Work out if we are starfire early on */ |
508 | check_if_starfire(); | 336 | check_if_starfire(); |
509 | 337 | ||
338 | /* Now we know enough to patch the get_cpuid sequences | ||
339 | * used by trap code. | ||
340 | */ | ||
341 | per_cpu_patch(); | ||
342 | |||
343 | sun4v_patch(); | ||
344 | |||
510 | boot_flags_init(*cmdline_p); | 345 | boot_flags_init(*cmdline_p); |
511 | 346 | ||
512 | idprom_init(); | 347 | idprom_init(); |
@@ -514,7 +349,7 @@ void __init setup_arch(char **cmdline_p) | |||
514 | if (!root_flags) | 349 | if (!root_flags) |
515 | root_mountflags &= ~MS_RDONLY; | 350 | root_mountflags &= ~MS_RDONLY; |
516 | ROOT_DEV = old_decode_dev(root_dev); | 351 | ROOT_DEV = old_decode_dev(root_dev); |
517 | #ifdef CONFIG_BLK_DEV_INITRD | 352 | #ifdef CONFIG_BLK_DEV_RAM |
518 | rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; | 353 | rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; |
519 | rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); | 354 | rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); |
520 | rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); | 355 | rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); |
@@ -544,6 +379,9 @@ void __init setup_arch(char **cmdline_p) | |||
544 | 379 | ||
545 | smp_setup_cpu_possible_map(); | 380 | smp_setup_cpu_possible_map(); |
546 | 381 | ||
382 | /* Get boot processor trap_block[] setup. */ | ||
383 | init_cur_cpu_trap(current_thread_info()); | ||
384 | |||
547 | paging_init(); | 385 | paging_init(); |
548 | } | 386 | } |
549 | 387 | ||
@@ -565,6 +403,12 @@ static int __init set_preferred_console(void) | |||
565 | serial_console = 2; | 403 | serial_console = 2; |
566 | } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { | 404 | } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { |
567 | serial_console = 3; | 405 | serial_console = 3; |
406 | } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) { | ||
407 | /* sunhv_console_init() doesn't check the serial_console | ||
408 | * value anyways... | ||
409 | */ | ||
410 | serial_console = 4; | ||
411 | return add_preferred_console("ttyHV", 0, NULL); | ||
568 | } else { | 412 | } else { |
569 | prom_printf("Inconsistent console: " | 413 | prom_printf("Inconsistent console: " |
570 | "input %d, output %d\n", | 414 | "input %d, output %d\n", |
@@ -598,9 +442,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) | |||
598 | seq_printf(m, | 442 | seq_printf(m, |
599 | "cpu\t\t: %s\n" | 443 | "cpu\t\t: %s\n" |
600 | "fpu\t\t: %s\n" | 444 | "fpu\t\t: %s\n" |
601 | "promlib\t\t: Version 3 Revision %d\n" | 445 | "prom\t\t: %s\n" |
602 | "prom\t\t: %d.%d.%d\n" | 446 | "type\t\t: %s\n" |
603 | "type\t\t: sun4u\n" | ||
604 | "ncpus probed\t: %d\n" | 447 | "ncpus probed\t: %d\n" |
605 | "ncpus active\t: %d\n" | 448 | "ncpus active\t: %d\n" |
606 | "D$ parity tl1\t: %u\n" | 449 | "D$ parity tl1\t: %u\n" |
@@ -612,10 +455,10 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) | |||
612 | , | 455 | , |
613 | sparc_cpu_type, | 456 | sparc_cpu_type, |
614 | sparc_fpu_type, | 457 | sparc_fpu_type, |
615 | prom_rev, | 458 | prom_version, |
616 | prom_prev >> 16, | 459 | ((tlb_type == hypervisor) ? |
617 | (prom_prev >> 8) & 0xff, | 460 | "sun4v" : |
618 | prom_prev & 0xff, | 461 | "sun4u"), |
619 | ncpus_probed, | 462 | ncpus_probed, |
620 | num_online_cpus(), | 463 | num_online_cpus(), |
621 | dcache_parity_tl1_occurred, | 464 | dcache_parity_tl1_occurred, |
@@ -692,15 +535,11 @@ static int __init topology_init(void) | |||
692 | while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) | 535 | while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) |
693 | ncpus_probed++; | 536 | ncpus_probed++; |
694 | 537 | ||
695 | for (i = 0; i < NR_CPUS; i++) { | 538 | for_each_cpu(i) { |
696 | if (cpu_possible(i)) { | 539 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); |
697 | struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL); | 540 | if (p) { |
698 | 541 | register_cpu(p, i, NULL); | |
699 | if (p) { | 542 | err = 0; |
700 | memset(p, 0, sizeof(*p)); | ||
701 | register_cpu(p, i, NULL); | ||
702 | err = 0; | ||
703 | } | ||
704 | } | 543 | } |
705 | } | 544 | } |
706 | 545 | ||
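The topology_init() change above is a straight simplification: for_each_cpu() already iterates only possible cpus, and kzalloc() returns zeroed memory, so both the cpu_possible() test and the kmalloc()+memset() pair fold away:

    /* Before: filter by hand and clear by hand. */
    if (cpu_possible(i)) {
            struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL);
            if (p)
                    memset(p, 0, sizeof(*p));
    }

    /* After: kzalloc() hands back zeroed memory directly. */
    struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);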
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 1f7ad8a69052..373a701c90a5 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/timer.h> | 38 | #include <asm/timer.h> |
39 | #include <asm/starfire.h> | 39 | #include <asm/starfire.h> |
40 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
41 | #include <asm/sections.h> | ||
41 | 42 | ||
42 | extern void calibrate_delay(void); | 43 | extern void calibrate_delay(void); |
43 | 44 | ||
@@ -46,6 +47,8 @@ static unsigned char boot_cpu_id; | |||
46 | 47 | ||
47 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; | 48 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
48 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; | 49 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; |
50 | cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = | ||
51 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | ||
49 | static cpumask_t smp_commenced_mask; | 52 | static cpumask_t smp_commenced_mask; |
50 | static cpumask_t cpu_callout_map; | 53 | static cpumask_t cpu_callout_map; |
51 | 54 | ||
@@ -77,7 +80,7 @@ void smp_bogo(struct seq_file *m) | |||
77 | 80 | ||
78 | void __init smp_store_cpu_info(int id) | 81 | void __init smp_store_cpu_info(int id) |
79 | { | 82 | { |
80 | int cpu_node; | 83 | int cpu_node, def; |
81 | 84 | ||
82 | /* multiplier and counter set by | 85 | /* multiplier and counter set by |
83 | smp_setup_percpu_timer() */ | 86 | smp_setup_percpu_timer() */ |
@@ -87,24 +90,32 @@ void __init smp_store_cpu_info(int id) | |||
87 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, | 90 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, |
88 | "clock-frequency", 0); | 91 | "clock-frequency", 0); |
89 | 92 | ||
90 | cpu_data(id).pgcache_size = 0; | 93 | def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); |
91 | cpu_data(id).pte_cache[0] = NULL; | ||
92 | cpu_data(id).pte_cache[1] = NULL; | ||
93 | cpu_data(id).pgd_cache = NULL; | ||
94 | cpu_data(id).idle_volume = 1; | ||
95 | |||
96 | cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", | 94 | cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", |
97 | 16 * 1024); | 95 | def); |
96 | |||
97 | def = 32; | ||
98 | cpu_data(id).dcache_line_size = | 98 | cpu_data(id).dcache_line_size = |
99 | prom_getintdefault(cpu_node, "dcache-line-size", 32); | 99 | prom_getintdefault(cpu_node, "dcache-line-size", def); |
100 | |||
101 | def = 16 * 1024; | ||
100 | cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", | 102 | cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", |
101 | 16 * 1024); | 103 | def); |
104 | |||
105 | def = 32; | ||
102 | cpu_data(id).icache_line_size = | 106 | cpu_data(id).icache_line_size = |
103 | prom_getintdefault(cpu_node, "icache-line-size", 32); | 107 | prom_getintdefault(cpu_node, "icache-line-size", def); |
108 | |||
109 | def = ((tlb_type == hypervisor) ? | ||
110 | (3 * 1024 * 1024) : | ||
111 | (4 * 1024 * 1024)); | ||
104 | cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", | 112 | cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", |
105 | 4 * 1024 * 1024); | 113 | def); |
114 | |||
115 | def = 64; | ||
106 | cpu_data(id).ecache_line_size = | 116 | cpu_data(id).ecache_line_size = |
107 | prom_getintdefault(cpu_node, "ecache-line-size", 64); | 117 | prom_getintdefault(cpu_node, "ecache-line-size", def); |
118 | |||
108 | printk("CPU[%d]: Caches " | 119 | printk("CPU[%d]: Caches " |
109 | "D[sz(%d):line_sz(%d)] " | 120 | "D[sz(%d):line_sz(%d)] " |
110 | "I[sz(%d):line_sz(%d)] " | 121 | "I[sz(%d):line_sz(%d)] " |
@@ -119,27 +130,16 @@ static void smp_setup_percpu_timer(void); | |||
119 | 130 | ||
120 | static volatile unsigned long callin_flag = 0; | 131 | static volatile unsigned long callin_flag = 0; |
121 | 132 | ||
122 | extern void inherit_locked_prom_mappings(int save_p); | ||
123 | |||
124 | static inline void cpu_setup_percpu_base(unsigned long cpu_id) | ||
125 | { | ||
126 | __asm__ __volatile__("mov %0, %%g5\n\t" | ||
127 | "stxa %0, [%1] %2\n\t" | ||
128 | "membar #Sync" | ||
129 | : /* no outputs */ | ||
130 | : "r" (__per_cpu_offset(cpu_id)), | ||
131 | "r" (TSB_REG), "i" (ASI_IMMU)); | ||
132 | } | ||
133 | |||
134 | void __init smp_callin(void) | 133 | void __init smp_callin(void) |
135 | { | 134 | { |
136 | int cpuid = hard_smp_processor_id(); | 135 | int cpuid = hard_smp_processor_id(); |
137 | 136 | ||
138 | inherit_locked_prom_mappings(0); | 137 | __local_per_cpu_offset = __per_cpu_offset(cpuid); |
139 | 138 | ||
140 | __flush_tlb_all(); | 139 | if (tlb_type == hypervisor) |
140 | sun4v_ktsb_register(); | ||
141 | 141 | ||
142 | cpu_setup_percpu_base(cpuid); | 142 | __flush_tlb_all(); |
143 | 143 | ||
144 | smp_setup_percpu_timer(); | 144 | smp_setup_percpu_timer(); |
145 | 145 | ||
@@ -316,6 +316,8 @@ static void smp_synchronize_one_tick(int cpu) | |||
316 | spin_unlock_irqrestore(&itc_sync_lock, flags); | 316 | spin_unlock_irqrestore(&itc_sync_lock, flags); |
317 | } | 317 | } |
318 | 318 | ||
319 | extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load); | ||
320 | |||
319 | extern unsigned long sparc64_cpu_startup; | 321 | extern unsigned long sparc64_cpu_startup; |
320 | 322 | ||
321 | /* The OBP cpu startup callback truncates the 3rd arg cookie to | 323 | /* The OBP cpu startup callback truncates the 3rd arg cookie to |
@@ -331,21 +333,31 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) | |||
331 | unsigned long cookie = | 333 | unsigned long cookie = |
332 | (unsigned long)(&cpu_new_thread); | 334 | (unsigned long)(&cpu_new_thread); |
333 | struct task_struct *p; | 335 | struct task_struct *p; |
334 | int timeout, ret, cpu_node; | 336 | int timeout, ret; |
335 | 337 | ||
336 | p = fork_idle(cpu); | 338 | p = fork_idle(cpu); |
337 | callin_flag = 0; | 339 | callin_flag = 0; |
338 | cpu_new_thread = task_thread_info(p); | 340 | cpu_new_thread = task_thread_info(p); |
339 | cpu_set(cpu, cpu_callout_map); | 341 | cpu_set(cpu, cpu_callout_map); |
340 | 342 | ||
341 | cpu_find_by_mid(cpu, &cpu_node); | 343 | if (tlb_type == hypervisor) { |
342 | prom_startcpu(cpu_node, entry, cookie); | 344 | /* Alloc the mondo queues, cpu will load them. */ |
345 | sun4v_init_mondo_queues(0, cpu, 1, 0); | ||
346 | |||
347 | prom_startcpu_cpuid(cpu, entry, cookie); | ||
348 | } else { | ||
349 | int cpu_node; | ||
350 | |||
351 | cpu_find_by_mid(cpu, &cpu_node); | ||
352 | prom_startcpu(cpu_node, entry, cookie); | ||
353 | } | ||
343 | 354 | ||
344 | for (timeout = 0; timeout < 5000000; timeout++) { | 355 | for (timeout = 0; timeout < 5000000; timeout++) { |
345 | if (callin_flag) | 356 | if (callin_flag) |
346 | break; | 357 | break; |
347 | udelay(100); | 358 | udelay(100); |
348 | } | 359 | } |
360 | |||
349 | if (callin_flag) { | 361 | if (callin_flag) { |
350 | ret = 0; | 362 | ret = 0; |
351 | } else { | 363 | } else { |
@@ -441,7 +453,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c | |||
441 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | 453 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) |
442 | { | 454 | { |
443 | u64 pstate, ver; | 455 | u64 pstate, ver; |
444 | int nack_busy_id, is_jalapeno; | 456 | int nack_busy_id, is_jbus; |
445 | 457 | ||
446 | if (cpus_empty(mask)) | 458 | if (cpus_empty(mask)) |
447 | return; | 459 | return; |
@@ -451,7 +463,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas | |||
451 | * derivative processor. | 463 | * derivative processor. |
452 | */ | 464 | */ |
453 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 465 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); |
454 | is_jalapeno = ((ver >> 32) == 0x003e0016); | 466 | is_jbus = ((ver >> 32) == __JALAPENO_ID || |
467 | (ver >> 32) == __SERRANO_ID); | ||
455 | 468 | ||
456 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | 469 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); |
457 | 470 | ||
@@ -476,7 +489,7 @@ retry: | |||
476 | for_each_cpu_mask(i, mask) { | 489 | for_each_cpu_mask(i, mask) { |
477 | u64 target = (i << 14) | 0x70; | 490 | u64 target = (i << 14) | 0x70; |
478 | 491 | ||
479 | if (!is_jalapeno) | 492 | if (!is_jbus) |
480 | target |= (nack_busy_id << 24); | 493 | target |= (nack_busy_id << 24); |
481 | __asm__ __volatile__( | 494 | __asm__ __volatile__( |
482 | "stxa %%g0, [%0] %1\n\t" | 495 | "stxa %%g0, [%0] %1\n\t" |
@@ -529,7 +542,7 @@ retry: | |||
529 | for_each_cpu_mask(i, mask) { | 542 | for_each_cpu_mask(i, mask) { |
530 | u64 check_mask; | 543 | u64 check_mask; |
531 | 544 | ||
532 | if (is_jalapeno) | 545 | if (is_jbus) |
533 | check_mask = (0x2UL << (2*i)); | 546 | check_mask = (0x2UL << (2*i)); |
534 | else | 547 | else |
535 | check_mask = (0x2UL << | 548 | check_mask = (0x2UL << |
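The is_jalapeno -> is_jbus rename above reflects what the test actually checks: Jalapeno and Serrano are both JBUS parts, and either is identified by comparing the upper half of the %ver register against __JALAPENO_ID or __SERRANO_ID. As a sketch (assuming, as the diff implies, that those macros hold the top-32-bit values):

    static int cpu_is_jbus(void)
    {
            unsigned long ver;

            /* %ver bits 63:32 carry manufacturer/implementation. */
            __asm__ ("rdpr %%ver, %0" : "=r" (ver));
            return (ver >> 32) == __JALAPENO_ID ||
                   (ver >> 32) == __SERRANO_ID;
    }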
@@ -544,6 +557,155 @@ retry: | |||
544 | } | 557 | } |
545 | } | 558 | } |
546 | 559 | ||
560 | /* Multi-cpu list version. */ | ||
561 | static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | ||
562 | { | ||
563 | struct trap_per_cpu *tb; | ||
564 | u16 *cpu_list; | ||
565 | u64 *mondo; | ||
566 | cpumask_t error_mask; | ||
567 | unsigned long flags, status; | ||
568 | int cnt, retries, this_cpu, prev_sent, i; | ||
569 | |||
570 | /* We have to do this whole thing with interrupts fully disabled. | ||
571 | * Otherwise if we send an xcall from interrupt context it will | ||
572 | * corrupt both our mondo block and cpu list state. | ||
573 | * | ||
574 | * One consequence of this is that we cannot use timeout mechanisms | ||
575 | * that depend upon interrupts being delivered locally. So, for | ||
576 | * example, we cannot sample jiffies and expect it to advance. | ||
577 | * | ||
578 | * Fortunately, udelay() uses %stick/%tick so we can use that. | ||
579 | */ | ||
580 | local_irq_save(flags); | ||
581 | |||
582 | this_cpu = smp_processor_id(); | ||
583 | tb = &trap_block[this_cpu]; | ||
584 | |||
585 | mondo = __va(tb->cpu_mondo_block_pa); | ||
586 | mondo[0] = data0; | ||
587 | mondo[1] = data1; | ||
588 | mondo[2] = data2; | ||
589 | wmb(); | ||
590 | |||
591 | cpu_list = __va(tb->cpu_list_pa); | ||
592 | |||
593 | /* Setup the initial cpu list. */ | ||
594 | cnt = 0; | ||
595 | for_each_cpu_mask(i, mask) | ||
596 | cpu_list[cnt++] = i; | ||
597 | |||
598 | cpus_clear(error_mask); | ||
599 | retries = 0; | ||
600 | prev_sent = 0; | ||
601 | do { | ||
602 | int forward_progress, n_sent; | ||
603 | |||
604 | status = sun4v_cpu_mondo_send(cnt, | ||
605 | tb->cpu_list_pa, | ||
606 | tb->cpu_mondo_block_pa); | ||
607 | |||
608 | /* HV_EOK means all cpus received the xcall, we're done. */ | ||
609 | if (likely(status == HV_EOK)) | ||
610 | break; | ||
611 | |||
612 | /* First, see if we made any forward progress. | ||
613 | * | ||
614 | * The hypervisor indicates successful sends by setting | ||
615 | * cpu list entries to the value 0xffff. | ||
616 | */ | ||
617 | n_sent = 0; | ||
618 | for (i = 0; i < cnt; i++) { | ||
619 | if (likely(cpu_list[i] == 0xffff)) | ||
620 | n_sent++; | ||
621 | } | ||
622 | |||
623 | forward_progress = 0; | ||
624 | if (n_sent > prev_sent) | ||
625 | forward_progress = 1; | ||
626 | |||
627 | prev_sent = n_sent; | ||
628 | |||
629 | /* If we get a HV_ECPUERROR, then one or more of the cpus | ||
630 | * in the list are in error state. Use the cpu_state() | ||
631 | * hypervisor call to find out which cpus are in error state. | ||
632 | */ | ||
633 | if (unlikely(status == HV_ECPUERROR)) { | ||
634 | for (i = 0; i < cnt; i++) { | ||
635 | long err; | ||
636 | u16 cpu; | ||
637 | |||
638 | cpu = cpu_list[i]; | ||
639 | if (cpu == 0xffff) | ||
640 | continue; | ||
641 | |||
642 | err = sun4v_cpu_state(cpu); | ||
643 | if (err >= 0 && | ||
644 | err == HV_CPU_STATE_ERROR) { | ||
645 | cpu_list[i] = 0xffff; | ||
646 | cpu_set(cpu, error_mask); | ||
647 | } | ||
648 | } | ||
649 | } else if (unlikely(status != HV_EWOULDBLOCK)) | ||
650 | goto fatal_mondo_error; | ||
651 | |||
652 | /* Don't bother rewriting the CPU list, just leave the | ||
653 | * 0xffff and non-0xffff entries in there and the | ||
654 | * hypervisor will do the right thing. | ||
655 | * | ||
656 | * Only advance timeout state if we didn't make any | ||
657 | * forward progress. | ||
658 | */ | ||
659 | if (unlikely(!forward_progress)) { | ||
660 | if (unlikely(++retries > 10000)) | ||
661 | goto fatal_mondo_timeout; | ||
662 | |||
663 | /* Delay a little bit to let other cpus catch up | ||
664 | * on their cpu mondo queue work. | ||
665 | */ | ||
666 | udelay(2 * cnt); | ||
667 | } | ||
668 | } while (1); | ||
669 | |||
670 | local_irq_restore(flags); | ||
671 | |||
672 | if (unlikely(!cpus_empty(error_mask))) | ||
673 | goto fatal_mondo_cpu_error; | ||
674 | |||
675 | return; | ||
676 | |||
677 | fatal_mondo_cpu_error: | ||
678 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " | ||
679 | "were in error state\n", | ||
680 | this_cpu); | ||
681 | printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); | ||
682 | for_each_cpu_mask(i, error_mask) | ||
683 | printk("%d ", i); | ||
684 | printk("]\n"); | ||
685 | return; | ||
686 | |||
687 | fatal_mondo_timeout: | ||
688 | local_irq_restore(flags); | ||
689 | printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " | ||
690 | "progress after %d retries.\n", | ||
691 | this_cpu, retries); | ||
692 | goto dump_cpu_list_and_out; | ||
693 | |||
694 | fatal_mondo_error: | ||
695 | local_irq_restore(flags); | ||
696 | printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", | ||
697 | this_cpu, status); | ||
698 | printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " | ||
699 | "mondo_block_pa(%lx)\n", | ||
700 | this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); | ||
701 | |||
702 | dump_cpu_list_and_out: | ||
703 | printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); | ||
704 | for (i = 0; i < cnt; i++) | ||
705 | printk("%u ", cpu_list[i]); | ||
706 | printk("]\n"); | ||
707 | } | ||
708 | |||
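The retry loop in hypervisor_xcall_deliver() above implements a small protocol: sun4v_cpu_mondo_send() overwrites each successfully targeted cpu_list[] slot with 0xffff, so the sender can measure forward progress by counting those markers between attempts, and only advances its timeout counter on attempts that deliver nothing new. Stripped of the HV_ECPUERROR handling (which also marks errored cpus 0xffff), the control flow is roughly this sketch, with list_pa/block_pa abbreviating the trap-block fields:

    int retries = 0, prev_sent = 0;

    do {
            int i, n_sent = 0;
            long status = sun4v_cpu_mondo_send(cnt, list_pa, block_pa);

            if (status == HV_EOK)
                    break;                  /* all targets reached */

            for (i = 0; i < cnt; i++)       /* count delivered slots */
                    if (cpu_list[i] == 0xffff)
                            n_sent++;

            if (n_sent <= prev_sent) {      /* no forward progress */
                    if (++retries > 10000)
                            goto timed_out; /* give up and log */
                    udelay(2 * cnt);        /* let targets drain */
            }
            prev_sent = n_sent;
    } while (1);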
547 | /* Send cross call to all processors mentioned in MASK | 709 | /* Send cross call to all processors mentioned in MASK |
548 | * except self. | 710 | * except self. |
549 | */ | 711 | */ |
@@ -557,8 +719,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d | |||
557 | 719 | ||
558 | if (tlb_type == spitfire) | 720 | if (tlb_type == spitfire) |
559 | spitfire_xcall_deliver(data0, data1, data2, mask); | 721 | spitfire_xcall_deliver(data0, data1, data2, mask); |
560 | else | 722 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) |
561 | cheetah_xcall_deliver(data0, data1, data2, mask); | 723 | cheetah_xcall_deliver(data0, data1, data2, mask); |
724 | else | ||
725 | hypervisor_xcall_deliver(data0, data1, data2, mask); | ||
562 | /* NOTE: Caller runs local copy on master. */ | 726 | /* NOTE: Caller runs local copy on master. */ |
563 | 727 | ||
564 | put_cpu(); | 728 | put_cpu(); |
@@ -594,16 +758,13 @@ extern unsigned long xcall_call_function; | |||
594 | * You must not call this function with disabled interrupts or from a | 758 | * You must not call this function with disabled interrupts or from a |
595 | * hardware interrupt handler or from a bottom half handler. | 759 | * hardware interrupt handler or from a bottom half handler. |
596 | */ | 760 | */ |
597 | int smp_call_function(void (*func)(void *info), void *info, | 761 | static int smp_call_function_mask(void (*func)(void *info), void *info, |
598 | int nonatomic, int wait) | 762 | int nonatomic, int wait, cpumask_t mask) |
599 | { | 763 | { |
600 | struct call_data_struct data; | 764 | struct call_data_struct data; |
601 | int cpus = num_online_cpus() - 1; | 765 | int cpus; |
602 | long timeout; | 766 | long timeout; |
603 | 767 | ||
604 | if (!cpus) | ||
605 | return 0; | ||
606 | |||
607 | /* Can deadlock when called with interrupts disabled */ | 768 | /* Can deadlock when called with interrupts disabled */ |
608 | WARN_ON(irqs_disabled()); | 769 | WARN_ON(irqs_disabled()); |
609 | 770 | ||
@@ -614,9 +775,14 @@ int smp_call_function(void (*func)(void *info), void *info, | |||
614 | 775 | ||
615 | spin_lock(&call_lock); | 776 | spin_lock(&call_lock); |
616 | 777 | ||
778 | cpu_clear(smp_processor_id(), mask); | ||
779 | cpus = cpus_weight(mask); | ||
780 | if (!cpus) | ||
781 | goto out_unlock; | ||
782 | |||
617 | call_data = &data; | 783 | call_data = &data; |
618 | 784 | ||
619 | smp_cross_call(&xcall_call_function, 0, 0, 0); | 785 | smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); |
620 | 786 | ||
621 | /* | 787 | /* |
622 | * Wait for other cpus to complete function or at | 788 | * Wait for other cpus to complete function or at |
@@ -630,18 +796,25 @@ int smp_call_function(void (*func)(void *info), void *info, | |||
630 | udelay(1); | 796 | udelay(1); |
631 | } | 797 | } |
632 | 798 | ||
799 | out_unlock: | ||
633 | spin_unlock(&call_lock); | 800 | spin_unlock(&call_lock); |
634 | 801 | ||
635 | return 0; | 802 | return 0; |
636 | 803 | ||
637 | out_timeout: | 804 | out_timeout: |
638 | spin_unlock(&call_lock); | 805 | spin_unlock(&call_lock); |
639 | printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", | 806 | printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n", |
640 | (long) num_online_cpus() - 1L, | 807 | cpus, atomic_read(&data.finished)); |
641 | (long) atomic_read(&data.finished)); | ||
642 | return 0; | 808 | return 0; |
643 | } | 809 | } |
644 | 810 | ||
811 | int smp_call_function(void (*func)(void *info), void *info, | ||
812 | int nonatomic, int wait) | ||
813 | { | ||
814 | return smp_call_function_mask(func, info, nonatomic, wait, | ||
815 | cpu_online_map); | ||
816 | } | ||
817 | |||
645 | void smp_call_function_client(int irq, struct pt_regs *regs) | 818 | void smp_call_function_client(int irq, struct pt_regs *regs) |
646 | { | 819 | { |
647 | void (*func) (void *info) = call_data->func; | 820 | void (*func) (void *info) = call_data->func; |
@@ -659,13 +832,25 @@ void smp_call_function_client(int irq, struct pt_regs *regs) | |||
659 | } | 832 | } |
660 | } | 833 | } |
661 | 834 | ||
835 | static void tsb_sync(void *info) | ||
836 | { | ||
837 | struct mm_struct *mm = info; | ||
838 | |||
839 | if (current->active_mm == mm) | ||
840 | tsb_context_switch(mm); | ||
841 | } | ||
842 | |||
843 | void smp_tsb_sync(struct mm_struct *mm) | ||
844 | { | ||
845 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | ||
846 | } | ||
847 | |||
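smp_tsb_sync() above is the first user of the new masked primitive: instead of interrupting every online cpu, it runs tsb_sync() only on the cpus in mm->cpu_vm_mask, i.e. the cpus that may hold stale TSB state for that address space. Any per-mm broadcast takes the same shape; a sketch with a hypothetical callback (smp_call_function_mask() is static, so such callers would live in smp.c):

    static void per_mm_work(void *info)
    {
            struct mm_struct *mm = info;

            /* ... per-cpu work against mm ... */
    }

    void broadcast_per_mm(struct mm_struct *mm)
    {
            /* wait == 1: return only after all targets finish. */
            smp_call_function_mask(per_mm_work, mm, 0, 1,
                                   mm->cpu_vm_mask);
    }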
662 | extern unsigned long xcall_flush_tlb_mm; | 848 | extern unsigned long xcall_flush_tlb_mm; |
663 | extern unsigned long xcall_flush_tlb_pending; | 849 | extern unsigned long xcall_flush_tlb_pending; |
664 | extern unsigned long xcall_flush_tlb_kernel_range; | 850 | extern unsigned long xcall_flush_tlb_kernel_range; |
665 | extern unsigned long xcall_flush_tlb_all_spitfire; | ||
666 | extern unsigned long xcall_flush_tlb_all_cheetah; | ||
667 | extern unsigned long xcall_report_regs; | 851 | extern unsigned long xcall_report_regs; |
668 | extern unsigned long xcall_receive_signal; | 852 | extern unsigned long xcall_receive_signal; |
853 | extern unsigned long xcall_new_mmu_context_version; | ||
669 | 854 | ||
670 | #ifdef DCACHE_ALIASING_POSSIBLE | 855 | #ifdef DCACHE_ALIASING_POSSIBLE |
671 | extern unsigned long xcall_flush_dcache_page_cheetah; | 856 | extern unsigned long xcall_flush_dcache_page_cheetah; |
@@ -693,11 +878,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page) | |||
693 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | 878 | void smp_flush_dcache_page_impl(struct page *page, int cpu) |
694 | { | 879 | { |
695 | cpumask_t mask = cpumask_of_cpu(cpu); | 880 | cpumask_t mask = cpumask_of_cpu(cpu); |
696 | int this_cpu = get_cpu(); | 881 | int this_cpu; |
882 | |||
883 | if (tlb_type == hypervisor) | ||
884 | return; | ||
697 | 885 | ||
698 | #ifdef CONFIG_DEBUG_DCFLUSH | 886 | #ifdef CONFIG_DEBUG_DCFLUSH |
699 | atomic_inc(&dcpage_flushes); | 887 | atomic_inc(&dcpage_flushes); |
700 | #endif | 888 | #endif |
889 | |||
890 | this_cpu = get_cpu(); | ||
891 | |||
701 | if (cpu == this_cpu) { | 892 | if (cpu == this_cpu) { |
702 | __local_flush_dcache_page(page); | 893 | __local_flush_dcache_page(page); |
703 | } else if (cpu_online(cpu)) { | 894 | } else if (cpu_online(cpu)) { |
@@ -713,7 +904,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) | |||
713 | __pa(pg_addr), | 904 | __pa(pg_addr), |
714 | (u64) pg_addr, | 905 | (u64) pg_addr, |
715 | mask); | 906 | mask); |
716 | } else { | 907 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
717 | #ifdef DCACHE_ALIASING_POSSIBLE | 908 | #ifdef DCACHE_ALIASING_POSSIBLE |
718 | data0 = | 909 | data0 = |
719 | ((u64)&xcall_flush_dcache_page_cheetah); | 910 | ((u64)&xcall_flush_dcache_page_cheetah); |
@@ -735,7 +926,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
735 | void *pg_addr = page_address(page); | 926 | void *pg_addr = page_address(page); |
736 | cpumask_t mask = cpu_online_map; | 927 | cpumask_t mask = cpu_online_map; |
737 | u64 data0; | 928 | u64 data0; |
738 | int this_cpu = get_cpu(); | 929 | int this_cpu; |
930 | |||
931 | if (tlb_type == hypervisor) | ||
932 | return; | ||
933 | |||
934 | this_cpu = get_cpu(); | ||
739 | 935 | ||
740 | cpu_clear(this_cpu, mask); | 936 | cpu_clear(this_cpu, mask); |
741 | 937 | ||
@@ -752,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
752 | __pa(pg_addr), | 948 | __pa(pg_addr), |
753 | (u64) pg_addr, | 949 | (u64) pg_addr, |
754 | mask); | 950 | mask); |
755 | } else { | 951 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
756 | #ifdef DCACHE_ALIASING_POSSIBLE | 952 | #ifdef DCACHE_ALIASING_POSSIBLE |
757 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | 953 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); |
758 | cheetah_xcall_deliver(data0, | 954 | cheetah_xcall_deliver(data0, |
@@ -769,38 +965,58 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
769 | put_cpu(); | 965 | put_cpu(); |
770 | } | 966 | } |
771 | 967 | ||
968 | static void __smp_receive_signal_mask(cpumask_t mask) | ||
969 | { | ||
970 | smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); | ||
971 | } | ||
972 | |||
772 | void smp_receive_signal(int cpu) | 973 | void smp_receive_signal(int cpu) |
773 | { | 974 | { |
774 | cpumask_t mask = cpumask_of_cpu(cpu); | 975 | cpumask_t mask = cpumask_of_cpu(cpu); |
775 | 976 | ||
776 | if (cpu_online(cpu)) { | 977 | if (cpu_online(cpu)) |
777 | u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); | 978 | __smp_receive_signal_mask(mask); |
778 | |||
779 | if (tlb_type == spitfire) | ||
780 | spitfire_xcall_deliver(data0, 0, 0, mask); | ||
781 | else | ||
782 | cheetah_xcall_deliver(data0, 0, 0, mask); | ||
783 | } | ||
784 | } | 979 | } |
785 | 980 | ||
786 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 981 | void smp_receive_signal_client(int irq, struct pt_regs *regs) |
787 | { | 982 | { |
788 | /* Just return, rtrap takes care of the rest. */ | ||
789 | clear_softint(1 << irq); | 983 | clear_softint(1 << irq); |
790 | } | 984 | } |
791 | 985 | ||
792 | void smp_report_regs(void) | 986 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
793 | { | 987 | { |
794 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | 988 | struct mm_struct *mm; |
989 | unsigned long flags; | ||
990 | |||
991 | clear_softint(1 << irq); | ||
992 | |||
993 | /* See if we need to allocate a new TLB context because | ||
994 | * the version of the one we are using is now out of date. | ||
995 | */ | ||
996 | mm = current->active_mm; | ||
997 | if (unlikely(!mm || (mm == &init_mm))) | ||
998 | return; | ||
999 | |||
1000 | spin_lock_irqsave(&mm->context.lock, flags); | ||
1001 | |||
1002 | if (unlikely(!CTX_VALID(mm->context))) | ||
1003 | get_new_mmu_context(mm); | ||
1004 | |||
1005 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
1006 | |||
1007 | load_secondary_context(mm); | ||
1008 | __flush_tlb_mm(CTX_HWBITS(mm->context), | ||
1009 | SECONDARY_CONTEXT); | ||
795 | } | 1010 | } |
796 | 1011 | ||
797 | void smp_flush_tlb_all(void) | 1012 | void smp_new_mmu_context_version(void) |
798 | { | 1013 | { |
799 | if (tlb_type == spitfire) | 1014 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
800 | smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0); | 1015 | } |
801 | else | 1016 | |
802 | smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0); | 1017 | void smp_report_regs(void) |
803 | __flush_tlb_all(); | 1018 | { |
1019 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | ||
804 | } | 1020 | } |
805 | 1021 | ||
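smp_new_mmu_context_version_client() above is the receiving side of the new cross call: each cpu checks whether its current address space still holds a context from the live version and, if not, allocates a fresh one under the mm's context lock before reloading the secondary context register and flushing the stale TLB entries. In outline:

    mm = current->active_mm;
    if (mm && mm != &init_mm) {
            spin_lock_irqsave(&mm->context.lock, flags);
            if (!CTX_VALID(mm->context))    /* stale version? */
                    get_new_mmu_context(mm);
            spin_unlock_irqrestore(&mm->context.lock, flags);

            load_secondary_context(mm);     /* reload hw context */
            __flush_tlb_mm(CTX_HWBITS(mm->context),
                           SECONDARY_CONTEXT);
    }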
806 | /* We know that the window frames of the user have been flushed | 1022 | /* We know that the window frames of the user have been flushed |
@@ -944,24 +1160,19 @@ void smp_release(void) | |||
944 | * can service tlb flush xcalls... | 1160 | * can service tlb flush xcalls... |
945 | */ | 1161 | */ |
946 | extern void prom_world(int); | 1162 | extern void prom_world(int); |
947 | extern void save_alternate_globals(unsigned long *); | 1163 | |
948 | extern void restore_alternate_globals(unsigned long *); | ||
949 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | 1164 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) |
950 | { | 1165 | { |
951 | unsigned long global_save[24]; | ||
952 | |||
953 | clear_softint(1 << irq); | 1166 | clear_softint(1 << irq); |
954 | 1167 | ||
955 | preempt_disable(); | 1168 | preempt_disable(); |
956 | 1169 | ||
957 | __asm__ __volatile__("flushw"); | 1170 | __asm__ __volatile__("flushw"); |
958 | save_alternate_globals(global_save); | ||
959 | prom_world(1); | 1171 | prom_world(1); |
960 | atomic_inc(&smp_capture_registry); | 1172 | atomic_inc(&smp_capture_registry); |
961 | membar_storeload_storestore(); | 1173 | membar_storeload_storestore(); |
962 | while (penguins_are_doing_time) | 1174 | while (penguins_are_doing_time) |
963 | rmb(); | 1175 | rmb(); |
964 | restore_alternate_globals(global_save); | ||
965 | atomic_dec(&smp_capture_registry); | 1176 | atomic_dec(&smp_capture_registry); |
966 | prom_world(0); | 1177 | prom_world(0); |
967 | 1178 | ||
@@ -1082,6 +1293,8 @@ int setup_profiling_timer(unsigned int multiplier) | |||
1082 | /* Constrain the number of cpus to max_cpus. */ | 1293 | /* Constrain the number of cpus to max_cpus. */ |
1083 | void __init smp_prepare_cpus(unsigned int max_cpus) | 1294 | void __init smp_prepare_cpus(unsigned int max_cpus) |
1084 | { | 1295 | { |
1296 | int i; | ||
1297 | |||
1085 | if (num_possible_cpus() > max_cpus) { | 1298 | if (num_possible_cpus() > max_cpus) { |
1086 | int instance, mid; | 1299 | int instance, mid; |
1087 | 1300 | ||
@@ -1096,6 +1309,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
1096 | } | 1309 | } |
1097 | } | 1310 | } |
1098 | 1311 | ||
1312 | for_each_cpu(i) { | ||
1313 | if (tlb_type == hypervisor) { | ||
1314 | int j; | ||
1315 | |||
1316 | /* XXX get this mapping from machine description */ | ||
1317 | for_each_cpu(j) { | ||
1318 | if ((j >> 2) == (i >> 2)) | ||
1319 | cpu_set(j, cpu_sibling_map[i]); | ||
1320 | } | ||
1321 | } else { | ||
1322 | cpu_set(i, cpu_sibling_map[i]); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1099 | smp_store_cpu_info(boot_cpu_id); | 1326 | smp_store_cpu_info(boot_cpu_id); |
1100 | } | 1327 | } |
1101 | 1328 | ||
@@ -1117,12 +1344,15 @@ void __init smp_setup_cpu_possible_map(void) | |||
1117 | 1344 | ||
1118 | void __devinit smp_prepare_boot_cpu(void) | 1345 | void __devinit smp_prepare_boot_cpu(void) |
1119 | { | 1346 | { |
1120 | if (hard_smp_processor_id() >= NR_CPUS) { | 1347 | int cpu = hard_smp_processor_id(); |
1348 | |||
1349 | if (cpu >= NR_CPUS) { | ||
1121 | prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); | 1350 | prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); |
1122 | prom_halt(); | 1351 | prom_halt(); |
1123 | } | 1352 | } |
1124 | 1353 | ||
1125 | current_thread_info()->cpu = hard_smp_processor_id(); | 1354 | current_thread_info()->cpu = cpu; |
1355 | __local_per_cpu_offset = __per_cpu_offset(cpu); | ||
1126 | 1356 | ||
1127 | cpu_set(smp_processor_id(), cpu_online_map); | 1357 | cpu_set(smp_processor_id(), cpu_online_map); |
1128 | cpu_set(smp_processor_id(), phys_cpu_present_map); | 1358 | cpu_set(smp_processor_id(), phys_cpu_present_map); |
@@ -1139,7 +1369,11 @@ int __devinit __cpu_up(unsigned int cpu) | |||
1139 | if (!cpu_isset(cpu, cpu_online_map)) { | 1369 | if (!cpu_isset(cpu, cpu_online_map)) { |
1140 | ret = -ENODEV; | 1370 | ret = -ENODEV; |
1141 | } else { | 1371 | } else { |
1142 | smp_synchronize_one_tick(cpu); | 1372 | /* On SUN4V, writes to %tick and %stick are |
1373 | * not allowed. | ||
1374 | */ | ||
1375 | if (tlb_type != hypervisor) | ||
1376 | smp_synchronize_one_tick(cpu); | ||
1143 | } | 1377 | } |
1144 | } | 1378 | } |
1145 | return ret; | 1379 | return ret; |
@@ -1183,12 +1417,9 @@ void __init setup_per_cpu_areas(void) | |||
1183 | { | 1417 | { |
1184 | unsigned long goal, size, i; | 1418 | unsigned long goal, size, i; |
1185 | char *ptr; | 1419 | char *ptr; |
1186 | /* Created by linker magic */ | ||
1187 | extern char __per_cpu_start[], __per_cpu_end[]; | ||
1188 | 1420 | ||
1189 | /* Copy section for each CPU (we discard the original) */ | 1421 | /* Copy section for each CPU (we discard the original) */ |
1190 | goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | 1422 | goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); |
1191 | |||
1192 | #ifdef CONFIG_MODULES | 1423 | #ifdef CONFIG_MODULES |
1193 | if (goal < PERCPU_ENOUGH_ROOM) | 1424 | if (goal < PERCPU_ENOUGH_ROOM) |
1194 | goal = PERCPU_ENOUGH_ROOM; | 1425 | goal = PERCPU_ENOUGH_ROOM; |
@@ -1197,31 +1428,10 @@ void __init setup_per_cpu_areas(void) | |||
1197 | for (size = 1UL; size < goal; size <<= 1UL) | 1428 | for (size = 1UL; size < goal; size <<= 1UL) |
1198 | __per_cpu_shift++; | 1429 | __per_cpu_shift++; |
1199 | 1430 | ||
1200 | /* Make sure the resulting __per_cpu_base value | 1431 | ptr = alloc_bootmem(size * NR_CPUS); |
1201 | * will fit in the 43-bit sign extended IMMU | ||
1202 | * TSB register. | ||
1203 | */ | ||
1204 | ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE, | ||
1205 | (unsigned long) __per_cpu_start); | ||
1206 | 1432 | ||
1207 | __per_cpu_base = ptr - __per_cpu_start; | 1433 | __per_cpu_base = ptr - __per_cpu_start; |
1208 | 1434 | ||
1209 | if ((__per_cpu_shift < PAGE_SHIFT) || | ||
1210 | (__per_cpu_base & ~PAGE_MASK) || | ||
1211 | (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) { | ||
1212 | prom_printf("PER_CPU: Invalid layout, " | ||
1213 | "ptr[%p] shift[%lx] base[%lx]\n", | ||
1214 | ptr, __per_cpu_shift, __per_cpu_base); | ||
1215 | prom_halt(); | ||
1216 | } | ||
1217 | |||
1218 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 1435 | for (i = 0; i < NR_CPUS; i++, ptr += size) |
1219 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 1436 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
1220 | |||
1221 | /* Finally, load in the boot cpu's base value. | ||
1222 | * We abuse the IMMU TSB register for trap handler | ||
1223 | * entry and exit loading of %g5. That is why it | ||
1224 | * has to be page aligned. | ||
1225 | */ | ||
1226 | cpu_setup_percpu_base(hard_smp_processor_id()); | ||
1227 | } | 1437 | } |
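setup_per_cpu_areas() above keeps the power-of-two sizing: the per-cpu section length (padded to SMP_CACHE_BYTES, and to PERCPU_ENOUGH_ROOM when modules are enabled) is rounded up to a power of two and the exponent stored in __per_cpu_shift, so a cpu's area is located with a shift instead of a multiply. A user-space sketch of the computation:

    #include <stdio.h>

    int main(void)
    {
            unsigned long goal = 37000;     /* e.g. padded section size */
            unsigned long size, shift = 0;

            for (size = 1UL; size < goal; size <<= 1UL)
                    shift++;

            /* size == 65536, shift == 16 */
            printf("size=%lu shift=%lu\n", size, shift);
            /* cpu 3's offset into the bootmem block: 3 << shift */
            printf("cpu3 offset=%lu\n", 3UL << shift);
            return 0;
    }

What went away is the old constraint that the base fit the sign-extended IMMU TSB register: with __local_per_cpu_offset loaded per cpu instead (see smp_callin() above), a plain alloc_bootmem() is sufficient.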
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 3c06bfb92a8c..9914a17651b4 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -95,9 +95,6 @@ extern int __ashrdi3(int, int); | |||
95 | 95 | ||
96 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); | 96 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); |
97 | 97 | ||
98 | extern unsigned long phys_base; | ||
99 | extern unsigned long pfn_base; | ||
100 | |||
101 | extern unsigned int sys_call_table[]; | 98 | extern unsigned int sys_call_table[]; |
102 | 99 | ||
103 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); | 100 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); |
@@ -108,6 +105,14 @@ extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, | |||
108 | extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, | 105 | extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, |
109 | unsigned long *, unsigned long *, unsigned long *); | 106 | unsigned long *, unsigned long *, unsigned long *); |
110 | 107 | ||
108 | extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); | ||
109 | extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, | ||
110 | unsigned long *); | ||
111 | extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, | ||
112 | unsigned long *, unsigned long *); | ||
113 | extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, | ||
114 | unsigned long *, unsigned long *, unsigned long *); | ||
115 | |||
111 | /* Per-CPU information table */ | 116 | /* Per-CPU information table */ |
112 | EXPORT_PER_CPU_SYMBOL(__cpu_data); | 117 | EXPORT_PER_CPU_SYMBOL(__cpu_data); |
113 | 118 | ||
@@ -241,10 +246,6 @@ EXPORT_SYMBOL(verify_compat_iovec); | |||
241 | #endif | 246 | #endif |
242 | 247 | ||
243 | EXPORT_SYMBOL(dump_fpu); | 248 | EXPORT_SYMBOL(dump_fpu); |
244 | EXPORT_SYMBOL(pte_alloc_one_kernel); | ||
245 | #ifndef CONFIG_SMP | ||
246 | EXPORT_SYMBOL(pgt_quicklists); | ||
247 | #endif | ||
248 | EXPORT_SYMBOL(put_fs_struct); | 249 | EXPORT_SYMBOL(put_fs_struct); |
249 | 250 | ||
250 | /* math-emu wants this */ | 251 | /* math-emu wants this */ |
@@ -339,14 +340,10 @@ EXPORT_SYMBOL(copy_to_user_fixup); | |||
339 | EXPORT_SYMBOL(copy_from_user_fixup); | 340 | EXPORT_SYMBOL(copy_from_user_fixup); |
340 | EXPORT_SYMBOL(copy_in_user_fixup); | 341 | EXPORT_SYMBOL(copy_in_user_fixup); |
341 | EXPORT_SYMBOL(__strncpy_from_user); | 342 | EXPORT_SYMBOL(__strncpy_from_user); |
342 | EXPORT_SYMBOL(__bzero_noasi); | 343 | EXPORT_SYMBOL(__clear_user); |
343 | 344 | ||
344 | /* Various address conversion macros use this. */ | 345 | /* Various address conversion macros use this. */ |
345 | EXPORT_SYMBOL(phys_base); | ||
346 | EXPORT_SYMBOL(pfn_base); | ||
347 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); | 346 | EXPORT_SYMBOL(sparc64_valid_addr_bitmap); |
348 | EXPORT_SYMBOL(page_to_pfn); | ||
349 | EXPORT_SYMBOL(pfn_to_page); | ||
350 | 347 | ||
351 | /* No version information on this, heavily used in inline asm, | 348 | /* No version information on this, heavily used in inline asm, |
352 | * and will always be 'void __ret_efault(void)'. | 349 | * and will always be 'void __ret_efault(void)'. |
@@ -392,4 +389,9 @@ EXPORT_SYMBOL(xor_vis_3); | |||
392 | EXPORT_SYMBOL(xor_vis_4); | 389 | EXPORT_SYMBOL(xor_vis_4); |
393 | EXPORT_SYMBOL(xor_vis_5); | 390 | EXPORT_SYMBOL(xor_vis_5); |
394 | 391 | ||
392 | EXPORT_SYMBOL(xor_niagara_2); | ||
393 | EXPORT_SYMBOL(xor_niagara_3); | ||
394 | EXPORT_SYMBOL(xor_niagara_4); | ||
395 | EXPORT_SYMBOL(xor_niagara_5); | ||
396 | |||
395 | EXPORT_SYMBOL(prom_palette); | 397 | EXPORT_SYMBOL(prom_palette); |
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S new file mode 100644 index 000000000000..b49a68bdda43 --- /dev/null +++ b/arch/sparc64/kernel/sun4v_ivec.S | |||
@@ -0,0 +1,334 @@ | |||
1 | /* sun4v_ivec.S: Sun4v interrupt vector handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/cpudata.h> | ||
7 | #include <asm/intr_queue.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | sun4v_cpu_mondo: | ||
13 | /* Head offset in %g2, tail offset in %g4. | ||
14 | * If they are the same, no work. | ||
15 | */ | ||
16 | mov INTRQ_CPU_MONDO_HEAD, %g2 | ||
17 | ldxa [%g2] ASI_QUEUE, %g2 | ||
18 | mov INTRQ_CPU_MONDO_TAIL, %g4 | ||
19 | ldxa [%g4] ASI_QUEUE, %g4 | ||
20 | cmp %g2, %g4 | ||
21 | be,pn %xcc, sun4v_cpu_mondo_queue_empty | ||
22 | nop | ||
23 | |||
24 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
25 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
26 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
27 | |||
28 | /* Get CPU mondo queue base phys address into %g7. */ | ||
29 | ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 | ||
30 | |||
31 | /* Now get the cross-call arguments and handler PC, same | ||
32 | * layout as sun4u: | ||
33 | * | ||
34 | * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it | ||
35 | * high half is context arg to MMU flushes, into %g5 | ||
36 | * 2nd 64-bit word: 64-bit arg, load into %g1 | ||
37 | * 3rd 64-bit word: 64-bit arg, load into %g7 | ||
38 | */ | ||
39 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3 | ||
40 | add %g2, 0x8, %g2 | ||
41 | srlx %g3, 32, %g5 | ||
42 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
43 | add %g2, 0x8, %g2 | ||
44 | srl %g3, 0, %g3 | ||
45 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7 | ||
46 | add %g2, 0x40 - 0x8 - 0x8, %g2 | ||
47 | |||
48 | /* Update queue head pointer. */ | ||
49 | sethi %hi(8192 - 1), %g4 | ||
50 | or %g4, %lo(8192 - 1), %g4 | ||
51 | and %g2, %g4, %g2 | ||
52 | |||
53 | mov INTRQ_CPU_MONDO_HEAD, %g4 | ||
54 | stxa %g2, [%g4] ASI_QUEUE | ||
55 | membar #Sync | ||
56 | |||
57 | jmpl %g3, %g0 | ||
58 | nop | ||
59 | |||
60 | sun4v_cpu_mondo_queue_empty: | ||
61 | retry | ||
62 | |||
63 | sun4v_dev_mondo: | ||
64 | /* Head offset in %g2, tail offset in %g4. */ | ||
65 | mov INTRQ_DEVICE_MONDO_HEAD, %g2 | ||
66 | ldxa [%g2] ASI_QUEUE, %g2 | ||
67 | mov INTRQ_DEVICE_MONDO_TAIL, %g4 | ||
68 | ldxa [%g4] ASI_QUEUE, %g4 | ||
69 | cmp %g2, %g4 | ||
70 | be,pn %xcc, sun4v_dev_mondo_queue_empty | ||
71 | nop | ||
72 | |||
73 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
74 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
75 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
76 | |||
77 | /* Get DEV mondo queue base phys address into %g5. */ | ||
78 | ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 | ||
79 | |||
80 | /* Load IVEC into %g3. */ | ||
81 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
82 | add %g2, 0x40, %g2 | ||
83 | |||
84 | /* XXX There can be a full 64-byte block of data here. | ||
85 | * XXX This is how we can get at MSI vector data. | ||
86 | * XXX Currently we do not capture this, but when we do we'll | ||
87 | * XXX need to add a 64-byte storage area in the struct ino_bucket | ||
88 | * XXX or the struct irq_desc. | ||
89 | */ | ||
90 | |||
91 | /* Update queue head pointer, this frees up some registers. */ | ||
92 | sethi %hi(8192 - 1), %g4 | ||
93 | or %g4, %lo(8192 - 1), %g4 | ||
94 | and %g2, %g4, %g2 | ||
95 | |||
96 | mov INTRQ_DEVICE_MONDO_HEAD, %g4 | ||
97 | stxa %g2, [%g4] ASI_QUEUE | ||
98 | membar #Sync | ||
99 | |||
100 | /* Get &__irq_work[smp_processor_id()] into %g1. */ | ||
101 | TRAP_LOAD_IRQ_WORK(%g1, %g4) | ||
102 | |||
103 | /* Get &ivector_table[IVEC] into %g4. */ | ||
104 | sethi %hi(ivector_table), %g4 | ||
105 | sllx %g3, 5, %g3 | ||
106 | or %g4, %lo(ivector_table), %g4 | ||
107 | add %g4, %g3, %g4 | ||
108 | |||
109 | /* Load IRQ %pil into %g5. */ | ||
110 | ldub [%g4 + 0x04], %g5 | ||
111 | |||
112 | /* Insert ivector_table[] entry into __irq_work[] queue. */ | ||
113 | sllx %g5, 2, %g3 | ||
114 | lduw [%g1 + %g3], %g2 /* g2 = irq_work(cpu, pil) */ | ||
115 | stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */ | ||
116 | stw %g4, [%g1 + %g3] /* irq_work(cpu, pil) = bucket */ | ||
117 | |||
118 | /* Signal the interrupt by setting (1 << pil) in %softint. */ | ||
119 | mov 1, %g2 | ||
120 | sllx %g2, %g5, %g2 | ||
121 | wr %g2, 0x0, %set_softint | ||
122 | |||
123 | sun4v_dev_mondo_queue_empty: | ||
124 | retry | ||
125 | |||
126 | sun4v_res_mondo: | ||
127 | /* Head offset in %g2, tail offset in %g4. */ | ||
128 | mov INTRQ_RESUM_MONDO_HEAD, %g2 | ||
129 | ldxa [%g2] ASI_QUEUE, %g2 | ||
130 | mov INTRQ_RESUM_MONDO_TAIL, %g4 | ||
131 | ldxa [%g4] ASI_QUEUE, %g4 | ||
132 | cmp %g2, %g4 | ||
133 | be,pn %xcc, sun4v_res_mondo_queue_empty | ||
134 | nop | ||
135 | |||
136 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
137 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
138 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
139 | |||
140 | /* Get RES mondo queue base phys address into %g5. */ | ||
141 | ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5 | ||
142 | |||
143 | /* Get RES kernel buffer base phys address into %g7. */ | ||
144 | ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7 | ||
145 | |||
146 | /* If the first word is non-zero, queue is full. */ | ||
147 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
148 | brnz,pn %g1, sun4v_res_mondo_queue_full | ||
149 | nop | ||
150 | |||
151 | /* Remember this entry's offset in %g1. */ | ||
152 | mov %g2, %g1 | ||
153 | |||
154 | /* Copy 64-byte queue entry into kernel buffer. */ | ||
155 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
156 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
157 | add %g2, 0x08, %g2 | ||
158 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
159 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
160 | add %g2, 0x08, %g2 | ||
161 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
162 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
163 | add %g2, 0x08, %g2 | ||
164 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
165 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
166 | add %g2, 0x08, %g2 | ||
167 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
168 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
169 | add %g2, 0x08, %g2 | ||
170 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
171 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
172 | add %g2, 0x08, %g2 | ||
173 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
174 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
175 | add %g2, 0x08, %g2 | ||
176 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
177 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
178 | add %g2, 0x08, %g2 | ||
179 | |||
180 | /* Update queue head pointer. */ | ||
181 | sethi %hi(8192 - 1), %g4 | ||
182 | or %g4, %lo(8192 - 1), %g4 | ||
183 | and %g2, %g4, %g2 | ||
184 | |||
185 | mov INTRQ_RESUM_MONDO_HEAD, %g4 | ||
186 | stxa %g2, [%g4] ASI_QUEUE | ||
187 | membar #Sync | ||
188 | |||
189 | /* Disable interrupts and save register state so we can call | ||
190 | * C code. The etrap handling will leave %g4 in %l4 for us | ||
191 | * when it's done. | ||
192 | */ | ||
193 | rdpr %pil, %g2 | ||
194 | wrpr %g0, 15, %pil | ||
195 | mov %g1, %g4 | ||
196 | ba,pt %xcc, etrap_irq | ||
197 | rd %pc, %g7 | ||
198 | |||
199 | /* Log the event. */ | ||
200 | add %sp, PTREGS_OFF, %o0 | ||
201 | call sun4v_resum_error | ||
202 | mov %l4, %o1 | ||
203 | |||
204 | /* Return from trap. */ | ||
205 | ba,pt %xcc, rtrap_irq | ||
206 | nop | ||
207 | |||
208 | sun4v_res_mondo_queue_empty: | ||
209 | retry | ||
210 | |||
211 | sun4v_res_mondo_queue_full: | ||
212 | /* The queue is full, consolidate our damage by setting | ||
213 | * the head equal to the tail. We'll just trap again otherwise. | ||
214 | * Call C code to log the event. | ||
215 | */ | ||
216 | mov INTRQ_RESUM_MONDO_HEAD, %g2 | ||
217 | stxa %g4, [%g2] ASI_QUEUE | ||
218 | membar #Sync | ||
219 | |||
220 | rdpr %pil, %g2 | ||
221 | wrpr %g0, 15, %pil | ||
222 | ba,pt %xcc, etrap_irq | ||
223 | rd %pc, %g7 | ||
224 | |||
225 | call sun4v_resum_overflow | ||
226 | add %sp, PTREGS_OFF, %o0 | ||
227 | |||
228 | ba,pt %xcc, rtrap_irq | ||
229 | nop | ||
230 | |||
231 | sun4v_nonres_mondo: | ||
232 | /* Head offset in %g2, tail offset in %g4. */ | ||
233 | mov INTRQ_NONRESUM_MONDO_HEAD, %g2 | ||
234 | ldxa [%g2] ASI_QUEUE, %g2 | ||
235 | mov INTRQ_NONRESUM_MONDO_TAIL, %g4 | ||
236 | ldxa [%g4] ASI_QUEUE, %g4 | ||
237 | cmp %g2, %g4 | ||
238 | be,pn %xcc, sun4v_nonres_mondo_queue_empty | ||
239 | nop | ||
240 | |||
241 | /* Get &trap_block[smp_processor_id()] into %g3. */ | ||
242 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | ||
243 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | ||
244 | |||
245 | /* Get RES mondo queue base phys address into %g5. */ | ||
246 | ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5 | ||
247 | |||
248 | /* Get RES kernel buffer base phys address into %g7. */ | ||
249 | ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7 | ||
250 | |||
251 | /* If the first word of the kbuf entry is non-zero, queue is full. */ | ||
252 | ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 | ||
253 | brnz,pn %g1, sun4v_nonres_mondo_queue_full | ||
254 | nop | ||
255 | |||
256 | /* Remember this entry's offset in %g1. */ | ||
257 | mov %g2, %g1 | ||
258 | |||
259 | /* Copy 64-byte queue entry into kernel buffer. */ | ||
260 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
261 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
262 | add %g2, 0x08, %g2 | ||
263 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
264 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
265 | add %g2, 0x08, %g2 | ||
266 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
267 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
268 | add %g2, 0x08, %g2 | ||
269 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
270 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
271 | add %g2, 0x08, %g2 | ||
272 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
273 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
274 | add %g2, 0x08, %g2 | ||
275 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
276 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
277 | add %g2, 0x08, %g2 | ||
278 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
279 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
280 | add %g2, 0x08, %g2 | ||
281 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | ||
282 | stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC | ||
283 | add %g2, 0x08, %g2 | ||
284 | |||
285 | /* Update queue head pointer. */ | ||
286 | sethi %hi(8192 - 1), %g4 | ||
287 | or %g4, %lo(8192 - 1), %g4 | ||
288 | and %g2, %g4, %g2 | ||
289 | |||
290 | mov INTRQ_NONRESUM_MONDO_HEAD, %g4 | ||
291 | stxa %g2, [%g4] ASI_QUEUE | ||
292 | membar #Sync | ||
293 | |||
294 | /* Disable interrupts and save register state so we can call | ||
295 | * C code. The etrap handling will leave %g4 in %l4 for us | ||
296 | * when it's done. | ||
297 | */ | ||
298 | rdpr %pil, %g2 | ||
299 | wrpr %g0, 15, %pil | ||
300 | mov %g1, %g4 | ||
301 | ba,pt %xcc, etrap_irq | ||
302 | rd %pc, %g7 | ||
303 | |||
304 | /* Log the event. */ | ||
305 | add %sp, PTREGS_OFF, %o0 | ||
306 | call sun4v_nonresum_error | ||
307 | mov %l4, %o1 | ||
308 | |||
309 | /* Return from trap. */ | ||
310 | ba,pt %xcc, rtrap_irq | ||
311 | nop | ||
312 | |||
313 | sun4v_nonres_mondo_queue_empty: | ||
314 | retry | ||
315 | |||
316 | sun4v_nonres_mondo_queue_full: | ||
317 | /* The queue is full, consolidate our damage by setting | ||
318 | * the head equal to the tail. We'll just trap again otherwise. | ||
319 | * Call C code to log the event. | ||
320 | */ | ||
321 | mov INTRQ_NONRESUM_MONDO_HEAD, %g2 | ||
322 | stxa %g4, [%g2] ASI_QUEUE | ||
323 | membar #Sync | ||
324 | |||
325 | rdpr %pil, %g2 | ||
326 | wrpr %g0, 15, %pil | ||
327 | ba,pt %xcc, etrap_irq | ||
328 | rd %pc, %g7 | ||
329 | |||
330 | call sun4v_nonresum_overflow | ||
331 | add %sp, PTREGS_OFF, %o0 | ||
332 | |||
333 | ba,pt %xcc, rtrap_irq | ||
334 | nop | ||
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S new file mode 100644 index 000000000000..ab23ddb7116e --- /dev/null +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S | |||
@@ -0,0 +1,421 @@ | |||
1 | /* sun4v_tlb_miss.S: Sun4v TLB miss handlers. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | .text | ||
7 | .align 32 | ||
8 | |||
9 | /* Load ITLB fault information into VADDR and CTX, using BASE. */ | ||
10 | #define LOAD_ITLB_INFO(BASE, VADDR, CTX) \ | ||
11 | ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \ | ||
12 | ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX; | ||
13 | |||
14 | /* Load DTLB fault information into VADDR and CTX, using BASE. */ | ||
15 | #define LOAD_DTLB_INFO(BASE, VADDR, CTX) \ | ||
16 | ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \ | ||
17 | ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX; | ||
18 | |||
19 | /* DEST = (VADDR >> 22) | ||
20 | * | ||
21 | * Branch to ZERO_CTX_LABEL if context is zero. | ||
22 | */ | ||
23 | #define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \ | ||
24 | srlx VADDR, 22, DEST; \ | ||
25 | brz,pn CTX, ZERO_CTX_LABEL; \ | ||
26 | nop; | ||
27 | |||
28 | /* Create TSB pointer. This is something like: | ||
29 | * | ||
30 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | ||
31 | * tsb_base = tsb_reg & ~0x7UL; | ||
32 | * tsb_index = ((vaddr >> PAGE_SHIFT) & index_mask); | ||
33 | * tsb_ptr = tsb_base + (tsb_index * 16); | ||
34 | */ | ||
35 | #define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \ | ||
36 | and TSB_PTR, 0x7, TMP1; \ | ||
37 | mov 512, TMP2; \ | ||
38 | andn TSB_PTR, 0x7, TSB_PTR; \ | ||
39 | sllx TMP2, TMP1, TMP2; \ | ||
40 | srlx VADDR, PAGE_SHIFT, TMP1; \ | ||
41 | sub TMP2, 1, TMP2; \ | ||
42 | and TMP1, TMP2, TMP1; \ | ||
43 | sllx TMP1, 4, TMP1; \ | ||
44 | add TSB_PTR, TMP1, TSB_PTR; | ||
45 | |||
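COMPUTE_TSB_PTR is the pseudo-code from its comment, mechanized. As a standalone C function (a sketch, with the obvious types assumed) it would read:

    /* The low 3 bits of the TSB register encode the size
     * (512 << n entries), the remaining bits are the base address,
     * and each TSB entry is 16 bytes (tag word plus pte word).
     */
    static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
    				     unsigned long vaddr)
    {
    	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
    	unsigned long tsb_base = tsb_reg & ~0x7UL;
    	unsigned long tsb_index = (vaddr >> PAGE_SHIFT) & index_mask;

    	return tsb_base + (tsb_index * 16);
    }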
46 | sun4v_itlb_miss: | ||
47 | /* Load MMU Miss base into %g2. */ | ||
48 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
49 | |||
50 | /* Load UTSB reg into %g1. */ | ||
51 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
52 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
53 | |||
54 | LOAD_ITLB_INFO(%g2, %g4, %g5) | ||
55 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v) | ||
56 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) | ||
57 | |||
58 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | ||
59 | ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 | ||
60 | cmp %g2, %g6 | ||
61 | bne,a,pn %xcc, tsb_miss_page_table_walk | ||
62 | mov FAULT_CODE_ITLB, %g3 | ||
63 | andcc %g3, _PAGE_EXEC_4V, %g0 | ||
64 | be,a,pn %xcc, tsb_do_fault | ||
65 | mov FAULT_CODE_ITLB, %g3 | ||
66 | |||
67 | /* We have a valid entry, make hypervisor call to load | ||
68 | * I-TLB and return from trap. | ||
69 | * | ||
70 | * %g3: PTE | ||
71 | * %g4: vaddr | ||
72 | */ | ||
73 | sun4v_itlb_load: | ||
74 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
75 | mov %o0, %g1 ! save %o0 | ||
76 | mov %o1, %g2 ! save %o1 | ||
77 | mov %o2, %g5 ! save %o2 | ||
78 | mov %o3, %g7 ! save %o3 | ||
79 | mov %g4, %o0 ! vaddr | ||
80 | ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx | ||
81 | mov %g3, %o2 ! PTE | ||
82 | mov HV_MMU_IMMU, %o3 ! flags | ||
83 | ta HV_MMU_MAP_ADDR_TRAP | ||
84 | brnz,pn %o0, sun4v_itlb_error | ||
85 | mov %g2, %o1 ! restore %o1 | ||
86 | mov %g1, %o0 ! restore %o0 | ||
87 | mov %g5, %o2 ! restore %o2 | ||
88 | mov %g7, %o3 ! restore %o3 | ||
89 | |||
90 | retry | ||
91 | |||
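The %o-register shuffle around "ta HV_MMU_MAP_ADDR_TRAP" exists only because the hypervisor call consumes and clobbers %o0-%o3, which a TLB-miss handler must preserve. Written as a C prototype (a hypothetical wrapper name, not a real kernel symbol), the call being made is:

    /* %o0 = vaddr, %o1 = context, %o2 = TTE, %o3 = HV_MMU_IMMU or
     * HV_MMU_DMMU; status returns in %o0, and any non-zero status
     * sends us to the sun4v_itlb_error/sun4v_dtlb_error paths.
     */
    long hv_mmu_map_addr(unsigned long vaddr, unsigned long ctx,
    		     unsigned long tte, unsigned long flags);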
92 | sun4v_dtlb_miss: | ||
93 | /* Load MMU Miss base into %g2. */ | ||
94 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
95 | |||
96 | /* Load UTSB reg into %g1. */ | ||
97 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
98 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
99 | |||
100 | LOAD_DTLB_INFO(%g2, %g4, %g5) | ||
101 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v) | ||
102 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) | ||
103 | |||
104 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | ||
105 | ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 | ||
106 | cmp %g2, %g6 | ||
107 | bne,a,pn %xcc, tsb_miss_page_table_walk | ||
108 | mov FAULT_CODE_DTLB, %g3 | ||
109 | |||
110 | /* We have a valid entry, make hypervisor call to load | ||
111 | * D-TLB and return from trap. | ||
112 | * | ||
113 | * %g3: PTE | ||
114 | * %g4: vaddr | ||
115 | */ | ||
116 | sun4v_dtlb_load: | ||
117 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
118 | mov %o0, %g1 ! save %o0 | ||
119 | mov %o1, %g2 ! save %o1 | ||
120 | mov %o2, %g5 ! save %o2 | ||
121 | mov %o3, %g7 ! save %o3 | ||
122 | mov %g4, %o0 ! vaddr | ||
123 | ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx | ||
124 | mov %g3, %o2 ! PTE | ||
125 | mov HV_MMU_DMMU, %o3 ! flags | ||
126 | ta HV_MMU_MAP_ADDR_TRAP | ||
127 | brnz,pn %o0, sun4v_dtlb_error | ||
128 | mov %g2, %o1 ! restore %o1 | ||
129 | mov %g1, %o0 ! restore %o0 | ||
130 | mov %g5, %o2 ! restore %o2 | ||
131 | mov %g7, %o3 ! restore %o3 | ||
132 | |||
133 | retry | ||
134 | |||
135 | sun4v_dtlb_prot: | ||
136 | SET_GL(1) | ||
137 | |||
138 | /* Load MMU Miss base into %g5. */ | ||
139 | ldxa [%g0] ASI_SCRATCHPAD, %g5 | ||
140 | |||
141 | ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
142 | rdpr %tl, %g1 | ||
143 | cmp %g1, 1 | ||
144 | bgu,pn %xcc, winfix_trampoline | ||
145 | nop | ||
146 | ba,pt %xcc, sparc64_realfault_common | ||
147 | mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 | ||
148 | |||
149 | /* Called from trap table: | ||
150 | * %g4: vaddr | ||
151 | * %g5: context | ||
152 | * %g6: TAG TARGET | ||
153 | */ | ||
154 | sun4v_itsb_miss: | ||
155 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
156 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
157 | brz,pn %g5, kvmap_itlb_4v | ||
158 | mov FAULT_CODE_ITLB, %g3 | ||
159 | ba,a,pt %xcc, sun4v_tsb_miss_common | ||
160 | |||
161 | /* Called from trap table: | ||
162 | * %g4: vaddr | ||
163 | * %g5: context | ||
164 | * %g6: TAG TARGET | ||
165 | */ | ||
166 | sun4v_dtsb_miss: | ||
167 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
168 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
169 | brz,pn %g5, kvmap_dtlb_4v | ||
170 | mov FAULT_CODE_DTLB, %g3 | ||
171 | |||
172 | /* fallthrough */ | ||
173 | |||
174 | /* Create TSB pointer into %g1. This is something like: | ||
175 | * | ||
176 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | ||
177 | * tsb_base = tsb_reg & ~0x7UL; | ||
178 | * tsb_index = ((vaddr >> PAGE_SHIFT) & index_mask); | ||
179 | * tsb_ptr = tsb_base + (tsb_index * 16); | ||
180 | */ | ||
181 | sun4v_tsb_miss_common: | ||
182 | COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7) | ||
183 | |||
184 | /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS | ||
185 | * still in %g2, so it's quite trivial to get at the PGD PHYS value | ||
186 | * so we can preload it into %g7. | ||
187 | */ | ||
188 | sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 | ||
189 | ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath | ||
190 | ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 | ||
191 | |||
192 | sun4v_itlb_error: | ||
193 | sethi %hi(sun4v_err_itlb_vaddr), %g1 | ||
194 | stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] | ||
195 | sethi %hi(sun4v_err_itlb_ctx), %g1 | ||
196 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
197 | ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 | ||
198 | stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)] | ||
199 | sethi %hi(sun4v_err_itlb_pte), %g1 | ||
200 | stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)] | ||
201 | sethi %hi(sun4v_err_itlb_error), %g1 | ||
202 | stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] | ||
203 | |||
204 | rdpr %tl, %g4 | ||
205 | cmp %g4, 1 | ||
206 | ble,pt %icc, 1f | ||
207 | sethi %hi(2f), %g7 | ||
208 | ba,pt %xcc, etraptl1 | ||
209 | or %g7, %lo(2f), %g7 | ||
210 | |||
211 | 1: ba,pt %xcc, etrap | ||
212 | 2: or %g7, %lo(2b), %g7 | ||
213 | call sun4v_itlb_error_report | ||
214 | add %sp, PTREGS_OFF, %o0 | ||
215 | |||
216 | /* NOTREACHED */ | ||
217 | |||
218 | sun4v_dtlb_error: | ||
219 | sethi %hi(sun4v_err_dtlb_vaddr), %g1 | ||
220 | stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] | ||
221 | sethi %hi(sun4v_err_dtlb_ctx), %g1 | ||
222 | ldxa [%g0] ASI_SCRATCHPAD, %g6 | ||
223 | ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 | ||
224 | stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)] | ||
225 | sethi %hi(sun4v_err_dtlb_pte), %g1 | ||
226 | stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)] | ||
227 | sethi %hi(sun4v_err_dtlb_error), %g1 | ||
228 | stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] | ||
229 | |||
230 | rdpr %tl, %g4 | ||
231 | cmp %g4, 1 | ||
232 | ble,pt %icc, 1f | ||
233 | sethi %hi(2f), %g7 | ||
234 | ba,pt %xcc, etraptl1 | ||
235 | or %g7, %lo(2f), %g7 | ||
236 | |||
237 | 1: ba,pt %xcc, etrap | ||
238 | 2: or %g7, %lo(2b), %g7 | ||
239 | call sun4v_dtlb_error_report | ||
240 | add %sp, PTREGS_OFF, %o0 | ||
241 | |||
242 | /* NOTREACHED */ | ||
243 | |||
244 | /* Instruction Access Exception, tl0. */ | ||
245 | sun4v_iacc: | ||
246 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
247 | ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 | ||
248 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 | ||
249 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 | ||
250 | sllx %g3, 16, %g3 | ||
251 | or %g5, %g3, %g5 | ||
252 | ba,pt %xcc, etrap | ||
253 | rd %pc, %g7 | ||
254 | mov %l4, %o1 | ||
255 | mov %l5, %o2 | ||
256 | call sun4v_insn_access_exception | ||
257 | add %sp, PTREGS_OFF, %o0 | ||
258 | ba,a,pt %xcc, rtrap_clr_l6 | ||
259 | |||
260 | /* Instruction Access Exception, tl1. */ | ||
261 | sun4v_iacc_tl1: | ||
262 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
263 | ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 | ||
264 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 | ||
265 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 | ||
266 | sllx %g3, 16, %g3 | ||
267 | or %g5, %g3, %g5 | ||
268 | ba,pt %xcc, etraptl1 | ||
269 | rd %pc, %g7 | ||
270 | mov %l4, %o1 | ||
271 | mov %l5, %o2 | ||
272 | call sun4v_insn_access_exception_tl1 | ||
273 | add %sp, PTREGS_OFF, %o0 | ||
274 | ba,a,pt %xcc, rtrap_clr_l6 | ||
275 | |||
276 | /* Data Access Exception, tl0. */ | ||
277 | sun4v_dacc: | ||
278 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
279 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
280 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
281 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
282 | sllx %g3, 16, %g3 | ||
283 | or %g5, %g3, %g5 | ||
284 | ba,pt %xcc, etrap | ||
285 | rd %pc, %g7 | ||
286 | mov %l4, %o1 | ||
287 | mov %l5, %o2 | ||
288 | call sun4v_data_access_exception | ||
289 | add %sp, PTREGS_OFF, %o0 | ||
290 | ba,a,pt %xcc, rtrap_clr_l6 | ||
291 | |||
292 | /* Data Access Exception, tl1. */ | ||
293 | sun4v_dacc_tl1: | ||
294 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
295 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
296 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
297 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
298 | sllx %g3, 16, %g3 | ||
299 | or %g5, %g3, %g5 | ||
300 | ba,pt %xcc, etraptl1 | ||
301 | rd %pc, %g7 | ||
302 | mov %l4, %o1 | ||
303 | mov %l5, %o2 | ||
304 | call sun4v_data_access_exception_tl1 | ||
305 | add %sp, PTREGS_OFF, %o0 | ||
306 | ba,a,pt %xcc, rtrap_clr_l6 | ||
307 | |||
308 | /* Memory Address Unaligned. */ | ||
309 | sun4v_mna: | ||
310 | /* Window fixup? */ | ||
311 | rdpr %tl, %g2 | ||
312 | cmp %g2, 1 | ||
313 | ble,pt %icc, 1f | ||
314 | nop | ||
315 | |||
316 | SET_GL(1) | ||
317 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
318 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
319 | mov HV_FAULT_TYPE_UNALIGNED, %g3 | ||
320 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4 | ||
321 | sllx %g3, 16, %g3 | ||
322 | or %g4, %g3, %g4 | ||
323 | ba,pt %xcc, winfix_mna | ||
324 | rdpr %tpc, %g3 | ||
325 | /* not reached */ | ||
326 | |||
327 | 1: ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
328 | mov HV_FAULT_TYPE_UNALIGNED, %g3 | ||
329 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
330 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
331 | sllx %g3, 16, %g3 | ||
332 | or %g5, %g3, %g5 | ||
333 | |||
334 | ba,pt %xcc, etrap | ||
335 | rd %pc, %g7 | ||
336 | mov %l4, %o1 | ||
337 | mov %l5, %o2 | ||
338 | call sun4v_do_mna | ||
339 | add %sp, PTREGS_OFF, %o0 | ||
340 | ba,a,pt %xcc, rtrap_clr_l6 | ||
341 | |||
342 | /* Privileged Action. */ | ||
343 | sun4v_privact: | ||
344 | ba,pt %xcc, etrap | ||
345 | rd %pc, %g7 | ||
346 | call do_privact | ||
347 | add %sp, PTREGS_OFF, %o0 | ||
348 | ba,a,pt %xcc, rtrap_clr_l6 | ||
349 | |||
350 | /* Unaligned ldd float, tl0. */ | ||
351 | sun4v_lddfmna: | ||
352 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
353 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
354 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
355 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
356 | sllx %g3, 16, %g3 | ||
357 | or %g5, %g3, %g5 | ||
358 | ba,pt %xcc, etrap | ||
359 | rd %pc, %g7 | ||
360 | mov %l4, %o1 | ||
361 | mov %l5, %o2 | ||
362 | call handle_lddfmna | ||
363 | add %sp, PTREGS_OFF, %o0 | ||
364 | ba,a,pt %xcc, rtrap_clr_l6 | ||
365 | |||
366 | /* Unaligned std float, tl0. */ | ||
367 | sun4v_stdfmna: | ||
368 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
369 | ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 | ||
370 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
371 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
372 | sllx %g3, 16, %g3 | ||
373 | or %g5, %g3, %g5 | ||
374 | ba,pt %xcc, etrap | ||
375 | rd %pc, %g7 | ||
376 | mov %l4, %o1 | ||
377 | mov %l5, %o2 | ||
378 | call handle_stdfmna | ||
379 | add %sp, PTREGS_OFF, %o0 | ||
380 | ba,a,pt %xcc, rtrap_clr_l6 | ||
381 | |||
382 | #define BRANCH_ALWAYS 0x10680000 | ||
383 | #define NOP 0x01000000 | ||
384 | #define SUN4V_DO_PATCH(OLD, NEW) \ | ||
385 | sethi %hi(NEW), %g1; \ | ||
386 | or %g1, %lo(NEW), %g1; \ | ||
387 | sethi %hi(OLD), %g2; \ | ||
388 | or %g2, %lo(OLD), %g2; \ | ||
389 | sub %g1, %g2, %g1; \ | ||
390 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
391 | sll %g1, 11, %g1; \ | ||
392 | srl %g1, 11 + 2, %g1; \ | ||
393 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
394 | or %g3, %g1, %g3; \ | ||
395 | stw %g3, [%g2]; \ | ||
396 | sethi %hi(NOP), %g3; \ | ||
397 | or %g3, %lo(NOP), %g3; \ | ||
398 | stw %g3, [%g2 + 0x4]; \ | ||
399 | flush %g2; | ||
400 | |||
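The sll 11 / srl (11 + 2) pair in SUN4V_DO_PATCH is a mask-and-scale: it reduces the byte offset between old and new handlers to the 19-bit word displacement field of a "ba,pt %xcc" instruction (0x10680000 is that opcode with disp19 = 0). A C sketch of what the macro stores, with flushi() a hypothetical wrapper for the flush instruction:

    static void sun4v_patch(u32 *old, unsigned long new_addr)
    {
    	long delta = (long)new_addr - (long)old;
    	u32 insn = 0x10680000 | ((delta >> 2) & 0x7ffff);

    	old[0] = insn;		/* ba,pt %xcc, new_addr */
    	old[1] = 0x01000000;	/* nop, fills the delay slot */
    	flushi(old);		/* make the I-cache see the stores */
    }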
401 | .globl sun4v_patch_tlb_handlers | ||
402 | .type sun4v_patch_tlb_handlers,#function | ||
403 | sun4v_patch_tlb_handlers: | ||
404 | SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss) | ||
405 | SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss) | ||
406 | SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss) | ||
407 | SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss) | ||
408 | SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot) | ||
409 | SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot) | ||
410 | SUN4V_DO_PATCH(tl0_iax, sun4v_iacc) | ||
411 | SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1) | ||
412 | SUN4V_DO_PATCH(tl0_dax, sun4v_dacc) | ||
413 | SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1) | ||
414 | SUN4V_DO_PATCH(tl0_mna, sun4v_mna) | ||
415 | SUN4V_DO_PATCH(tl1_mna, sun4v_mna) | ||
416 | SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna) | ||
417 | SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna) | ||
418 | SUN4V_DO_PATCH(tl0_privact, sun4v_privact) | ||
419 | retl | ||
420 | nop | ||
421 | .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers | ||
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 5f8c822a2b4a..7a869138c37f 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c | |||
@@ -25,25 +25,93 @@ | |||
25 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
26 | #include <linux/ipc.h> | 26 | #include <linux/ipc.h> |
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/random.h> | ||
28 | 29 | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <asm/ipc.h> | 31 | #include <asm/ipc.h> |
31 | #include <asm/utrap.h> | 32 | #include <asm/utrap.h> |
32 | #include <asm/perfctr.h> | 33 | #include <asm/perfctr.h> |
34 | #include <asm/a.out.h> | ||
33 | 35 | ||
34 | /* #define DEBUG_UNIMP_SYSCALL */ | 36 | /* #define DEBUG_UNIMP_SYSCALL */ |
35 | 37 | ||
36 | /* XXX Make this per-binary type, this way we can detect the type of | ||
37 | * XXX a binary. Every Sparc executable calls this very early on. | ||
38 | */ | ||
39 | asmlinkage unsigned long sys_getpagesize(void) | 38 | asmlinkage unsigned long sys_getpagesize(void) |
40 | { | 39 | { |
41 | return PAGE_SIZE; | 40 | return PAGE_SIZE; |
42 | } | 41 | } |
43 | 42 | ||
44 | #define COLOUR_ALIGN(addr,pgoff) \ | 43 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) |
45 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ | 44 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) |
46 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) | 45 | |
46 | /* Does addr --> addr+len fall within 4GB of the VA-space hole or | ||
47 | * overflow past the end of the 64-bit address space? | ||
48 | */ | ||
49 | static inline int invalid_64bit_range(unsigned long addr, unsigned long len) | ||
50 | { | ||
51 | unsigned long va_exclude_start, va_exclude_end; | ||
52 | |||
53 | va_exclude_start = VA_EXCLUDE_START; | ||
54 | va_exclude_end = VA_EXCLUDE_END; | ||
55 | |||
56 | if (unlikely(len >= va_exclude_start)) | ||
57 | return 1; | ||
58 | |||
59 | if (unlikely((addr + len) < addr)) | ||
60 | return 1; | ||
61 | |||
62 | if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || | ||
63 | ((addr + len) >= va_exclude_start && | ||
64 | (addr + len) < va_exclude_end))) | ||
65 | return 1; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
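Worked out, VA_EXCLUDE_START is 0x0000080000000000 - 0x100000000 = 0x000007ff00000000 and VA_EXCLUDE_END is 0xfffff80000000000 + 0x100000000 = 0xfffff80100000000: the architectural hole between the two canonical halves of the address space, widened by the 4GB (1UL << 32) guard on each side that the comment above refers to.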
70 | /* Does start,end straddle the VA-space hole? */ | ||
71 | static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) | ||
72 | { | ||
73 | unsigned long va_exclude_start, va_exclude_end; | ||
74 | |||
75 | va_exclude_start = VA_EXCLUDE_START; | ||
76 | va_exclude_end = VA_EXCLUDE_END; | ||
77 | |||
78 | if (likely(start < va_exclude_start && end < va_exclude_start)) | ||
79 | return 0; | ||
80 | |||
81 | if (likely(start >= va_exclude_end && end >= va_exclude_end)) | ||
82 | return 0; | ||
83 | |||
84 | return 1; | ||
85 | } | ||
86 | |||
87 | /* These functions differ from the default implementations in | ||
88 | * mm/mmap.c in two ways: | ||
89 | * | ||
90 | * 1) For file backed MAP_SHARED mmap()'s we D-cache color align, | ||
91 | * while for MAP_FIXED mappings we just validate what the user gave us. | ||
92 | * 2) For 64-bit tasks we avoid mapping anything within 4GB of | ||
93 | * the spitfire/niagara VA-hole. | ||
94 | */ | ||
95 | |||
96 | static inline unsigned long COLOUR_ALIGN(unsigned long addr, | ||
97 | unsigned long pgoff) | ||
98 | { | ||
99 | unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); | ||
100 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
101 | |||
102 | return base + off; | ||
103 | } | ||
104 | |||
105 | static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, | ||
106 | unsigned long pgoff) | ||
107 | { | ||
108 | unsigned long base = addr & ~(SHMLBA-1); | ||
109 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
110 | |||
111 | if (base + off <= addr) | ||
112 | return base + off; | ||
113 | return base - off; | ||
114 | } | ||
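A quick numeric check of the colour alignment, using illustrative values only (PAGE_SHIFT = 13 and SHMLBA = 0x4000 assumed here, not taken from the headers):

    /* COLOUR_ALIGN(0x70000123, 3):
     *   base = (0x70000123 + 0x3fff) & ~0x3fff = 0x70004000
     *   off  = (3 << 13) & 0x3fff              = 0x2000
     *   result                                 = 0x70006000
     *
     * 0x70006000 % SHMLBA == (3 << PAGE_SHIFT) % SHMLBA == 0x2000,
     * so the mapping lands on the same D-cache colour as its file
     * offset, which is the whole point of the exercise.
     */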
47 | 115 | ||
48 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) | 116 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) |
49 | { | 117 | { |
@@ -64,8 +132,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
64 | } | 132 | } |
65 | 133 | ||
66 | if (test_thread_flag(TIF_32BIT)) | 134 | if (test_thread_flag(TIF_32BIT)) |
67 | task_size = 0xf0000000UL; | 135 | task_size = STACK_TOP32; |
68 | if (len > task_size || len > -PAGE_OFFSET) | 136 | if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) |
69 | return -ENOMEM; | 137 | return -ENOMEM; |
70 | 138 | ||
71 | do_color_align = 0; | 139 | do_color_align = 0; |
@@ -84,11 +152,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
84 | return addr; | 152 | return addr; |
85 | } | 153 | } |
86 | 154 | ||
87 | if (len <= mm->cached_hole_size) { | 155 | if (len > mm->cached_hole_size) { |
156 | start_addr = addr = mm->free_area_cache; | ||
157 | } else { | ||
158 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
88 | mm->cached_hole_size = 0; | 159 | mm->cached_hole_size = 0; |
89 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
90 | } | 160 | } |
91 | start_addr = addr = mm->free_area_cache; | ||
92 | 161 | ||
93 | task_size -= len; | 162 | task_size -= len; |
94 | 163 | ||
@@ -100,11 +169,12 @@ full_search: | |||
100 | 169 | ||
101 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | 170 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { |
102 | /* At this point: (!vma || addr < vma->vm_end). */ | 171 | /* At this point: (!vma || addr < vma->vm_end). */ |
103 | if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) { | 172 | if (addr < VA_EXCLUDE_START && |
104 | addr = PAGE_OFFSET; | 173 | (addr + len) >= VA_EXCLUDE_START) { |
105 | vma = find_vma(mm, PAGE_OFFSET); | 174 | addr = VA_EXCLUDE_END; |
175 | vma = find_vma(mm, VA_EXCLUDE_END); | ||
106 | } | 176 | } |
107 | if (task_size < addr) { | 177 | if (unlikely(task_size < addr)) { |
108 | if (start_addr != TASK_UNMAPPED_BASE) { | 178 | if (start_addr != TASK_UNMAPPED_BASE) { |
109 | start_addr = addr = TASK_UNMAPPED_BASE; | 179 | start_addr = addr = TASK_UNMAPPED_BASE; |
110 | mm->cached_hole_size = 0; | 180 | mm->cached_hole_size = 0; |
@@ -112,7 +182,7 @@ full_search: | |||
112 | } | 182 | } |
113 | return -ENOMEM; | 183 | return -ENOMEM; |
114 | } | 184 | } |
115 | if (!vma || addr + len <= vma->vm_start) { | 185 | if (likely(!vma || addr + len <= vma->vm_start)) { |
116 | /* | 186 | /* |
117 | * Remember the place where we stopped the search: | 187 | * Remember the place where we stopped the search: |
118 | */ | 188 | */ |
@@ -128,6 +198,121 @@ full_search: | |||
128 | } | 198 | } |
129 | } | 199 | } |
130 | 200 | ||
201 | unsigned long | ||
202 | arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
203 | const unsigned long len, const unsigned long pgoff, | ||
204 | const unsigned long flags) | ||
205 | { | ||
206 | struct vm_area_struct *vma; | ||
207 | struct mm_struct *mm = current->mm; | ||
208 | unsigned long task_size = STACK_TOP32; | ||
209 | unsigned long addr = addr0; | ||
210 | int do_color_align; | ||
211 | |||
212 | /* This should only ever run for 32-bit processes. */ | ||
213 | BUG_ON(!test_thread_flag(TIF_32BIT)); | ||
214 | |||
215 | if (flags & MAP_FIXED) { | ||
216 | /* We do not accept a shared mapping if it would violate | ||
217 | * cache aliasing constraints. | ||
218 | */ | ||
219 | if ((flags & MAP_SHARED) && | ||
220 | ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) | ||
221 | return -EINVAL; | ||
222 | return addr; | ||
223 | } | ||
224 | |||
225 | if (unlikely(len > task_size)) | ||
226 | return -ENOMEM; | ||
227 | |||
228 | do_color_align = 0; | ||
229 | if (filp || (flags & MAP_SHARED)) | ||
230 | do_color_align = 1; | ||
231 | |||
232 | /* requesting a specific address */ | ||
233 | if (addr) { | ||
234 | if (do_color_align) | ||
235 | addr = COLOUR_ALIGN(addr, pgoff); | ||
236 | else | ||
237 | addr = PAGE_ALIGN(addr); | ||
238 | |||
239 | vma = find_vma(mm, addr); | ||
240 | if (task_size - len >= addr && | ||
241 | (!vma || addr + len <= vma->vm_start)) | ||
242 | return addr; | ||
243 | } | ||
244 | |||
245 | /* check if free_area_cache is useful for us */ | ||
246 | if (len <= mm->cached_hole_size) { | ||
247 | mm->cached_hole_size = 0; | ||
248 | mm->free_area_cache = mm->mmap_base; | ||
249 | } | ||
250 | |||
251 | /* either no address requested or can't fit in requested address hole */ | ||
252 | addr = mm->free_area_cache; | ||
253 | if (do_color_align) { | ||
254 | unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff); | ||
255 | |||
256 | addr = base + len; | ||
257 | } | ||
258 | |||
259 | /* make sure it can fit in the remaining address space */ | ||
260 | if (likely(addr > len)) { | ||
261 | vma = find_vma(mm, addr-len); | ||
262 | if (!vma || addr <= vma->vm_start) { | ||
263 | /* remember the address as a hint for next time */ | ||
264 | return (mm->free_area_cache = addr-len); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | if (unlikely(mm->mmap_base < len)) | ||
269 | goto bottomup; | ||
270 | |||
271 | addr = mm->mmap_base-len; | ||
272 | if (do_color_align) | ||
273 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | ||
274 | |||
275 | do { | ||
276 | /* | ||
277 | * Lookup failure means no vma is above this address, | ||
278 | * else if new region fits below vma->vm_start, | ||
279 | * return with success: | ||
280 | */ | ||
281 | vma = find_vma(mm, addr); | ||
282 | if (likely(!vma || addr+len <= vma->vm_start)) { | ||
283 | /* remember the address as a hint for next time */ | ||
284 | return (mm->free_area_cache = addr); | ||
285 | } | ||
286 | |||
287 | /* remember the largest hole we saw so far */ | ||
288 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
289 | mm->cached_hole_size = vma->vm_start - addr; | ||
290 | |||
291 | /* try just below the current vma->vm_start */ | ||
292 | addr = vma->vm_start-len; | ||
293 | if (do_color_align) | ||
294 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | ||
295 | } while (likely(len < vma->vm_start)); | ||
296 | |||
297 | bottomup: | ||
298 | /* | ||
299 | * A failed mmap() very likely causes application failure, | ||
300 | * so fall back to the bottom-up function here. This scenario | ||
301 | * can happen with large stack limits and large mmap() | ||
302 | * allocations. | ||
303 | */ | ||
304 | mm->cached_hole_size = ~0UL; | ||
305 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
306 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
307 | /* | ||
308 | * Restore the topdown base: | ||
309 | */ | ||
310 | mm->free_area_cache = mm->mmap_base; | ||
311 | mm->cached_hole_size = ~0UL; | ||
312 | |||
313 | return addr; | ||
314 | } | ||
315 | |||
131 | /* Try to align mapping such that we align it as much as possible. */ | 316 | /* Try to align mapping such that we align it as much as possible. */ |
132 | unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) | 317 | unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) |
133 | { | 318 | { |
@@ -171,15 +356,57 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u | |||
171 | return addr; | 356 | return addr; |
172 | } | 357 | } |
173 | 358 | ||
359 | /* Essentially the same as PowerPC... */ | ||
360 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
361 | { | ||
362 | unsigned long random_factor = 0UL; | ||
363 | |||
364 | if (current->flags & PF_RANDOMIZE) { | ||
365 | random_factor = get_random_int(); | ||
366 | if (test_thread_flag(TIF_32BIT)) | ||
367 | random_factor &= ((1 * 1024 * 1024) - 1); | ||
368 | else | ||
369 | random_factor = ((random_factor << PAGE_SHIFT) & | ||
370 | 0xffffffffUL); | ||
371 | } | ||
372 | |||
373 | /* | ||
374 | * Fall back to the standard layout if the personality | ||
375 | * bit is set, or if the expected stack growth is unlimited: | ||
376 | */ | ||
377 | if (!test_thread_flag(TIF_32BIT) || | ||
378 | (current->personality & ADDR_COMPAT_LAYOUT) || | ||
379 | current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || | ||
380 | sysctl_legacy_va_layout) { | ||
381 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
382 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
383 | mm->unmap_area = arch_unmap_area; | ||
384 | } else { | ||
385 | /* We know it's 32-bit */ | ||
386 | unsigned long task_size = STACK_TOP32; | ||
387 | unsigned long gap; | ||
388 | |||
389 | gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; | ||
390 | if (gap < 128 * 1024 * 1024) | ||
391 | gap = 128 * 1024 * 1024; | ||
392 | if (gap > (task_size / 6 * 5)) | ||
393 | gap = (task_size / 6 * 5); | ||
394 | |||
395 | mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); | ||
396 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
397 | mm->unmap_area = arch_unmap_area_topdown; | ||
398 | } | ||
399 | } | ||
400 | |||
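With STACK_TOP32 = 0xf0000000 (the value this patch substitutes for the old literal elsewhere), the gap arithmetic above is easy to check by hand:

    /* Default 8MB stack rlimit, no randomization:
     *   gap = 8MB, clamped up to 128MB = 0x08000000
     *   mmap_base = PAGE_ALIGN(0xf0000000 - 0x08000000) = 0xe8000000
     * An RLIMIT_STACK above 5/6 of task_size instead clamps the gap
     * down to 0xf0000000 / 6 * 5 = 0xc8000000.
     */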
174 | asmlinkage unsigned long sparc_brk(unsigned long brk) | 401 | asmlinkage unsigned long sparc_brk(unsigned long brk) |
175 | { | 402 | { |
176 | /* People could try to be nasty and use ta 0x6d in 32bit programs */ | 403 | /* People could try to be nasty and use ta 0x6d in 32bit programs */ |
177 | if (test_thread_flag(TIF_32BIT) && | 404 | if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32) |
178 | brk >= 0xf0000000UL) | ||
179 | return current->mm->brk; | 405 | return current->mm->brk; |
180 | 406 | ||
181 | if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET)) | 407 | if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) |
182 | return current->mm->brk; | 408 | return current->mm->brk; |
409 | |||
183 | return sys_brk(brk); | 410 | return sys_brk(brk); |
184 | } | 411 | } |
185 | 412 | ||
@@ -340,13 +567,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | |||
340 | retval = -EINVAL; | 567 | retval = -EINVAL; |
341 | 568 | ||
342 | if (test_thread_flag(TIF_32BIT)) { | 569 | if (test_thread_flag(TIF_32BIT)) { |
343 | if (len > 0xf0000000UL || | 570 | if (len >= STACK_TOP32) |
344 | ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)) | 571 | goto out_putf; |
572 | |||
573 | if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len) | ||
345 | goto out_putf; | 574 | goto out_putf; |
346 | } else { | 575 | } else { |
347 | if (len > -PAGE_OFFSET || | 576 | if (len >= VA_EXCLUDE_START) |
348 | ((flags & MAP_FIXED) && | 577 | goto out_putf; |
349 | addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) | 578 | |
579 | if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len)) | ||
350 | goto out_putf; | 580 | goto out_putf; |
351 | } | 581 | } |
352 | 582 | ||
@@ -365,9 +595,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len) | |||
365 | { | 595 | { |
366 | long ret; | 596 | long ret; |
367 | 597 | ||
368 | if (len > -PAGE_OFFSET || | 598 | if (invalid_64bit_range(addr, len)) |
369 | (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) | ||
370 | return -EINVAL; | 599 | return -EINVAL; |
600 | |||
371 | down_write(¤t->mm->mmap_sem); | 601 | down_write(¤t->mm->mmap_sem); |
372 | ret = do_munmap(current->mm, addr, len); | 602 | ret = do_munmap(current->mm, addr, len); |
373 | up_write(¤t->mm->mmap_sem); | 603 | up_write(¤t->mm->mmap_sem); |
@@ -384,18 +614,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr, | |||
384 | { | 614 | { |
385 | struct vm_area_struct *vma; | 615 | struct vm_area_struct *vma; |
386 | unsigned long ret = -EINVAL; | 616 | unsigned long ret = -EINVAL; |
617 | |||
387 | if (test_thread_flag(TIF_32BIT)) | 618 | if (test_thread_flag(TIF_32BIT)) |
388 | goto out; | 619 | goto out; |
389 | if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET) | 620 | if (unlikely(new_len >= VA_EXCLUDE_START)) |
390 | goto out; | 621 | goto out; |
391 | if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET) | 622 | if (unlikely(invalid_64bit_range(addr, old_len))) |
392 | goto out; | 623 | goto out; |
624 | |||
393 | down_write(¤t->mm->mmap_sem); | 625 | down_write(¤t->mm->mmap_sem); |
394 | if (flags & MREMAP_FIXED) { | 626 | if (flags & MREMAP_FIXED) { |
395 | if (new_addr < PAGE_OFFSET && | 627 | if (invalid_64bit_range(new_addr, new_len)) |
396 | new_addr + new_len > -PAGE_OFFSET) | ||
397 | goto out_sem; | 628 | goto out_sem; |
398 | } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) { | 629 | } else if (invalid_64bit_range(addr, new_len)) { |
399 | unsigned long map_flags = 0; | 630 | unsigned long map_flags = 0; |
400 | struct file *file = NULL; | 631 | struct file *file = NULL; |
401 | 632 | ||
@@ -554,12 +785,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type, | |||
554 | } | 785 | } |
555 | if (!current_thread_info()->utraps) { | 786 | if (!current_thread_info()->utraps) { |
556 | current_thread_info()->utraps = | 787 | current_thread_info()->utraps = |
557 | kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); | 788 | kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); |
558 | if (!current_thread_info()->utraps) | 789 | if (!current_thread_info()->utraps) |
559 | return -ENOMEM; | 790 | return -ENOMEM; |
560 | current_thread_info()->utraps[0] = 1; | 791 | current_thread_info()->utraps[0] = 1; |
561 | memset(current_thread_info()->utraps+1, 0, | ||
562 | UT_TRAP_INSTRUCTION_31*sizeof(long)); | ||
563 | } else { | 792 | } else { |
564 | if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && | 793 | if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && |
565 | current_thread_info()->utraps[0] > 1) { | 794 | current_thread_info()->utraps[0] > 1) { |
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 417727bd87ba..0e41df024489 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <asm/fpumacro.h> | 62 | #include <asm/fpumacro.h> |
63 | #include <asm/semaphore.h> | 63 | #include <asm/semaphore.h> |
64 | #include <asm/mmu_context.h> | 64 | #include <asm/mmu_context.h> |
65 | #include <asm/a.out.h> | ||
65 | 66 | ||
66 | asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) | 67 | asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) |
67 | { | 68 | { |
@@ -1039,15 +1040,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr, | |||
1039 | unsigned long ret = -EINVAL; | 1040 | unsigned long ret = -EINVAL; |
1040 | unsigned long new_addr = __new_addr; | 1041 | unsigned long new_addr = __new_addr; |
1041 | 1042 | ||
1042 | if (old_len > 0xf0000000UL || new_len > 0xf0000000UL) | 1043 | if (old_len > STACK_TOP32 || new_len > STACK_TOP32) |
1043 | goto out; | 1044 | goto out; |
1044 | if (addr > 0xf0000000UL - old_len) | 1045 | if (addr > STACK_TOP32 - old_len) |
1045 | goto out; | 1046 | goto out; |
1046 | down_write(¤t->mm->mmap_sem); | 1047 | down_write(¤t->mm->mmap_sem); |
1047 | if (flags & MREMAP_FIXED) { | 1048 | if (flags & MREMAP_FIXED) { |
1048 | if (new_addr > 0xf0000000UL - new_len) | 1049 | if (new_addr > STACK_TOP32 - new_len) |
1049 | goto out_sem; | 1050 | goto out_sem; |
1050 | } else if (addr > 0xf0000000UL - new_len) { | 1051 | } else if (addr > STACK_TOP32 - new_len) { |
1051 | unsigned long map_flags = 0; | 1052 | unsigned long map_flags = 0; |
1052 | struct file *file = NULL; | 1053 | struct file *file = NULL; |
1053 | 1054 | ||
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index a22930d62adf..7d61f1bfd3d3 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/cpufreq.h> | 30 | #include <linux/cpufreq.h> |
31 | #include <linux/percpu.h> | 31 | #include <linux/percpu.h> |
32 | #include <linux/profile.h> | 32 | #include <linux/profile.h> |
33 | #include <linux/miscdevice.h> | ||
34 | #include <linux/rtc.h> | ||
33 | 35 | ||
34 | #include <asm/oplib.h> | 36 | #include <asm/oplib.h> |
35 | #include <asm/mostek.h> | 37 | #include <asm/mostek.h> |
@@ -45,6 +47,7 @@ | |||
45 | #include <asm/smp.h> | 47 | #include <asm/smp.h> |
46 | #include <asm/sections.h> | 48 | #include <asm/sections.h> |
47 | #include <asm/cpudata.h> | 49 | #include <asm/cpudata.h> |
50 | #include <asm/uaccess.h> | ||
48 | 51 | ||
49 | DEFINE_SPINLOCK(mostek_lock); | 52 | DEFINE_SPINLOCK(mostek_lock); |
50 | DEFINE_SPINLOCK(rtc_lock); | 53 | DEFINE_SPINLOCK(rtc_lock); |
@@ -193,16 +196,22 @@ struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; | |||
193 | 196 | ||
194 | static void stick_init_tick(unsigned long offset) | 197 | static void stick_init_tick(unsigned long offset) |
195 | { | 198 | { |
196 | tick_disable_protection(); | 199 | /* Writes to the %tick and %stick register are not |
197 | 200 | * allowed on sun4v. The Hypervisor controls that | |
198 | /* Let the user get at STICK too. */ | 201 | * bit, per-strand. |
199 | __asm__ __volatile__( | 202 | */ |
200 | " rd %%asr24, %%g2\n" | 203 | if (tlb_type != hypervisor) { |
201 | " andn %%g2, %0, %%g2\n" | 204 | tick_disable_protection(); |
202 | " wr %%g2, 0, %%asr24" | 205 | |
203 | : /* no outputs */ | 206 | /* Let the user get at STICK too. */ |
204 | : "r" (TICK_PRIV_BIT) | 207 | __asm__ __volatile__( |
205 | : "g1", "g2"); | 208 | " rd %%asr24, %%g2\n" |
209 | " andn %%g2, %0, %%g2\n" | ||
210 | " wr %%g2, 0, %%asr24" | ||
211 | : /* no outputs */ | ||
212 | : "r" (TICK_PRIV_BIT) | ||
213 | : "g1", "g2"); | ||
214 | } | ||
206 | 215 | ||
207 | __asm__ __volatile__( | 216 | __asm__ __volatile__( |
208 | " rd %%asr24, %%g1\n" | 217 | " rd %%asr24, %%g1\n" |
@@ -683,6 +692,83 @@ static void __init set_system_time(void) | |||
683 | } | 692 | } |
684 | } | 693 | } |
685 | 694 | ||
695 | /* davem suggests we keep this within the 4M locked kernel image */ | ||
696 | static u32 starfire_get_time(void) | ||
697 | { | ||
698 | static char obp_gettod[32]; | ||
699 | static u32 unix_tod; | ||
700 | |||
701 | sprintf(obp_gettod, "h# %08x unix-gettod", | ||
702 | (unsigned int) (long) &unix_tod); | ||
703 | prom_feval(obp_gettod); | ||
704 | |||
705 | return unix_tod; | ||
706 | } | ||
707 | |||
708 | static int starfire_set_time(u32 val) | ||
709 | { | ||
710 | /* Do nothing, time is set using the service processor | ||
711 | * console on this platform. | ||
712 | */ | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static u32 hypervisor_get_time(void) | ||
717 | { | ||
718 | register unsigned long func asm("%o5"); | ||
719 | register unsigned long arg0 asm("%o0"); | ||
720 | register unsigned long arg1 asm("%o1"); | ||
721 | int retries = 10000; | ||
722 | |||
723 | retry: | ||
724 | func = HV_FAST_TOD_GET; | ||
725 | arg0 = 0; | ||
726 | arg1 = 0; | ||
727 | __asm__ __volatile__("ta %6" | ||
728 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | ||
729 | : "0" (func), "1" (arg0), "2" (arg1), | ||
730 | "i" (HV_FAST_TRAP)); | ||
731 | if (arg0 == HV_EOK) | ||
732 | return arg1; | ||
733 | if (arg0 == HV_EWOULDBLOCK) { | ||
734 | if (--retries > 0) { | ||
735 | udelay(100); | ||
736 | goto retry; | ||
737 | } | ||
738 | printk(KERN_WARNING "SUN4V: tod_get() timed out.\n"); | ||
739 | return 0; | ||
740 | } | ||
741 | printk(KERN_WARNING "SUN4V: tod_get() not supported.\n"); | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static int hypervisor_set_time(u32 secs) | ||
746 | { | ||
747 | register unsigned long func asm("%o5"); | ||
748 | register unsigned long arg0 asm("%o0"); | ||
749 | int retries = 10000; | ||
750 | |||
751 | retry: | ||
752 | func = HV_FAST_TOD_SET; | ||
753 | arg0 = secs; | ||
754 | __asm__ __volatile__("ta %4" | ||
755 | : "=&r" (func), "=&r" (arg0) | ||
756 | : "0" (func), "1" (arg0), | ||
757 | "i" (HV_FAST_TRAP)); | ||
758 | if (arg0 == HV_EOK) | ||
759 | return 0; | ||
760 | if (arg0 == HV_EWOULDBLOCK) { | ||
761 | if (--retries > 0) { | ||
762 | udelay(100); | ||
763 | goto retry; | ||
764 | } | ||
765 | printk(KERN_WARNING "SUN4V: tod_set() timed out.\n"); | ||
766 | return -EAGAIN; | ||
767 | } | ||
768 | printk(KERN_WARNING "SUN4V: tod_set() not supported.\n"); | ||
769 | return -EOPNOTSUPP; | ||
770 | } | ||
771 | |||
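Both TOD helpers use the sun4v fast-trap convention that the register-asm variables pin down: the function number (HV_FAST_TOD_GET or HV_FAST_TOD_SET) goes in %o5, arguments in %o0 onward, then "ta HV_FAST_TRAP"; the status comes back in %o0 (HV_EOK, HV_EWOULDBLOCK, and so on) and the result, if any, in %o1. The 10000-iteration, 100us retry loop therefore bounds a busy TOD device wait at roughly one second before giving up.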
686 | void __init clock_probe(void) | 772 | void __init clock_probe(void) |
687 | { | 773 | { |
688 | struct linux_prom_registers clk_reg[2]; | 774 | struct linux_prom_registers clk_reg[2]; |
@@ -702,14 +788,14 @@ void __init clock_probe(void) | |||
702 | 788 | ||
703 | 789 | ||
704 | if (this_is_starfire) { | 790 | if (this_is_starfire) { |
705 | /* davem suggests we keep this within the 4M locked kernel image */ | 791 | xtime.tv_sec = starfire_get_time(); |
706 | static char obp_gettod[256]; | 792 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
707 | static u32 unix_tod; | 793 | set_normalized_timespec(&wall_to_monotonic, |
708 | 794 | -xtime.tv_sec, -xtime.tv_nsec); | |
709 | sprintf(obp_gettod, "h# %08x unix-gettod", | 795 | return; |
710 | (unsigned int) (long) &unix_tod); | 796 | } |
711 | prom_feval(obp_gettod); | 797 | if (tlb_type == hypervisor) { |
712 | xtime.tv_sec = unix_tod; | 798 | xtime.tv_sec = hypervisor_get_time(); |
713 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | 799 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); |
714 | set_normalized_timespec(&wall_to_monotonic, | 800 | set_normalized_timespec(&wall_to_monotonic, |
715 | -xtime.tv_sec, -xtime.tv_nsec); | 801 | -xtime.tv_sec, -xtime.tv_nsec); |
@@ -981,11 +1067,10 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg | |||
981 | } | 1067 | } |
982 | 1068 | ||
983 | struct freq_table { | 1069 | struct freq_table { |
984 | unsigned long udelay_val_ref; | ||
985 | unsigned long clock_tick_ref; | 1070 | unsigned long clock_tick_ref; |
986 | unsigned int ref_freq; | 1071 | unsigned int ref_freq; |
987 | }; | 1072 | }; |
988 | static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 }; | 1073 | static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 }; |
989 | 1074 | ||
990 | unsigned long sparc64_get_clock_tick(unsigned int cpu) | 1075 | unsigned long sparc64_get_clock_tick(unsigned int cpu) |
991 | { | 1076 | { |
@@ -1007,16 +1092,11 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val | |||
1007 | 1092 | ||
1008 | if (!ft->ref_freq) { | 1093 | if (!ft->ref_freq) { |
1009 | ft->ref_freq = freq->old; | 1094 | ft->ref_freq = freq->old; |
1010 | ft->udelay_val_ref = cpu_data(cpu).udelay_val; | ||
1011 | ft->clock_tick_ref = cpu_data(cpu).clock_tick; | 1095 | ft->clock_tick_ref = cpu_data(cpu).clock_tick; |
1012 | } | 1096 | } |
1013 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || | 1097 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || |
1014 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || | 1098 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || |
1015 | (val == CPUFREQ_RESUMECHANGE)) { | 1099 | (val == CPUFREQ_RESUMECHANGE)) { |
1016 | cpu_data(cpu).udelay_val = | ||
1017 | cpufreq_scale(ft->udelay_val_ref, | ||
1018 | ft->ref_freq, | ||
1019 | freq->new); | ||
1020 | cpu_data(cpu).clock_tick = | 1100 | cpu_data(cpu).clock_tick = |
1021 | cpufreq_scale(ft->clock_tick_ref, | 1101 | cpufreq_scale(ft->clock_tick_ref, |
1022 | ft->ref_freq, | 1102 | ft->ref_freq, |
@@ -1179,3 +1259,246 @@ static int set_rtc_mmss(unsigned long nowtime) | |||
1179 | return retval; | 1259 | return retval; |
1180 | } | 1260 | } |
1181 | } | 1261 | } |
1262 | |||
1263 | #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ | ||
1264 | static unsigned char mini_rtc_status; /* bitmapped status byte. */ | ||
1265 | |||
1266 | /* months start at 0 now */ | ||
1267 | static unsigned char days_in_mo[] = | ||
1268 | {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; | ||
1269 | |||
1270 | #define FEBRUARY 2 | ||
1271 | #define STARTOFTIME 1970 | ||
1272 | #define SECDAY 86400L | ||
1273 | #define SECYR (SECDAY * 365) | ||
1274 | #define leapyear(year) ((year) % 4 == 0 && \ | ||
1275 | ((year) % 100 != 0 || (year) % 400 == 0)) | ||
1276 | #define days_in_year(a) (leapyear(a) ? 366 : 365) | ||
1277 | #define days_in_month(a) (month_days[(a) - 1]) | ||
1278 | |||
1279 | static int month_days[12] = { | ||
1280 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | ||
1281 | }; | ||
1282 | |||
1283 | /* | ||
1284 | * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) | ||
1285 | */ | ||
1286 | static void GregorianDay(struct rtc_time * tm) | ||
1287 | { | ||
1288 | int leapsToDate; | ||
1289 | int lastYear; | ||
1290 | int day; | ||
1291 | int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; | ||
1292 | |||
1293 | lastYear = tm->tm_year - 1; | ||
1294 | |||
1295 | /* | ||
1296 | * Number of leap corrections to apply up to end of last year | ||
1297 | */ | ||
1298 | leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; | ||
1299 | |||
1300 | /* | ||
1301 | * This year is a leap year if it is divisible by 4 except when it is | ||
1302 | * divisible by 100 unless it is divisible by 400 | ||
1303 | * | ||
1304 | * e.g. 1904 was a leap year, 1900 was not, and 1996 and 2000 were. | ||
1305 | */ | ||
1306 | day = tm->tm_mon > 2 && leapyear(tm->tm_year); | ||
1307 | |||
1308 | day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + | ||
1309 | tm->tm_mday; | ||
1310 | |||
1311 | tm->tm_wday = day % 7; | ||
1312 | } | ||
1313 | |||
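A sanity check of the day count above, for 1 Jan 1970 (tm_year = 1970, tm_mon = 1, tm_mday = 1):

    /* lastYear    = 1969
     * leapsToDate = 1969/4 - 1969/100 + 1969/400 = 492 - 19 + 4 = 477
     * day         = 0 + 1969*365 + 477 + MonthOffset[0] + 1
     *             = 718685 + 477 + 0 + 1 = 719163
     * 719163 % 7  = 4, i.e. Thursday with tm_wday counting 0 = Sunday,
     * which matches the calendar.
     */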
1314 | static void to_tm(int tim, struct rtc_time *tm) | ||
1315 | { | ||
1316 | register int i; | ||
1317 | register long hms, day; | ||
1318 | |||
1319 | day = tim / SECDAY; | ||
1320 | hms = tim % SECDAY; | ||
1321 | |||
1322 | /* Hours, minutes, seconds are easy */ | ||
1323 | tm->tm_hour = hms / 3600; | ||
1324 | tm->tm_min = (hms % 3600) / 60; | ||
1325 | tm->tm_sec = (hms % 3600) % 60; | ||
1326 | |||
1327 | /* Number of years in days */ | ||
1328 | for (i = STARTOFTIME; day >= days_in_year(i); i++) | ||
1329 | day -= days_in_year(i); | ||
1330 | tm->tm_year = i; | ||
1331 | |||
1332 | /* Number of months in days left */ | ||
1333 | if (leapyear(tm->tm_year)) | ||
1334 | days_in_month(FEBRUARY) = 29; | ||
1335 | for (i = 1; day >= days_in_month(i); i++) | ||
1336 | day -= days_in_month(i); | ||
1337 | days_in_month(FEBRUARY) = 28; | ||
1338 | tm->tm_mon = i; | ||
1339 | |||
1340 | /* Days are what is left over (+1) from all that. */ | ||
1341 | tm->tm_mday = day + 1; | ||
1342 | |||
1343 | /* | ||
1344 | * Determine the day of week | ||
1345 | */ | ||
1346 | GregorianDay(tm); | ||
1347 | } | ||
1348 | |||
1349 | /* Both Starfire and SUN4V give us seconds since Jan 1st, 1970, | ||
1350 | * aka Unix time. So we have to convert to/from rtc_time. | ||
1351 | */ | ||
1352 | static inline void mini_get_rtc_time(struct rtc_time *time) | ||
1353 | { | ||
1354 | unsigned long flags; | ||
1355 | u32 seconds; | ||
1356 | |||
1357 | spin_lock_irqsave(&rtc_lock, flags); | ||
1358 | seconds = 0; | ||
1359 | if (this_is_starfire) | ||
1360 | seconds = starfire_get_time(); | ||
1361 | else if (tlb_type == hypervisor) | ||
1362 | seconds = hypervisor_get_time(); | ||
1363 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
1364 | |||
1365 | to_tm(seconds, time); | ||
1366 | time->tm_year -= 1900; | ||
1367 | time->tm_mon -= 1; | ||
1368 | } | ||
1369 | |||
1370 | static inline int mini_set_rtc_time(struct rtc_time *time) | ||
1371 | { | ||
1372 | u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1, | ||
1373 | time->tm_mday, time->tm_hour, | ||
1374 | time->tm_min, time->tm_sec); | ||
1375 | unsigned long flags; | ||
1376 | int err; | ||
1377 | |||
1378 | spin_lock_irqsave(&rtc_lock, flags); | ||
1379 | err = -ENODEV; | ||
1380 | if (this_is_starfire) | ||
1381 | err = starfire_set_time(seconds); | ||
1382 | else if (tlb_type == hypervisor) | ||
1383 | err = hypervisor_set_time(seconds); | ||
1384 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
1385 | |||
1386 | return err; | ||
1387 | } | ||
1388 | |||
1389 | static int mini_rtc_ioctl(struct inode *inode, struct file *file, | ||
1390 | unsigned int cmd, unsigned long arg) | ||
1391 | { | ||
1392 | struct rtc_time wtime; | ||
1393 | void __user *argp = (void __user *)arg; | ||
1394 | |||
1395 | switch (cmd) { | ||
1396 | |||
1397 | case RTC_PLL_GET: | ||
1398 | return -EINVAL; | ||
1399 | |||
1400 | case RTC_PLL_SET: | ||
1401 | return -EINVAL; | ||
1402 | |||
1403 | case RTC_UIE_OFF: /* disable ints from RTC updates. */ | ||
1404 | return 0; | ||
1405 | |||
1406 | case RTC_UIE_ON: /* enable ints for RTC updates. */ | ||
1407 | return -EINVAL; | ||
1408 | |||
1409 | case RTC_RD_TIME: /* Read the time/date from RTC */ | ||
1410 | /* this doesn't get week-day, who cares */ | ||
1411 | memset(&wtime, 0, sizeof(wtime)); | ||
1412 | mini_get_rtc_time(&wtime); | ||
1413 | |||
1414 | return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0; | ||
1415 | |||
1416 | case RTC_SET_TIME: /* Set the RTC */ | ||
1417 | { | ||
1418 | int year; | ||
1419 | unsigned char leap_yr; | ||
1420 | |||
1421 | if (!capable(CAP_SYS_TIME)) | ||
1422 | return -EACCES; | ||
1423 | |||
1424 | if (copy_from_user(&wtime, argp, sizeof(wtime))) | ||
1425 | return -EFAULT; | ||
1426 | |||
1427 | year = wtime.tm_year + 1900; | ||
1428 | leap_yr = ((!(year % 4) && (year % 100)) || | ||
1429 | !(year % 400)); | ||
1430 | |||
1431 | if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) | ||
1432 | return -EINVAL; | ||
1433 | |||
1434 | if (wtime.tm_mday < 0 || wtime.tm_mday > | ||
1435 | (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) | ||
1436 | return -EINVAL; | ||
1437 | |||
1438 | if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || | ||
1439 | wtime.tm_min < 0 || wtime.tm_min >= 60 || | ||
1440 | wtime.tm_sec < 0 || wtime.tm_sec >= 60) | ||
1441 | return -EINVAL; | ||
1442 | |||
1443 | return mini_set_rtc_time(&wtime); | ||
1444 | } | ||
1445 | } | ||
1446 | |||
1447 | return -EINVAL; | ||
1448 | } | ||
1449 | |||
1450 | static int mini_rtc_open(struct inode *inode, struct file *file) | ||
1451 | { | ||
1452 | if (mini_rtc_status & RTC_IS_OPEN) | ||
1453 | return -EBUSY; | ||
1454 | |||
1455 | mini_rtc_status |= RTC_IS_OPEN; | ||
1456 | |||
1457 | return 0; | ||
1458 | } | ||
1459 | |||
1460 | static int mini_rtc_release(struct inode *inode, struct file *file) | ||
1461 | { | ||
1462 | mini_rtc_status &= ~RTC_IS_OPEN; | ||
1463 | return 0; | ||
1464 | } | ||
1465 | |||
1466 | |||
1467 | static struct file_operations mini_rtc_fops = { | ||
1468 | .owner = THIS_MODULE, | ||
1469 | .ioctl = mini_rtc_ioctl, | ||
1470 | .open = mini_rtc_open, | ||
1471 | .release = mini_rtc_release, | ||
1472 | }; | ||
1473 | |||
1474 | static struct miscdevice rtc_mini_dev = | ||
1475 | { | ||
1476 | .minor = RTC_MINOR, | ||
1477 | .name = "rtc", | ||
1478 | .fops = &mini_rtc_fops, | ||
1479 | }; | ||
1480 | |||
1481 | static int __init rtc_mini_init(void) | ||
1482 | { | ||
1483 | int retval; | ||
1484 | |||
1485 | if (tlb_type != hypervisor && !this_is_starfire) | ||
1486 | return -ENODEV; | ||
1487 | |||
1488 | printk(KERN_INFO "Mini RTC Driver\n"); | ||
1489 | |||
1490 | retval = misc_register(&rtc_mini_dev); | ||
1491 | if (retval < 0) | ||
1492 | return retval; | ||
1493 | |||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | static void __exit rtc_mini_exit(void) | ||
1498 | { | ||
1499 | misc_deregister(&rtc_mini_dev); | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | module_init(rtc_mini_init); | ||
1504 | module_exit(rtc_mini_exit); | ||
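For reference, the new misc device answers the standard RTC ioctls, so a minimal userspace reader (ordinary C, not part of the patch) looks like this:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
    	struct rtc_time tm;
    	int fd = open("/dev/rtc", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
    		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
    		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
    		       tm.tm_hour, tm.tm_min, tm.tm_sec);
    	close(fd);
    	return 0;
    }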
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 9478551cb020..a4dc01a3d238 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/mmu.h> | 18 | #include <asm/mmu.h> |
19 | #include <asm/hypervisor.h> | ||
20 | #include <asm/cpudata.h> | ||
19 | 21 | ||
20 | .data | 22 | .data |
21 | .align 8 | 23 | .align 8 |
@@ -28,14 +30,19 @@ itlb_load: | |||
28 | dtlb_load: | 30 | dtlb_load: |
29 | .asciz "SUNW,dtlb-load" | 31 | .asciz "SUNW,dtlb-load" |
30 | 32 | ||
33 | /* XXX __cpuinit this thing XXX */ | ||
34 | #define TRAMP_STACK_SIZE 1024 | ||
35 | .align 16 | ||
36 | tramp_stack: | ||
37 | .skip TRAMP_STACK_SIZE | ||
38 | |||
31 | .text | 39 | .text |
32 | .align 8 | 40 | .align 8 |
33 | .globl sparc64_cpu_startup, sparc64_cpu_startup_end | 41 | .globl sparc64_cpu_startup, sparc64_cpu_startup_end |
34 | sparc64_cpu_startup: | 42 | sparc64_cpu_startup: |
35 | flushw | 43 | BRANCH_IF_SUN4V(g1, niagara_startup) |
36 | 44 | BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup) | |
37 | BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup) | 45 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup) |
38 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup) | ||
39 | 46 | ||
40 | ba,pt %xcc, spitfire_startup | 47 | ba,pt %xcc, spitfire_startup |
41 | nop | 48 | nop |
@@ -55,6 +62,7 @@ cheetah_startup: | |||
55 | or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 | 62 | or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 |
56 | stxa %g5, [%g0] ASI_DCU_CONTROL_REG | 63 | stxa %g5, [%g0] ASI_DCU_CONTROL_REG |
57 | membar #Sync | 64 | membar #Sync |
65 | /* fallthru */ | ||
58 | 66 | ||
59 | cheetah_generic_startup: | 67 | cheetah_generic_startup: |
60 | mov TSB_EXTENSION_P, %g3 | 68 | mov TSB_EXTENSION_P, %g3 |
@@ -70,7 +78,9 @@ cheetah_generic_startup: | |||
70 | stxa %g0, [%g3] ASI_DMMU | 78 | stxa %g0, [%g3] ASI_DMMU |
71 | stxa %g0, [%g3] ASI_IMMU | 79 | stxa %g0, [%g3] ASI_IMMU |
72 | membar #Sync | 80 | membar #Sync |
81 | /* fallthru */ | ||
73 | 82 | ||
83 | niagara_startup: | ||
74 | /* Disable STICK_INT interrupts. */ | 84 | /* Disable STICK_INT interrupts. */ |
75 | sethi %hi(0x80000000), %g5 | 85 | sethi %hi(0x80000000), %g5 |
76 | sllx %g5, 32, %g5 | 86 | sllx %g5, 32, %g5 |
@@ -85,17 +95,17 @@ spitfire_startup: | |||
85 | membar #Sync | 95 | membar #Sync |
86 | 96 | ||
87 | startup_continue: | 97 | startup_continue: |
88 | wrpr %g0, 15, %pil | ||
89 | |||
90 | sethi %hi(0x80000000), %g2 | 98 | sethi %hi(0x80000000), %g2 |
91 | sllx %g2, 32, %g2 | 99 | sllx %g2, 32, %g2 |
92 | wr %g2, 0, %tick_cmpr | 100 | wr %g2, 0, %tick_cmpr |
93 | 101 | ||
102 | mov %o0, %l0 | ||
103 | |||
104 | BRANCH_IF_SUN4V(g1, niagara_lock_tlb) | ||
105 | |||
94 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. | 106 | /* Call OBP by hand to lock KERNBASE into i/d tlbs. |
95 | * We lock 2 consecutive entries if we are 'bigkernel'. | 107 | * We lock 2 consecutive entries if we are 'bigkernel'. |
96 | */ | 108 | */ |
97 | mov %o0, %l0 | ||
98 | |||
99 | sethi %hi(prom_entry_lock), %g2 | 109 | sethi %hi(prom_entry_lock), %g2 |
100 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 | 110 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 |
101 | membar #StoreLoad | #StoreStore | 111 | membar #StoreLoad | #StoreStore |
@@ -105,7 +115,6 @@ startup_continue: | |||
105 | sethi %hi(p1275buf), %g2 | 115 | sethi %hi(p1275buf), %g2 |
106 | or %g2, %lo(p1275buf), %g2 | 116 | or %g2, %lo(p1275buf), %g2 |
107 | ldx [%g2 + 0x10], %l2 | 117 | ldx [%g2 + 0x10], %l2 |
108 | mov %sp, %l1 | ||
109 | add %l2, -(192 + 128), %sp | 118 | add %l2, -(192 + 128), %sp |
110 | flushw | 119 | flushw |
111 | 120 | ||
@@ -142,8 +151,7 @@ startup_continue: | |||
142 | 151 | ||
143 | sethi %hi(bigkernel), %g2 | 152 | sethi %hi(bigkernel), %g2 |
144 | lduw [%g2 + %lo(bigkernel)], %g2 | 153 | lduw [%g2 + %lo(bigkernel)], %g2 |
145 | cmp %g2, 0 | 154 | brz,pt %g2, do_dtlb |
146 | be,pt %icc, do_dtlb | ||
147 | nop | 155 | nop |
148 | 156 | ||
149 | sethi %hi(call_method), %g2 | 157 | sethi %hi(call_method), %g2 |
@@ -214,8 +222,7 @@ do_dtlb: | |||
214 | 222 | ||
215 | sethi %hi(bigkernel), %g2 | 223 | sethi %hi(bigkernel), %g2 |
216 | lduw [%g2 + %lo(bigkernel)], %g2 | 224 | lduw [%g2 + %lo(bigkernel)], %g2 |
217 | cmp %g2, 0 | 225 | brz,pt %g2, do_unlock |
218 | be,pt %icc, do_unlock | ||
219 | nop | 226 | nop |
220 | 227 | ||
221 | sethi %hi(call_method), %g2 | 228 | sethi %hi(call_method), %g2 |
@@ -257,99 +264,180 @@ do_unlock: | |||
257 | stb %g0, [%g2 + %lo(prom_entry_lock)] | 264 | stb %g0, [%g2 + %lo(prom_entry_lock)] |
258 | membar #StoreStore | #StoreLoad | 265 | membar #StoreStore | #StoreLoad |
259 | 266 | ||
260 | mov %l1, %sp | 267 | ba,pt %xcc, after_lock_tlb |
261 | flushw | 268 | nop |
269 | |||
270 | niagara_lock_tlb: | ||
271 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
272 | sethi %hi(KERNBASE), %o0 | ||
273 | clr %o1 | ||
274 | sethi %hi(kern_locked_tte_data), %o2 | ||
275 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
276 | mov HV_MMU_IMMU, %o3 | ||
277 | ta HV_FAST_TRAP | ||
278 | |||
279 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
280 | sethi %hi(KERNBASE), %o0 | ||
281 | clr %o1 | ||
282 | sethi %hi(kern_locked_tte_data), %o2 | ||
283 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
284 | mov HV_MMU_DMMU, %o3 | ||
285 | ta HV_FAST_TRAP | ||
262 | 286 | ||
263 | mov %l0, %o0 | 287 | sethi %hi(bigkernel), %g2 |
288 | lduw [%g2 + %lo(bigkernel)], %g2 | ||
289 | brz,pt %g2, after_lock_tlb | ||
290 | nop | ||
264 | 291 | ||
292 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
293 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
294 | clr %o1 | ||
295 | sethi %hi(kern_locked_tte_data), %o2 | ||
296 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
297 | sethi %hi(0x400000), %o3 | ||
298 | add %o2, %o3, %o2 | ||
299 | mov HV_MMU_IMMU, %o3 | ||
300 | ta HV_FAST_TRAP | ||
301 | |||
302 | mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 | ||
303 | sethi %hi(KERNBASE + 0x400000), %o0 | ||
304 | clr %o1 | ||
305 | sethi %hi(kern_locked_tte_data), %o2 | ||
306 | ldx [%o2 + %lo(kern_locked_tte_data)], %o2 | ||
307 | sethi %hi(0x400000), %o3 | ||
308 | add %o2, %o3, %o2 | ||
309 | mov HV_MMU_DMMU, %o3 | ||
310 | ta HV_FAST_TRAP | ||
311 | |||
312 | after_lock_tlb: | ||
265 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate | 313 | wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate |
266 | wr %g0, 0, %fprs | 314 | wr %g0, 0, %fprs |
267 | 315 | ||
268 | /* XXX Buggy PROM... */ | ||
269 | srl %o0, 0, %o0 | ||
270 | ldx [%o0], %g6 | ||
271 | |||
272 | wr %g0, ASI_P, %asi | 316 | wr %g0, ASI_P, %asi |
273 | 317 | ||
274 | mov PRIMARY_CONTEXT, %g7 | 318 | mov PRIMARY_CONTEXT, %g7 |
275 | stxa %g0, [%g7] ASI_DMMU | 319 | |
320 | 661: stxa %g0, [%g7] ASI_DMMU | ||
321 | .section .sun4v_1insn_patch, "ax" | ||
322 | .word 661b | ||
323 | stxa %g0, [%g7] ASI_MMU | ||
324 | .previous | ||
325 | |||
276 | membar #Sync | 326 | membar #Sync |
277 | mov SECONDARY_CONTEXT, %g7 | 327 | mov SECONDARY_CONTEXT, %g7 |
278 | stxa %g0, [%g7] ASI_DMMU | 328 | |
329 | 661: stxa %g0, [%g7] ASI_DMMU | ||
330 | .section .sun4v_1insn_patch, "ax" | ||
331 | .word 661b | ||
332 | stxa %g0, [%g7] ASI_MMU | ||
333 | .previous | ||
334 | |||
279 | membar #Sync | 335 | membar #Sync |
280 | 336 | ||
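The paired 661: labels and .sun4v_1insn_patch sections above are the boot-time patching mechanism this diff leans on: each section entry records the address of a default sun4u instruction and the single instruction that replaces it when the kernel finds itself under a sun4v hypervisor. A minimal sketch of the patcher this implies, in C; the struct layout and the flush step are assumptions, not taken from this diff:

	struct sun4v_1insn_patch_entry {
		unsigned int	addr;	/* address of the 661: instruction */
		unsigned int	insn;	/* opcode that replaces it on sun4v */
	};

	static void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *p,
					    struct sun4v_1insn_patch_entry *end)
	{
		for (; p < end; p++) {
			unsigned long where = p->addr;

			*(unsigned int *) where = p->insn;
			/* Keep the I-cache coherent with the new opcode. */
			__asm__ __volatile__("flush	%0" : : "r" (where));
		}
	}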
281 | mov 1, %g5 | 337 | /* Everything we do here, until we properly take over the |
282 | sllx %g5, THREAD_SHIFT, %g5 | 338 | * trap table, must be done with extreme care. We cannot |
283 | sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 | 339 | * make any references to %g6 (current thread pointer), |
284 | add %g6, %g5, %sp | 340 | * %g4 (current task pointer), or %g5 (base of current cpu's |
341 | * per-cpu area) until we properly take over the trap table | ||
342 | * from the firmware and hypervisor. | ||
343 | * | ||
344 | * Get onto temporary stack which is in the locked kernel image. | ||
345 | */ | ||
346 | sethi %hi(tramp_stack), %g1 | ||
347 | or %g1, %lo(tramp_stack), %g1 | ||
348 | add %g1, TRAMP_STACK_SIZE, %g1 | ||
349 | sub %g1, STACKFRAME_SZ + STACK_BIAS, %sp | ||
285 | mov 0, %fp | 350 | mov 0, %fp |
286 | 351 | ||
287 | wrpr %g0, 0, %wstate | 352 | /* Put garbage in these registers to trap any access to them. */ |
288 | wrpr %g0, 0, %tl | 353 | set 0xdeadbeef, %g4 |
354 | set 0xdeadbeef, %g5 | ||
355 | set 0xdeadbeef, %g6 | ||
289 | 356 | ||
290 | /* Setup the trap globals, then we can resurface. */ | 357 | call init_irqwork_curcpu |
291 | rdpr %pstate, %o1 | 358 | nop |
292 | mov %g6, %o2 | ||
293 | wrpr %o1, PSTATE_AG, %pstate | ||
294 | sethi %hi(sparc64_ttable_tl0), %g5 | ||
295 | wrpr %g5, %tba | ||
296 | mov %o2, %g6 | ||
297 | |||
298 | wrpr %o1, PSTATE_MG, %pstate | ||
299 | #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) | ||
300 | #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) | ||
301 | |||
302 | mov TSB_REG, %g1 | ||
303 | stxa %g0, [%g1] ASI_DMMU | ||
304 | membar #Sync | ||
305 | mov TLB_SFSR, %g1 | ||
306 | sethi %uhi(KERN_HIGHBITS), %g2 | ||
307 | or %g2, %ulo(KERN_HIGHBITS), %g2 | ||
308 | sllx %g2, 32, %g2 | ||
309 | or %g2, KERN_LOWBITS, %g2 | ||
310 | 359 | ||
311 | BRANCH_IF_ANY_CHEETAH(g3,g7,9f) | 360 | sethi %hi(tlb_type), %g3 |
361 | lduw [%g3 + %lo(tlb_type)], %g2 | ||
362 | cmp %g2, 3 | ||
363 | bne,pt %icc, 1f | ||
364 | nop | ||
312 | 365 | ||
313 | ba,pt %xcc, 1f | 366 | call hard_smp_processor_id |
314 | nop | 367 | nop |
368 | |||
369 | mov %o0, %o1 | ||
370 | mov 0, %o0 | ||
371 | mov 0, %o2 | ||
372 | call sun4v_init_mondo_queues | ||
373 | mov 1, %o3 | ||
315 | 374 | ||
316 | 9: | 375 | 1: call init_cur_cpu_trap |
317 | sethi %uhi(VPTE_BASE_CHEETAH), %g3 | 376 | ldx [%l0], %o0 |
318 | or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 | 377 | |
319 | ba,pt %xcc, 2f | 378 | /* Start using proper page size encodings in ctx register. */ |
320 | sllx %g3, 32, %g3 | 379 | sethi %hi(sparc64_kern_pri_context), %g3 |
321 | 1: | 380 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 |
322 | sethi %uhi(VPTE_BASE_SPITFIRE), %g3 | 381 | mov PRIMARY_CONTEXT, %g1 |
323 | or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 | ||
324 | sllx %g3, 32, %g3 | ||
325 | 382 | ||
326 | 2: | 383 | 661: stxa %g2, [%g1] ASI_DMMU |
327 | clr %g7 | 384 | .section .sun4v_1insn_patch, "ax" |
328 | #undef KERN_HIGHBITS | 385 | .word 661b |
329 | #undef KERN_LOWBITS | 386 | stxa %g2, [%g1] ASI_MMU |
387 | .previous | ||
330 | 388 | ||
331 | wrpr %o1, 0x0, %pstate | 389 | membar #Sync |
332 | ldx [%g6 + TI_TASK], %g4 | ||
333 | 390 | ||
334 | wrpr %g0, 0, %wstate | 391 | wrpr %g0, 0, %wstate |
335 | 392 | ||
336 | call init_irqwork_curcpu | 393 | /* As a hack, put &init_thread_union into %g6. |
394 | * prom_world() loads from here to restore the %asi | ||
395 | * register. | ||
396 | */ | ||
397 | sethi %hi(init_thread_union), %g6 | ||
398 | or %g6, %lo(init_thread_union), %g6 | ||
399 | |||
400 | sethi %hi(is_sun4v), %o0 | ||
401 | lduw [%o0 + %lo(is_sun4v)], %o0 | ||
402 | brz,pt %o0, 1f | ||
337 | nop | 403 | nop |
338 | 404 | ||
339 | /* Start using proper page size encodings in ctx register. */ | 405 | TRAP_LOAD_TRAP_BLOCK(%g2, %g3) |
340 | sethi %hi(sparc64_kern_pri_context), %g3 | 406 | add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 |
341 | ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 | 407 | stxa %g2, [%g0] ASI_SCRATCHPAD |
342 | mov PRIMARY_CONTEXT, %g1 | 408 | |
343 | stxa %g2, [%g1] ASI_DMMU | 409 | /* Compute physical address: |
344 | membar #Sync | 410 | * |
411 | * paddr = kern_base + (mmfsa_vaddr - KERNBASE) | ||
412 | */ | ||
413 | sethi %hi(KERNBASE), %g3 | ||
414 | sub %g2, %g3, %g2 | ||
415 | sethi %hi(kern_base), %g3 | ||
416 | ldx [%g3 + %lo(kern_base)], %g3 | ||
417 | add %g2, %g3, %o1 | ||
418 | |||
419 | call prom_set_trap_table_sun4v | ||
420 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
421 | |||
422 | ba,pt %xcc, 2f | ||
423 | nop | ||
424 | |||
425 | 1: call prom_set_trap_table | ||
426 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
427 | |||
428 | 2: ldx [%l0], %g6 | ||
429 | ldx [%g6 + TI_TASK], %g4 | ||
430 | |||
431 | mov 1, %g5 | ||
432 | sllx %g5, THREAD_SHIFT, %g5 | ||
433 | sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 | ||
434 | add %g6, %g5, %sp | ||
435 | mov 0, %fp | ||
345 | 436 | ||
346 | rdpr %pstate, %o1 | 437 | rdpr %pstate, %o1 |
347 | or %o1, PSTATE_IE, %o1 | 438 | or %o1, PSTATE_IE, %o1 |
348 | wrpr %o1, 0, %pstate | 439 | wrpr %o1, 0, %pstate |
349 | 440 | ||
350 | call prom_set_trap_table | ||
351 | sethi %hi(sparc64_ttable_tl0), %o0 | ||
352 | |||
353 | call smp_callin | 441 | call smp_callin |
354 | nop | 442 | nop |
355 | call cpu_idle | 443 | call cpu_idle |
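The niagara_lock_tlb sequence above is the sun4v fast-trap convention written out by hand: the API function number goes in %o5, arguments in %o0..%o3, then ta HV_FAST_TRAP enters the hypervisor and the status comes back in %o0. A hedged C wrapper for the same mmu_map_perm_addr call (the wrapper name and exact prototype are assumptions of this sketch):

	static inline unsigned long hv_mmu_map_perm_addr(unsigned long vaddr,
							 unsigned long tte,
							 unsigned long flags)
	{
		register unsigned long func asm("o5") = HV_FAST_MMU_MAP_PERM_ADDR;
		register unsigned long a0 asm("o0") = vaddr;
		register unsigned long a1 asm("o1") = 0;	/* context, zero here */
		register unsigned long a2 asm("o2") = tte;
		register unsigned long a3 asm("o3") = flags;	/* HV_MMU_IMMU/DMMU */

		__asm__ __volatile__("ta	%5"
				     : "+r" (func), "+r" (a0), "+r" (a1),
				       "+r" (a2), "+r" (a3)
				     : "i" (HV_FAST_TRAP)
				     : "memory");
		return a0;	/* 0 (HV_EOK) on success */
	}

Note how the trampoline issues the call twice per mapping, once with HV_MMU_IMMU and once with HV_MMU_DMMU, and repeats the pair for the second 4MB page when bigkernel is set.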
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 8d44ae5a15e3..7f7dba0ca96a 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
40 | #include <asm/kdebug.h> | 40 | #include <asm/kdebug.h> |
41 | #include <asm/head.h> | ||
41 | #ifdef CONFIG_KMOD | 42 | #ifdef CONFIG_KMOD |
42 | #include <linux/kmod.h> | 43 | #include <linux/kmod.h> |
43 | #endif | 44 | #endif |
@@ -72,12 +73,14 @@ struct tl1_traplog { | |||
72 | 73 | ||
73 | static void dump_tl1_traplog(struct tl1_traplog *p) | 74 | static void dump_tl1_traplog(struct tl1_traplog *p) |
74 | { | 75 | { |
75 | int i; | 76 | int i, limit; |
77 | |||
78 | printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " | ||
79 | "dumping track stack.\n", p->tl); | ||
76 | 80 | ||
77 | printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n", | 81 | limit = (tlb_type == hypervisor) ? 2 : 4; |
78 | p->tl); | 82 | for (i = 0; i < limit; i++) { |
79 | for (i = 0; i < 4; i++) { | 83 | printk(KERN_EMERG |
80 | printk(KERN_CRIT | ||
81 | "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " | 84 | "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " |
82 | "TNPC[%016lx] TT[%lx]\n", | 85 | "TNPC[%016lx] TT[%lx]\n", |
83 | i + 1, | 86 | i + 1, |
@@ -179,6 +182,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr | |||
179 | spitfire_insn_access_exception(regs, sfsr, sfar); | 182 | spitfire_insn_access_exception(regs, sfsr, sfar); |
180 | } | 183 | } |
181 | 184 | ||
185 | void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
186 | { | ||
187 | unsigned short type = (type_ctx >> 16); | ||
188 | unsigned short ctx = (type_ctx & 0xffff); | ||
189 | siginfo_t info; | ||
190 | |||
191 | if (notify_die(DIE_TRAP, "instruction access exception", regs, | ||
192 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
193 | return; | ||
194 | |||
195 | if (regs->tstate & TSTATE_PRIV) { | ||
196 | printk("sun4v_insn_access_exception: ADDR[%016lx] " | ||
197 | "CTX[%04x] TYPE[%04x], going.\n", | ||
198 | addr, ctx, type); | ||
199 | die_if_kernel("Iax", regs); | ||
200 | } | ||
201 | |||
202 | if (test_thread_flag(TIF_32BIT)) { | ||
203 | regs->tpc &= 0xffffffff; | ||
204 | regs->tnpc &= 0xffffffff; | ||
205 | } | ||
206 | info.si_signo = SIGSEGV; | ||
207 | info.si_errno = 0; | ||
208 | info.si_code = SEGV_MAPERR; | ||
209 | info.si_addr = (void __user *) addr; | ||
210 | info.si_trapno = 0; | ||
211 | force_sig_info(SIGSEGV, &info, current); | ||
212 | } | ||
213 | |||
214 | void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
215 | { | ||
216 | if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, | ||
217 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
218 | return; | ||
219 | |||
220 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
221 | sun4v_insn_access_exception(regs, addr, type_ctx); | ||
222 | } | ||
223 | |||
182 | void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) | 224 | void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) |
183 | { | 225 | { |
184 | siginfo_t info; | 226 | siginfo_t info; |
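sun4v_insn_access_exception() above treats its type_ctx argument as a packed pair: fault type in bits 31:16, MMU context in bits 15:0. The inverse of that unpacking, for reference (the helper name is ours, not the kernel's):

	static inline unsigned long sun4v_make_type_ctx(unsigned short type,
							unsigned short ctx)
	{
		return ((unsigned long) type << 16) | ctx;
	}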
@@ -227,6 +269,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr | |||
227 | spitfire_data_access_exception(regs, sfsr, sfar); | 269 | spitfire_data_access_exception(regs, sfsr, sfar); |
228 | } | 270 | } |
229 | 271 | ||
272 | void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
273 | { | ||
274 | unsigned short type = (type_ctx >> 16); | ||
275 | unsigned short ctx = (type_ctx & 0xffff); | ||
276 | siginfo_t info; | ||
277 | |||
278 | if (notify_die(DIE_TRAP, "data access exception", regs, | ||
279 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
280 | return; | ||
281 | |||
282 | if (regs->tstate & TSTATE_PRIV) { | ||
283 | printk("sun4v_data_access_exception: ADDR[%016lx] " | ||
284 | "CTX[%04x] TYPE[%04x], going.\n", | ||
285 | addr, ctx, type); | ||
286 | die_if_kernel("Dax", regs); | ||
287 | } | ||
288 | |||
289 | if (test_thread_flag(TIF_32BIT)) { | ||
290 | regs->tpc &= 0xffffffff; | ||
291 | regs->tnpc &= 0xffffffff; | ||
292 | } | ||
293 | info.si_signo = SIGSEGV; | ||
294 | info.si_errno = 0; | ||
295 | info.si_code = SEGV_MAPERR; | ||
296 | info.si_addr = (void __user *) addr; | ||
297 | info.si_trapno = 0; | ||
298 | force_sig_info(SIGSEGV, &info, current); | ||
299 | } | ||
300 | |||
301 | void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
302 | { | ||
303 | if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, | ||
304 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
305 | return; | ||
306 | |||
307 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
308 | sun4v_data_access_exception(regs, addr, type_ctx); | ||
309 | } | ||
310 | |||
230 | #ifdef CONFIG_PCI | 311 | #ifdef CONFIG_PCI |
231 | /* This is really pathetic... */ | 312 | /* This is really pathetic... */ |
232 | extern volatile int pci_poke_in_progress; | 313 | extern volatile int pci_poke_in_progress; |
@@ -788,7 +869,8 @@ void __init cheetah_ecache_flush_init(void) | |||
788 | cheetah_error_log[i].afsr = CHAFSR_INVALID; | 869 | cheetah_error_log[i].afsr = CHAFSR_INVALID; |
789 | 870 | ||
790 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | 871 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); |
791 | if ((ver >> 32) == 0x003e0016) { | 872 | if ((ver >> 32) == __JALAPENO_ID || |
873 | (ver >> 32) == __SERRANO_ID) { | ||
792 | cheetah_error_table = &__jalapeno_error_table[0]; | 874 | cheetah_error_table = &__jalapeno_error_table[0]; |
793 | cheetah_afsr_errors = JPAFSR_ERRORS; | 875 | cheetah_afsr_errors = JPAFSR_ERRORS; |
794 | } else if ((ver >> 32) == 0x003e0015) { | 876 | } else if ((ver >> 32) == 0x003e0015) { |
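The comparison above keys off the upper 32 bits of the privileged %ver register, which hold the manufacturer and implementation fields; __JALAPENO_ID and __SERRANO_ID now name what had been the bare constant 0x003e0016. A small helper showing the extraction (the helper is a sketch, not part of this diff):

	static inline unsigned long cpu_ver_impl(void)
	{
		unsigned long ver;

		__asm__("rdpr %%ver, %0" : "=r" (ver));
		return ver >> 32;	/* manufacturer:implementation */
	}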
@@ -1666,6 +1748,238 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) | |||
1666 | regs->tpc); | 1748 | regs->tpc); |
1667 | } | 1749 | } |
1668 | 1750 | ||
1751 | struct sun4v_error_entry { | ||
1752 | u64 err_handle; | ||
1753 | u64 err_stick; | ||
1754 | |||
1755 | u32 err_type; | ||
1756 | #define SUN4V_ERR_TYPE_UNDEFINED 0 | ||
1757 | #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 | ||
1758 | #define SUN4V_ERR_TYPE_PRECISE_NONRES 2 | ||
1759 | #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 | ||
1760 | #define SUN4V_ERR_TYPE_WARNING_RES 4 | ||
1761 | |||
1762 | u32 err_attrs; | ||
1763 | #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 | ||
1764 | #define SUN4V_ERR_ATTRS_MEMORY 0x00000002 | ||
1765 | #define SUN4V_ERR_ATTRS_PIO 0x00000004 | ||
1766 | #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 | ||
1767 | #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 | ||
1768 | #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000 | ||
1769 | #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000 | ||
1770 | #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 | ||
1771 | |||
1772 | u64 err_raddr; | ||
1773 | u32 err_size; | ||
1774 | u16 err_cpu; | ||
1775 | u16 err_pad; | ||
1776 | }; | ||
1777 | |||
1778 | static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); | ||
1779 | static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); | ||
1780 | |||
1781 | static const char *sun4v_err_type_to_str(u32 type) | ||
1782 | { | ||
1783 | switch (type) { | ||
1784 | case SUN4V_ERR_TYPE_UNDEFINED: | ||
1785 | return "undefined"; | ||
1786 | case SUN4V_ERR_TYPE_UNCORRECTED_RES: | ||
1787 | return "uncorrected resumable"; | ||
1788 | case SUN4V_ERR_TYPE_PRECISE_NONRES: | ||
1789 | return "precise nonresumable"; | ||
1790 | case SUN4V_ERR_TYPE_DEFERRED_NONRES: | ||
1791 | return "deferred nonresumable"; | ||
1792 | case SUN4V_ERR_TYPE_WARNING_RES: | ||
1793 | return "warning resumable"; | ||
1794 | default: | ||
1795 | return "unknown"; | ||
1796 | } | ||
1797 | } | ||
1798 | |||
1799 | static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) | ||
1800 | { | ||
1801 | int cnt; | ||
1802 | |||
1803 | printk("%s: Reporting on cpu %d\n", pfx, cpu); | ||
1804 | printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n", | ||
1805 | pfx, | ||
1806 | ent->err_handle, ent->err_stick, | ||
1807 | ent->err_type, | ||
1808 | sun4v_err_type_to_str(ent->err_type)); | ||
1809 | printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n", | ||
1810 | pfx, | ||
1811 | ent->err_attrs, | ||
1812 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ? | ||
1813 | "processor" : ""), | ||
1814 | ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ? | ||
1815 | "memory" : ""), | ||
1816 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ? | ||
1817 | "pio" : ""), | ||
1818 | ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ? | ||
1819 | "integer-regs" : ""), | ||
1820 | ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ? | ||
1821 | "fpu-regs" : ""), | ||
1822 | ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ? | ||
1823 | "user" : ""), | ||
1824 | ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ? | ||
1825 | "privileged" : ""), | ||
1826 | ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ? | ||
1827 | "queue-full" : "")); | ||
1828 | printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n", | ||
1829 | pfx, | ||
1830 | ent->err_raddr, ent->err_size, ent->err_cpu); | ||
1831 | |||
1832 | if ((cnt = atomic_read(ocnt)) != 0) { | ||
1833 | atomic_set(ocnt, 0); | ||
1834 | wmb(); | ||
1835 | printk("%s: Queue overflowed %d times.\n", | ||
1836 | pfx, cnt); | ||
1837 | } | ||
1838 | } | ||
1839 | |||
1840 | /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. | ||
1841 | * Log the event and clear the first word of the entry. | ||
1842 | */ | ||
1843 | void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) | ||
1844 | { | ||
1845 | struct sun4v_error_entry *ent, local_copy; | ||
1846 | struct trap_per_cpu *tb; | ||
1847 | unsigned long paddr; | ||
1848 | int cpu; | ||
1849 | |||
1850 | cpu = get_cpu(); | ||
1851 | |||
1852 | tb = &trap_block[cpu]; | ||
1853 | paddr = tb->resum_kernel_buf_pa + offset; | ||
1854 | ent = __va(paddr); | ||
1855 | |||
1856 | memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); | ||
1857 | |||
1858 | /* We have a local copy now, so release the entry. */ | ||
1859 | ent->err_handle = 0; | ||
1860 | wmb(); | ||
1861 | |||
1862 | put_cpu(); | ||
1863 | |||
1864 | sun4v_log_error(&local_copy, cpu, | ||
1865 | KERN_ERR "RESUMABLE ERROR", | ||
1866 | &sun4v_resum_oflow_cnt); | ||
1867 | } | ||
1868 | |||
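The consume protocol in sun4v_resum_error() above is: snapshot the queue entry into a local copy, then clear err_handle and order that store with wmb() so the hypervisor may reuse the slot while we report from the copy. Factored out as a sketch (the helper name is ours):

	static void sun4v_take_entry(unsigned long buf_pa, unsigned long offset,
				     struct sun4v_error_entry *copy)
	{
		struct sun4v_error_entry *ent = __va(buf_pa + offset);

		memcpy(copy, ent, sizeof(*copy));

		/* Release the slot; once this store is visible the entry
		 * may be overwritten, so report only from the copy. */
		ent->err_handle = 0;
		wmb();
	}

sun4v_nonresum_error() below follows the identical pattern before panicking.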
1869 | /* If we try to printk() we'll probably make matters worse, by trying | ||
1870 | * to retake locks this cpu already holds or causing more errors. So | ||
1871 | * just bump a counter, and we'll report these counter bumps above. | ||
1872 | */ | ||
1873 | void sun4v_resum_overflow(struct pt_regs *regs) | ||
1874 | { | ||
1875 | atomic_inc(&sun4v_resum_oflow_cnt); | ||
1876 | } | ||
1877 | |||
1878 | /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. | ||
1879 | * Log the event, clear the first word of the entry, and die. | ||
1880 | */ | ||
1881 | void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) | ||
1882 | { | ||
1883 | struct sun4v_error_entry *ent, local_copy; | ||
1884 | struct trap_per_cpu *tb; | ||
1885 | unsigned long paddr; | ||
1886 | int cpu; | ||
1887 | |||
1888 | cpu = get_cpu(); | ||
1889 | |||
1890 | tb = &trap_block[cpu]; | ||
1891 | paddr = tb->nonresum_kernel_buf_pa + offset; | ||
1892 | ent = __va(paddr); | ||
1893 | |||
1894 | memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); | ||
1895 | |||
1896 | /* We have a local copy now, so release the entry. */ | ||
1897 | ent->err_handle = 0; | ||
1898 | wmb(); | ||
1899 | |||
1900 | put_cpu(); | ||
1901 | |||
1902 | #ifdef CONFIG_PCI | ||
1903 | /* Check for the special PCI poke sequence. */ | ||
1904 | if (pci_poke_in_progress && pci_poke_cpu == cpu) { | ||
1905 | pci_poke_faulted = 1; | ||
1906 | regs->tpc += 4; | ||
1907 | regs->tnpc = regs->tpc + 4; | ||
1908 | return; | ||
1909 | } | ||
1910 | #endif | ||
1911 | |||
1912 | sun4v_log_error(&local_copy, cpu, | ||
1913 | KERN_EMERG "NON-RESUMABLE ERROR", | ||
1914 | &sun4v_nonresum_oflow_cnt); | ||
1915 | |||
1916 | panic("Non-resumable error."); | ||
1917 | } | ||
1918 | |||
1919 | /* If we try to printk() we'll probably make matters worse, by trying | ||
1920 | * to retake locks this cpu already holds or causing more errors. So | ||
1921 | * just bump a counter, and we'll report these counter bumps above. | ||
1922 | */ | ||
1923 | void sun4v_nonresum_overflow(struct pt_regs *regs) | ||
1924 | { | ||
1925 | /* XXX Actually, even this may not make much sense. Perhaps | ||
1926 | * XXX we should just pull the plug and panic directly from here? | ||
1927 | */ | ||
1928 | atomic_inc(&sun4v_nonresum_oflow_cnt); | ||
1929 | } | ||
1930 | |||
1931 | unsigned long sun4v_err_itlb_vaddr; | ||
1932 | unsigned long sun4v_err_itlb_ctx; | ||
1933 | unsigned long sun4v_err_itlb_pte; | ||
1934 | unsigned long sun4v_err_itlb_error; | ||
1935 | |||
1936 | void sun4v_itlb_error_report(struct pt_regs *regs, int tl) | ||
1937 | { | ||
1938 | if (tl > 1) | ||
1939 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
1940 | |||
1941 | printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", | ||
1942 | regs->tpc, tl); | ||
1943 | printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " | ||
1944 | "pte[%lx] error[%lx]\n", | ||
1945 | sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, | ||
1946 | sun4v_err_itlb_pte, sun4v_err_itlb_error); | ||
1947 | |||
1948 | prom_halt(); | ||
1949 | } | ||
1950 | |||
1951 | unsigned long sun4v_err_dtlb_vaddr; | ||
1952 | unsigned long sun4v_err_dtlb_ctx; | ||
1953 | unsigned long sun4v_err_dtlb_pte; | ||
1954 | unsigned long sun4v_err_dtlb_error; | ||
1955 | |||
1956 | void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) | ||
1957 | { | ||
1958 | if (tl > 1) | ||
1959 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
1960 | |||
1961 | printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", | ||
1962 | regs->tpc, tl); | ||
1963 | printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " | ||
1964 | "pte[%lx] error[%lx]\n", | ||
1965 | sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, | ||
1966 | sun4v_err_dtlb_pte, sun4v_err_dtlb_error); | ||
1967 | |||
1968 | prom_halt(); | ||
1969 | } | ||
1970 | |||
1971 | void hypervisor_tlbop_error(unsigned long err, unsigned long op) | ||
1972 | { | ||
1973 | printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", | ||
1974 | err, op); | ||
1975 | } | ||
1976 | |||
1977 | void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) | ||
1978 | { | ||
1979 | printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", | ||
1980 | err, op); | ||
1981 | } | ||
1982 | |||
1669 | void do_fpe_common(struct pt_regs *regs) | 1983 | void do_fpe_common(struct pt_regs *regs) |
1670 | { | 1984 | { |
1671 | if (regs->tstate & TSTATE_PRIV) { | 1985 | if (regs->tstate & TSTATE_PRIV) { |
@@ -1924,10 +2238,11 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
1924 | } | 2238 | } |
1925 | user_instruction_dump ((unsigned int __user *) regs->tpc); | 2239 | user_instruction_dump ((unsigned int __user *) regs->tpc); |
1926 | } | 2240 | } |
2241 | #if 0 | ||
1927 | #ifdef CONFIG_SMP | 2242 | #ifdef CONFIG_SMP |
1928 | smp_report_regs(); | 2243 | smp_report_regs(); |
1929 | #endif | 2244 | #endif |
1930 | 2245 | #endif | |
1931 | if (regs->tstate & TSTATE_PRIV) | 2246 | if (regs->tstate & TSTATE_PRIV) |
1932 | do_exit(SIGKILL); | 2247 | do_exit(SIGKILL); |
1933 | do_exit(SIGSEGV); | 2248 | do_exit(SIGSEGV); |
@@ -1958,6 +2273,11 @@ void do_illegal_instruction(struct pt_regs *regs) | |||
1958 | } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { | 2273 | } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { |
1959 | if (handle_ldf_stq(insn, regs)) | 2274 | if (handle_ldf_stq(insn, regs)) |
1960 | return; | 2275 | return; |
2276 | } else if (tlb_type == hypervisor) { | ||
2277 | extern int vis_emul(struct pt_regs *, unsigned int); | ||
2278 | |||
2279 | if (!vis_emul(regs, insn)) | ||
2280 | return; | ||
1961 | } | 2281 | } |
1962 | } | 2282 | } |
1963 | info.si_signo = SIGILL; | 2283 | info.si_signo = SIGILL; |
@@ -1968,6 +2288,8 @@ void do_illegal_instruction(struct pt_regs *regs) | |||
1968 | force_sig_info(SIGILL, &info, current); | 2288 | force_sig_info(SIGILL, &info, current); |
1969 | } | 2289 | } |
1970 | 2290 | ||
2291 | extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); | ||
2292 | |||
1971 | void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) | 2293 | void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) |
1972 | { | 2294 | { |
1973 | siginfo_t info; | 2295 | siginfo_t info; |
@@ -1977,13 +2299,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo | |||
1977 | return; | 2299 | return; |
1978 | 2300 | ||
1979 | if (regs->tstate & TSTATE_PRIV) { | 2301 | if (regs->tstate & TSTATE_PRIV) { |
1980 | extern void kernel_unaligned_trap(struct pt_regs *regs, | 2302 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); |
1981 | unsigned int insn, | ||
1982 | unsigned long sfar, | ||
1983 | unsigned long sfsr); | ||
1984 | |||
1985 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), | ||
1986 | sfar, sfsr); | ||
1987 | return; | 2303 | return; |
1988 | } | 2304 | } |
1989 | info.si_signo = SIGBUS; | 2305 | info.si_signo = SIGBUS; |
@@ -1994,6 +2310,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo | |||
1994 | force_sig_info(SIGBUS, &info, current); | 2310 | force_sig_info(SIGBUS, &info, current); |
1995 | } | 2311 | } |
1996 | 2312 | ||
2313 | void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
2314 | { | ||
2315 | siginfo_t info; | ||
2316 | |||
2317 | if (notify_die(DIE_TRAP, "memory address unaligned", regs, | ||
2318 | 0, 0x34, SIGSEGV) == NOTIFY_STOP) | ||
2319 | return; | ||
2320 | |||
2321 | if (regs->tstate & TSTATE_PRIV) { | ||
2322 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); | ||
2323 | return; | ||
2324 | } | ||
2325 | info.si_signo = SIGBUS; | ||
2326 | info.si_errno = 0; | ||
2327 | info.si_code = BUS_ADRALN; | ||
2328 | info.si_addr = (void __user *) addr; | ||
2329 | info.si_trapno = 0; | ||
2330 | force_sig_info(SIGBUS, &info, current); | ||
2331 | } | ||
2332 | |||
1997 | void do_privop(struct pt_regs *regs) | 2333 | void do_privop(struct pt_regs *regs) |
1998 | { | 2334 | { |
1999 | siginfo_t info; | 2335 | siginfo_t info; |
@@ -2130,7 +2466,22 @@ void do_getpsr(struct pt_regs *regs) | |||
2130 | } | 2466 | } |
2131 | } | 2467 | } |
2132 | 2468 | ||
2469 | struct trap_per_cpu trap_block[NR_CPUS]; | ||
2470 | |||
2471 | /* This can get invoked before sched_init() so play it super safe | ||
2472 | * and use hard_smp_processor_id(). | ||
2473 | */ | ||
2474 | void init_cur_cpu_trap(struct thread_info *t) | ||
2475 | { | ||
2476 | int cpu = hard_smp_processor_id(); | ||
2477 | struct trap_per_cpu *p = &trap_block[cpu]; | ||
2478 | |||
2479 | p->thread = t; | ||
2480 | p->pgd_paddr = 0; | ||
2481 | } | ||
2482 | |||
2133 | extern void thread_info_offsets_are_bolixed_dave(void); | 2483 | extern void thread_info_offsets_are_bolixed_dave(void); |
2484 | extern void trap_per_cpu_offsets_are_bolixed_dave(void); | ||
2134 | 2485 | ||
2135 | /* Only invoked on boot processor. */ | 2486 | /* Only invoked on boot processor. */ |
2136 | void __init trap_init(void) | 2487 | void __init trap_init(void) |
@@ -2154,7 +2505,6 @@ void __init trap_init(void) | |||
2154 | TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || | 2505 | TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || |
2155 | TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || | 2506 | TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || |
2156 | TI_PCR != offsetof(struct thread_info, pcr_reg) || | 2507 | TI_PCR != offsetof(struct thread_info, pcr_reg) || |
2157 | TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) || | ||
2158 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || | 2508 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || |
2159 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || | 2509 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || |
2160 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || | 2510 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || |
@@ -2165,6 +2515,29 @@ void __init trap_init(void) | |||
2165 | (TI_FPREGS & (64 - 1))) | 2515 | (TI_FPREGS & (64 - 1))) |
2166 | thread_info_offsets_are_bolixed_dave(); | 2516 | thread_info_offsets_are_bolixed_dave(); |
2167 | 2517 | ||
2518 | if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || | ||
2519 | (TRAP_PER_CPU_PGD_PADDR != | ||
2520 | offsetof(struct trap_per_cpu, pgd_paddr)) || | ||
2521 | (TRAP_PER_CPU_CPU_MONDO_PA != | ||
2522 | offsetof(struct trap_per_cpu, cpu_mondo_pa)) || | ||
2523 | (TRAP_PER_CPU_DEV_MONDO_PA != | ||
2524 | offsetof(struct trap_per_cpu, dev_mondo_pa)) || | ||
2525 | (TRAP_PER_CPU_RESUM_MONDO_PA != | ||
2526 | offsetof(struct trap_per_cpu, resum_mondo_pa)) || | ||
2527 | (TRAP_PER_CPU_RESUM_KBUF_PA != | ||
2528 | offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || | ||
2529 | (TRAP_PER_CPU_NONRESUM_MONDO_PA != | ||
2530 | offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || | ||
2531 | (TRAP_PER_CPU_NONRESUM_KBUF_PA != | ||
2532 | offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || | ||
2533 | (TRAP_PER_CPU_FAULT_INFO != | ||
2534 | offsetof(struct trap_per_cpu, fault_info)) || | ||
2535 | (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != | ||
2536 | offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || | ||
2537 | (TRAP_PER_CPU_CPU_LIST_PA != | ||
2538 | offsetof(struct trap_per_cpu, cpu_list_pa))) | ||
2539 | trap_per_cpu_offsets_are_bolixed_dave(); | ||
2540 | |||
2168 | /* Attach to the address space of init_task. On SMP we | 2541 | /* Attach to the address space of init_task. On SMP we |
2169 | * do this in smp.c:smp_callin for other cpus. | 2542 | * do this in smp.c:smp_callin for other cpus. |
2170 | */ | 2543 | */ |
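The *_offsets_are_bolixed_dave() calls above are link-time assertions: the functions are declared extern but never defined, so if any offsetof() mismatch keeps a call alive, the final link fails with an undefined reference that names the problem. Assuming BUILD_BUG_ON() is available in this tree, the same checks could read (sketch only; the diff keeps the extern-function form):

	#include <linux/kernel.h>
	#include <linux/stddef.h>

	static inline void trap_per_cpu_offset_checks(void)
	{
		BUILD_BUG_ON(TRAP_PER_CPU_THREAD !=
			     offsetof(struct trap_per_cpu, thread));
		BUILD_BUG_ON(TRAP_PER_CPU_PGD_PADDR !=
			     offsetof(struct trap_per_cpu, pgd_paddr));
		/* ...and likewise for the mondo, kbuf and fault_info offsets. */
	}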
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S new file mode 100644 index 000000000000..118baea44f69 --- /dev/null +++ b/arch/sparc64/kernel/tsb.S | |||
@@ -0,0 +1,442 @@ | |||
1 | /* tsb.S: Sparc64 TSB table handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/tsb.h> | ||
7 | #include <asm/hypervisor.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | /* Invoked from TLB miss handler, we are in the | ||
13 | * MMU global registers and they are set up like | ||
14 | * this: | ||
15 | * | ||
16 | * %g1: TSB entry pointer | ||
17 | * %g2: available temporary | ||
18 | * %g3: FAULT_CODE_{D,I}TLB | ||
19 | * %g4: available temporary | ||
20 | * %g5: available temporary | ||
21 | * %g6: TAG TARGET | ||
22 | * %g7: available temporary, will be loaded by us with | ||
23 | * the physical address base of the linux page | ||
24 | * tables for the current address space | ||
25 | */ | ||
26 | tsb_miss_dtlb: | ||
27 | mov TLB_TAG_ACCESS, %g4 | ||
28 | ba,pt %xcc, tsb_miss_page_table_walk | ||
29 | ldxa [%g4] ASI_DMMU, %g4 | ||
30 | |||
31 | tsb_miss_itlb: | ||
32 | mov TLB_TAG_ACCESS, %g4 | ||
33 | ba,pt %xcc, tsb_miss_page_table_walk | ||
34 | ldxa [%g4] ASI_IMMU, %g4 | ||
35 | |||
36 | /* At this point we have: | ||
37 | * %g1 -- TSB entry address | ||
38 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
39 | * %g4 -- missing virtual address | ||
40 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
41 | */ | ||
42 | tsb_miss_page_table_walk: | ||
43 | TRAP_LOAD_PGD_PHYS(%g7, %g5) | ||
44 | |||
45 | /* And now we have the PGD base physical address in %g7. */ | ||
46 | tsb_miss_page_table_walk_sun4v_fastpath: | ||
47 | USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) | ||
48 | |||
49 | /* At this point we have: | ||
50 | * %g1 -- TSB entry address | ||
51 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
52 | * %g5 -- physical address of PTE in Linux page tables | ||
53 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
54 | */ | ||
55 | tsb_reload: | ||
56 | TSB_LOCK_TAG(%g1, %g2, %g7) | ||
57 | |||
58 | /* Load and check PTE. */ | ||
59 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 | ||
60 | mov 1, %g7 | ||
61 | sllx %g7, TSB_TAG_INVALID_BIT, %g7 | ||
62 | brgez,a,pn %g5, tsb_do_fault | ||
63 | TSB_STORE(%g1, %g7) | ||
64 | |||
65 | TSB_WRITE(%g1, %g5, %g6) | ||
66 | |||
67 | /* Finally, load TLB and return from trap. */ | ||
68 | tsb_tlb_reload: | ||
69 | cmp %g3, FAULT_CODE_DTLB | ||
70 | bne,pn %xcc, tsb_itlb_load | ||
71 | nop | ||
72 | |||
73 | tsb_dtlb_load: | ||
74 | |||
75 | 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
76 | retry | ||
77 | .section .sun4v_2insn_patch, "ax" | ||
78 | .word 661b | ||
79 | nop | ||
80 | nop | ||
81 | .previous | ||
82 | |||
83 | /* For sun4v the ASI_DTLB_DATA_IN store and the retry | ||
84 | * instruction get nop'd out and we get here to branch | ||
86 | * to the sun4v tlb load code. The registers are set up | ||
86 | * as follows: | ||
87 | * | ||
88 | * %g4: vaddr | ||
89 | * %g5: PTE | ||
90 | * %g6: TAG | ||
91 | * | ||
92 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
93 | * up here. | ||
94 | */ | ||
95 | ba,pt %xcc, sun4v_dtlb_load | ||
96 | mov %g5, %g3 | ||
97 | |||
98 | tsb_itlb_load: | ||
99 | /* Executable bit must be set. */ | ||
100 | 661: andcc %g5, _PAGE_EXEC_4U, %g0 | ||
101 | .section .sun4v_1insn_patch, "ax" | ||
102 | .word 661b | ||
103 | andcc %g5, _PAGE_EXEC_4V, %g0 | ||
104 | .previous | ||
105 | |||
106 | be,pn %xcc, tsb_do_fault | ||
107 | nop | ||
108 | |||
109 | 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
110 | retry | ||
111 | .section .sun4v_2insn_patch, "ax" | ||
112 | .word 661b | ||
113 | nop | ||
114 | nop | ||
115 | .previous | ||
116 | |||
117 | /* For sun4v the ASI_ITLB_DATA_IN store and the retry | ||
118 | * instruction get nop'd out and we get here to branch | ||
119 | * to the sun4v tlb load code. The registers are set up | ||
120 | * as follows: | ||
121 | * | ||
122 | * %g4: vaddr | ||
123 | * %g5: PTE | ||
124 | * %g6: TAG | ||
125 | * | ||
126 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
127 | * up here. | ||
128 | */ | ||
129 | ba,pt %xcc, sun4v_itlb_load | ||
130 | mov %g5, %g3 | ||
131 | |||
132 | /* No valid entry in the page tables, do full fault | ||
133 | * processing. | ||
134 | */ | ||
135 | |||
136 | .globl tsb_do_fault | ||
137 | tsb_do_fault: | ||
138 | cmp %g3, FAULT_CODE_DTLB | ||
139 | |||
140 | 661: rdpr %pstate, %g5 | ||
141 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
142 | .section .sun4v_2insn_patch, "ax" | ||
143 | .word 661b | ||
144 | SET_GL(1) | ||
145 | ldxa [%g0] ASI_SCRATCHPAD, %g4 | ||
146 | .previous | ||
147 | |||
148 | bne,pn %xcc, tsb_do_itlb_fault | ||
149 | nop | ||
150 | |||
151 | tsb_do_dtlb_fault: | ||
152 | rdpr %tl, %g3 | ||
153 | cmp %g3, 1 | ||
154 | |||
155 | 661: mov TLB_TAG_ACCESS, %g4 | ||
156 | ldxa [%g4] ASI_DMMU, %g5 | ||
157 | .section .sun4v_2insn_patch, "ax" | ||
158 | .word 661b | ||
159 | ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
160 | nop | ||
161 | .previous | ||
162 | |||
163 | be,pt %xcc, sparc64_realfault_common | ||
164 | mov FAULT_CODE_DTLB, %g4 | ||
165 | ba,pt %xcc, winfix_trampoline | ||
166 | nop | ||
167 | |||
168 | tsb_do_itlb_fault: | ||
169 | rdpr %tpc, %g5 | ||
170 | ba,pt %xcc, sparc64_realfault_common | ||
171 | mov FAULT_CODE_ITLB, %g4 | ||
172 | |||
173 | .globl sparc64_realfault_common | ||
174 | sparc64_realfault_common: | ||
175 | /* fault code in %g4, fault address in %g5, etrap will | ||
176 | * preserve these two values in %l4 and %l5 respectively | ||
177 | */ | ||
178 | ba,pt %xcc, etrap ! Save trap state | ||
179 | 1: rd %pc, %g7 ! ... | ||
180 | stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code | ||
181 | stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address | ||
182 | call do_sparc64_fault ! Call fault handler | ||
183 | add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg | ||
184 | ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state | ||
185 | nop ! Delay slot (fill me) | ||
186 | |||
187 | winfix_trampoline: | ||
188 | rdpr %tpc, %g3 ! Prepare winfixup TNPC | ||
189 | or %g3, 0x7c, %g3 ! Compute branch offset | ||
190 | wrpr %g3, %tnpc ! Write it into TNPC | ||
191 | done ! Trap return | ||
192 | |||
193 | /* Insert an entry into the TSB. | ||
194 | * | ||
195 | * %o0: TSB entry pointer (virt or phys address) | ||
196 | * %o1: tag | ||
197 | * %o2: pte | ||
198 | */ | ||
199 | .align 32 | ||
200 | .globl __tsb_insert | ||
201 | __tsb_insert: | ||
202 | rdpr %pstate, %o5 | ||
203 | wrpr %o5, PSTATE_IE, %pstate | ||
204 | TSB_LOCK_TAG(%o0, %g2, %g3) | ||
205 | TSB_WRITE(%o0, %o2, %o1) | ||
206 | wrpr %o5, %pstate | ||
207 | retl | ||
208 | nop | ||
209 | .size __tsb_insert, .-__tsb_insert | ||
210 | |||
211 | /* Flush the given TSB entry if it has the matching | ||
212 | * tag. | ||
213 | * | ||
214 | * %o0: TSB entry pointer (virt or phys address) | ||
215 | * %o1: tag | ||
216 | */ | ||
217 | .align 32 | ||
218 | .globl tsb_flush | ||
219 | .type tsb_flush,#function | ||
220 | tsb_flush: | ||
221 | sethi %hi(TSB_TAG_LOCK_HIGH), %g2 | ||
222 | 1: TSB_LOAD_TAG(%o0, %g1) | ||
223 | srlx %g1, 32, %o3 | ||
224 | andcc %o3, %g2, %g0 | ||
225 | bne,pn %icc, 1b | ||
226 | membar #LoadLoad | ||
227 | cmp %g1, %o1 | ||
228 | mov 1, %o3 | ||
229 | bne,pt %xcc, 2f | ||
230 | sllx %o3, TSB_TAG_INVALID_BIT, %o3 | ||
231 | TSB_CAS_TAG(%o0, %g1, %o3) | ||
232 | cmp %g1, %o3 | ||
233 | bne,pn %xcc, 1b | ||
234 | nop | ||
235 | 2: retl | ||
236 | TSB_MEMBAR | ||
237 | .size tsb_flush, .-tsb_flush | ||
238 | |||
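tsb_flush above is a lock-free invalidate: spin while the tag's lock bit is set, give up if the tag no longer matches, otherwise compare-and-swap the tag to the invalid pattern and retry if the CAS loses a race. The same control flow in C, with barriers elided (the entry layout and the cas() helper are assumptions of this sketch):

	struct tsb_ent { unsigned long tag, tte; };

	static void tsb_flush_sketch(struct tsb_ent *ent, unsigned long tag)
	{
		unsigned long inv = 1UL << TSB_TAG_INVALID_BIT;
		unsigned long cur;

		for (;;) {
			cur = ent->tag;
			if ((cur >> 32) & TSB_TAG_LOCK_HIGH)
				continue;	/* a writer holds the slot */
			if (cur != tag)
				return;		/* entry already replaced */
			if (cas(&ent->tag, cur, inv) == cur)
				return;		/* invalidated */
		}
	}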
239 | /* Reload MMU related context switch state at | ||
240 | * schedule() time. | ||
241 | * | ||
242 | * %o0: page table physical address | ||
243 | * %o1: TSB register value | ||
244 | * %o2: TSB virtual address | ||
245 | * %o3: TSB mapping locked PTE | ||
246 | * %o4: Hypervisor TSB descriptor physical address | ||
247 | * | ||
248 | * We have to run this whole thing with interrupts | ||
249 | * disabled so that the current cpu doesn't change | ||
250 | * due to preemption. | ||
251 | */ | ||
252 | .align 32 | ||
253 | .globl __tsb_context_switch | ||
254 | .type __tsb_context_switch,#function | ||
255 | __tsb_context_switch: | ||
256 | rdpr %pstate, %o5 | ||
257 | wrpr %o5, PSTATE_IE, %pstate | ||
258 | |||
259 | ldub [%g6 + TI_CPU], %g1 | ||
260 | sethi %hi(trap_block), %g2 | ||
261 | sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1 | ||
262 | or %g2, %lo(trap_block), %g2 | ||
263 | add %g2, %g1, %g2 | ||
264 | stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] | ||
265 | |||
266 | sethi %hi(tlb_type), %g1 | ||
267 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
268 | cmp %g1, 3 | ||
269 | bne,pt %icc, 1f | ||
270 | nop | ||
271 | |||
272 | /* Hypervisor TSB switch. */ | ||
273 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
274 | stxa %o1, [%g1] ASI_SCRATCHPAD | ||
275 | mov -1, %g2 | ||
276 | mov SCRATCHPAD_UTSBREG2, %g1 | ||
277 | stxa %g2, [%g1] ASI_SCRATCHPAD | ||
278 | |||
279 | /* Save away %o5's %pstate, we have to use %o5 for | ||
280 | * the hypervisor call. | ||
281 | */ | ||
282 | mov %o5, %g1 | ||
283 | |||
284 | mov HV_FAST_MMU_TSB_CTXNON0, %o5 | ||
285 | mov 1, %o0 | ||
286 | mov %o4, %o1 | ||
287 | ta HV_FAST_TRAP | ||
288 | |||
289 | /* Finish up and restore %o5. */ | ||
290 | ba,pt %xcc, 9f | ||
291 | mov %g1, %o5 | ||
292 | |||
293 | /* SUN4U TSB switch. */ | ||
294 | 1: mov TSB_REG, %g1 | ||
295 | stxa %o1, [%g1] ASI_DMMU | ||
296 | membar #Sync | ||
297 | stxa %o1, [%g1] ASI_IMMU | ||
298 | membar #Sync | ||
299 | |||
300 | 2: brz %o2, 9f | ||
301 | nop | ||
302 | |||
303 | sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 | ||
304 | mov TLB_TAG_ACCESS, %g1 | ||
305 | lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 | ||
306 | stxa %o2, [%g1] ASI_DMMU | ||
307 | membar #Sync | ||
308 | sllx %g2, 3, %g2 | ||
309 | stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS | ||
310 | membar #Sync | ||
311 | 9: | ||
312 | wrpr %o5, %pstate | ||
313 | |||
314 | retl | ||
315 | nop | ||
316 | .size __tsb_context_switch, .-__tsb_context_switch | ||
317 | |||
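For the hypervisor case (tlb_type == 3), __tsb_context_switch above does not program TSB_REG at all: it parks the TSB register value in a scratchpad register and passes the hypervisor a physical pointer (%o4) to a TSB descriptor via HV_FAST_MMU_TSB_CTXNON0. The descriptor layout that call expects, per the sun4v interface (field names here are our sketch):

	struct hv_tsb_descr {
		unsigned short	pgsz_idx;	/* base page size of the TSB */
		unsigned short	assoc;		/* associativity, 1 == direct */
		unsigned int	num_ttes;	/* entries, not bytes */
		unsigned int	ctx_idx;	/* context index */
		unsigned int	pgsz_mask;	/* all page sizes mapped */
		unsigned long	tsb_base;	/* physical base address */
		unsigned long	resv;
	};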
318 | #define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ | ||
319 | (1 << TSB_TAG_INVALID_BIT)) | ||
320 | |||
321 | .align 32 | ||
322 | .globl copy_tsb | ||
323 | .type copy_tsb,#function | ||
324 | copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size | ||
325 | * %o2=new_tsb_base, %o3=new_tsb_size | ||
326 | */ | ||
327 | sethi %uhi(TSB_PASS_BITS), %g7 | ||
328 | srlx %o3, 4, %o3 | ||
329 | add %o0, %o1, %g1 /* end of old tsb */ | ||
330 | sllx %g7, 32, %g7 | ||
331 | sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ | ||
332 | |||
333 | 661: prefetcha [%o0] ASI_N, #one_read | ||
334 | .section .tsb_phys_patch, "ax" | ||
335 | .word 661b | ||
336 | prefetcha [%o0] ASI_PHYS_USE_EC, #one_read | ||
337 | .previous | ||
338 | |||
339 | 90: andcc %o0, (64 - 1), %g0 | ||
340 | bne 1f | ||
341 | add %o0, 64, %o5 | ||
342 | |||
343 | 661: prefetcha [%o5] ASI_N, #one_read | ||
344 | .section .tsb_phys_patch, "ax" | ||
345 | .word 661b | ||
346 | prefetcha [%o5] ASI_PHYS_USE_EC, #one_read | ||
347 | .previous | ||
348 | |||
349 | 1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ | ||
350 | andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ | ||
351 | bne,pn %xcc, 80f /* Skip it */ | ||
352 | sllx %g2, 22, %o4 /* TAG --> VADDR */ | ||
353 | |||
354 | /* This can definitely be computed faster... */ | ||
355 | srlx %o0, 4, %o5 /* Build index */ | ||
356 | and %o5, 511, %o5 /* Mask index */ | ||
357 | sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ | ||
358 | or %o4, %o5, %o4 /* Full VADDR. */ | ||
359 | srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ | ||
360 | and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ | ||
361 | sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ | ||
362 | TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ | ||
363 | add %o4, 0x8, %o4 /* Advance to TTE */ | ||
364 | TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ | ||
365 | |||
366 | 80: add %o0, 16, %o0 | ||
367 | cmp %o0, %g1 | ||
368 | bne,pt %xcc, 90b | ||
369 | nop | ||
370 | |||
371 | retl | ||
372 | TSB_MEMBAR | ||
373 | .size copy_tsb, .-copy_tsb | ||
374 | |||
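The index arithmetic in copy_tsb above rebuilds each entry's virtual address (high bits from the stored tag, which holds vaddr >> 22; low bits from the entry's position in the old table) and then rehashes the result with the new table's mask. The same math in C (a sketch; old_off is the entry's byte offset in the old TSB):

	static unsigned long copy_tsb_hash(unsigned long old_off,
					   unsigned long tag,
					   unsigned long new_nents)
	{
		unsigned long vaddr;

		vaddr  = tag << 22;				/* TAG --> VADDR */
		vaddr |= ((old_off >> 4) & 511UL) << PAGE_SHIFT;

		return ((vaddr >> PAGE_SHIFT) & (new_nents - 1)) << 4;
	}

The "& 511" mirrors the 512-entry stride the assembly hard-codes; as its comment admits, this could be computed faster.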
375 | /* Set the invalid bit in all TSB entries. */ | ||
376 | .align 32 | ||
377 | .globl tsb_init | ||
378 | .type tsb_init,#function | ||
379 | tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */ | ||
380 | prefetch [%o0 + 0x000], #n_writes | ||
381 | mov 1, %g1 | ||
382 | prefetch [%o0 + 0x040], #n_writes | ||
383 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
384 | prefetch [%o0 + 0x080], #n_writes | ||
385 | 1: prefetch [%o0 + 0x0c0], #n_writes | ||
386 | stx %g1, [%o0 + 0x00] | ||
387 | stx %g1, [%o0 + 0x10] | ||
388 | stx %g1, [%o0 + 0x20] | ||
389 | stx %g1, [%o0 + 0x30] | ||
390 | prefetch [%o0 + 0x100], #n_writes | ||
391 | stx %g1, [%o0 + 0x40] | ||
392 | stx %g1, [%o0 + 0x50] | ||
393 | stx %g1, [%o0 + 0x60] | ||
394 | stx %g1, [%o0 + 0x70] | ||
395 | prefetch [%o0 + 0x140], #n_writes | ||
396 | stx %g1, [%o0 + 0x80] | ||
397 | stx %g1, [%o0 + 0x90] | ||
398 | stx %g1, [%o0 + 0xa0] | ||
399 | stx %g1, [%o0 + 0xb0] | ||
400 | prefetch [%o0 + 0x180], #n_writes | ||
401 | stx %g1, [%o0 + 0xc0] | ||
402 | stx %g1, [%o0 + 0xd0] | ||
403 | stx %g1, [%o0 + 0xe0] | ||
404 | stx %g1, [%o0 + 0xf0] | ||
405 | subcc %o1, 0x100, %o1 | ||
406 | bne,pt %xcc, 1b | ||
407 | add %o0, 0x100, %o0 | ||
408 | retl | ||
409 | nop | ||
410 | nop | ||
411 | nop | ||
412 | .size tsb_init, .-tsb_init | ||
413 | |||
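tsb_init above and NGtsb_init just below it are the same loop twice: write the invalid-bit pattern into the tag word of every 16-byte TSB entry, once with plain stores plus software prefetch, and once through the Niagara block-init ASI so the stores do not first pull each line into the cache. The unrolled assembly reduces to this C (sketch):

	static void tsb_init_sketch(void *tsb, unsigned long size_bytes)
	{
		unsigned long inv = 1UL << TSB_TAG_INVALID_BIT;
		unsigned long off;

		for (off = 0; off < size_bytes; off += 16)
			*(unsigned long *)((char *)tsb + off) = inv;	/* tag word */
	}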
414 | .globl NGtsb_init | ||
415 | .type NGtsb_init,#function | ||
416 | NGtsb_init: | ||
417 | rd %asi, %g2 | ||
418 | mov 1, %g1 | ||
419 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
420 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
421 | 1: stxa %g1, [%o0 + 0x00] %asi | ||
422 | stxa %g1, [%o0 + 0x10] %asi | ||
423 | stxa %g1, [%o0 + 0x20] %asi | ||
424 | stxa %g1, [%o0 + 0x30] %asi | ||
425 | stxa %g1, [%o0 + 0x40] %asi | ||
426 | stxa %g1, [%o0 + 0x50] %asi | ||
427 | stxa %g1, [%o0 + 0x60] %asi | ||
428 | stxa %g1, [%o0 + 0x70] %asi | ||
429 | stxa %g1, [%o0 + 0x80] %asi | ||
430 | stxa %g1, [%o0 + 0x90] %asi | ||
431 | stxa %g1, [%o0 + 0xa0] %asi | ||
432 | stxa %g1, [%o0 + 0xb0] %asi | ||
433 | stxa %g1, [%o0 + 0xc0] %asi | ||
434 | stxa %g1, [%o0 + 0xd0] %asi | ||
435 | stxa %g1, [%o0 + 0xe0] %asi | ||
436 | stxa %g1, [%o0 + 0xf0] %asi | ||
437 | subcc %o1, 0x100, %o1 | ||
438 | bne,pt %xcc, 1b | ||
439 | add %o0, 0x100, %o0 | ||
440 | retl | ||
441 | wr %g2, 0x0, %asi | ||
442 | .size NGtsb_init, .-NGtsb_init | ||
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 8365bc1f81f3..5d901519db55 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $ | 1 | /* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions. |
2 | * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <linux/config.h> | 6 | #include <linux/config.h> |
@@ -19,7 +18,7 @@ tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) | |||
19 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) | 18 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) |
20 | tl0_iax: membar #Sync | 19 | tl0_iax: membar #Sync |
21 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) | 20 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) |
22 | tl0_resv009: BTRAP(0x9) | 21 | tl0_itsb_4v: SUN4V_ITSB_MISS |
23 | tl0_iae: membar #Sync | 22 | tl0_iae: membar #Sync |
24 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 23 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
25 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) | 24 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) |
@@ -38,7 +37,7 @@ tl0_div0: TRAP(do_div0) | |||
38 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) | 37 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) |
39 | tl0_resv02f: BTRAP(0x2f) | 38 | tl0_resv02f: BTRAP(0x2f) |
40 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) | 39 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) |
41 | tl0_resv031: BTRAP(0x31) | 40 | tl0_dtsb_4v: SUN4V_DTSB_MISS |
42 | tl0_dae: membar #Sync | 41 | tl0_dae: membar #Sync |
43 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 42 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
44 | tl0_resv033: BTRAP(0x33) | 43 | tl0_resv033: BTRAP(0x33) |
@@ -52,12 +51,13 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) | |||
52 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) | 51 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) |
53 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) | 52 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) |
54 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) | 53 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) |
54 | tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) | ||
55 | #else | 55 | #else |
56 | tl0_irq1: BTRAP(0x41) | 56 | tl0_irq1: BTRAP(0x41) |
57 | tl0_irq2: BTRAP(0x42) | 57 | tl0_irq2: BTRAP(0x42) |
58 | tl0_irq3: BTRAP(0x43) | 58 | tl0_irq3: BTRAP(0x43) |
59 | tl0_irq4: BTRAP(0x44) | ||
59 | #endif | 60 | #endif |
60 | tl0_irq4: TRAP_IRQ(handler_irq, 4) | ||
61 | tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) | 61 | tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) |
62 | tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) | 62 | tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) |
63 | tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) | 63 | tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) |
@@ -78,9 +78,9 @@ tl0_vaw: TRAP(do_vaw) | |||
78 | tl0_cee: membar #Sync | 78 | tl0_cee: membar #Sync |
79 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) | 79 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) |
80 | tl0_iamiss: | 80 | tl0_iamiss: |
81 | #include "itlb_base.S" | 81 | #include "itlb_miss.S" |
82 | tl0_damiss: | 82 | tl0_damiss: |
83 | #include "dtlb_base.S" | 83 | #include "dtlb_miss.S" |
84 | tl0_daprot: | 84 | tl0_daprot: |
85 | #include "dtlb_prot.S" | 85 | #include "dtlb_prot.S" |
86 | tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ | 86 | tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ |
@@ -88,15 +88,18 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ | |||
88 | tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ | 88 | tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ |
89 | tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) | 89 | tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) |
90 | tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) | 90 | tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) |
91 | tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f) | 91 | tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) |
92 | tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) | ||
93 | tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) | ||
94 | tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) | ||
92 | tl0_s0n: SPILL_0_NORMAL | 95 | tl0_s0n: SPILL_0_NORMAL |
93 | tl0_s1n: SPILL_1_NORMAL | 96 | tl0_s1n: SPILL_1_NORMAL |
94 | tl0_s2n: SPILL_2_NORMAL | 97 | tl0_s2n: SPILL_2_NORMAL |
95 | tl0_s3n: SPILL_3_NORMAL | 98 | tl0_s3n: SPILL_0_NORMAL_ETRAP |
96 | tl0_s4n: SPILL_4_NORMAL | 99 | tl0_s4n: SPILL_1_GENERIC_ETRAP |
97 | tl0_s5n: SPILL_5_NORMAL | 100 | tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP |
98 | tl0_s6n: SPILL_6_NORMAL | 101 | tl0_s6n: SPILL_2_GENERIC_ETRAP |
99 | tl0_s7n: SPILL_7_NORMAL | 102 | tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP |
100 | tl0_s0o: SPILL_0_OTHER | 103 | tl0_s0o: SPILL_0_OTHER |
101 | tl0_s1o: SPILL_1_OTHER | 104 | tl0_s1o: SPILL_1_OTHER |
102 | tl0_s2o: SPILL_2_OTHER | 105 | tl0_s2o: SPILL_2_OTHER |
@@ -110,9 +113,9 @@ tl0_f1n: FILL_1_NORMAL | |||
110 | tl0_f2n: FILL_2_NORMAL | 113 | tl0_f2n: FILL_2_NORMAL |
111 | tl0_f3n: FILL_3_NORMAL | 114 | tl0_f3n: FILL_3_NORMAL |
112 | tl0_f4n: FILL_4_NORMAL | 115 | tl0_f4n: FILL_4_NORMAL |
113 | tl0_f5n: FILL_5_NORMAL | 116 | tl0_f5n: FILL_0_NORMAL_RTRAP |
114 | tl0_f6n: FILL_6_NORMAL | 117 | tl0_f6n: FILL_1_GENERIC_RTRAP |
115 | tl0_f7n: FILL_7_NORMAL | 118 | tl0_f7n: FILL_2_GENERIC_RTRAP |
116 | tl0_f0o: FILL_0_OTHER | 119 | tl0_f0o: FILL_0_OTHER |
117 | tl0_f1o: FILL_1_OTHER | 120 | tl0_f1o: FILL_1_OTHER |
118 | tl0_f2o: FILL_2_OTHER | 121 | tl0_f2o: FILL_2_OTHER |
@@ -128,7 +131,7 @@ tl0_flushw: FLUSH_WINDOW_TRAP | |||
128 | tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) | 131 | tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) |
129 | .globl tl0_solaris | 132 | .globl tl0_solaris |
130 | tl0_solaris: SOLARIS_SYSCALL_TRAP | 133 | tl0_solaris: SOLARIS_SYSCALL_TRAP |
131 | tl0_netbsd: NETBSD_SYSCALL_TRAP | 134 | tl0_resv109: BTRAP(0x109) |
132 | tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e) | 135 | tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e) |
133 | tl0_resv10f: BTRAP(0x10f) | 136 | tl0_resv10f: BTRAP(0x10f) |
134 | tl0_linux32: LINUX_32BIT_SYSCALL_TRAP | 137 | tl0_linux32: LINUX_32BIT_SYSCALL_TRAP |
@@ -179,7 +182,7 @@ sparc64_ttable_tl1: | |||
179 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) | 182 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) |
180 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) | 183 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) |
181 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) | 184 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) |
182 | tl1_resv009: BTRAPTL1(0x9) | 185 | tl1_itsb_4v: SUN4V_ITSB_MISS |
183 | tl1_iae: membar #Sync | 186 | tl1_iae: membar #Sync |
184 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 187 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
185 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) | 188 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) |
@@ -198,7 +201,7 @@ tl1_div0: TRAPTL1(do_div0_tl1) | |||
198 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) | 201 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) |
199 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) | 202 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) |
200 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) | 203 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) |
201 | tl1_resv031: BTRAPTL1(0x31) | 204 | tl1_dtsb_4v: SUN4V_DTSB_MISS |
202 | tl1_dae: membar #Sync | 205 | tl1_dae: membar #Sync |
203 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | 206 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) |
204 | tl1_resv033: BTRAPTL1(0x33) | 207 | tl1_resv033: BTRAPTL1(0x33) |
@@ -222,26 +225,10 @@ tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f) | |||
222 | tl1_ivec: TRAP_IVEC | 225 | tl1_ivec: TRAP_IVEC |
223 | tl1_paw: TRAPTL1(do_paw_tl1) | 226 | tl1_paw: TRAPTL1(do_paw_tl1) |
224 | tl1_vaw: TRAPTL1(do_vaw_tl1) | 227 | tl1_vaw: TRAPTL1(do_vaw_tl1) |
225 | 228 | tl1_cee: BTRAPTL1(0x63) | |
226 | /* The grotty trick to save %g1 into current->thread.cee_stuff | ||
227 | * is because when we take this trap we could be interrupting | ||
228 | * trap code already using the trap alternate global registers. | ||
229 | * | ||
230 | * We cross our fingers and pray that this store/load does | ||
231 | * not cause yet another CEE trap. | ||
232 | */ | ||
233 | tl1_cee: membar #Sync | ||
234 | stx %g1, [%g6 + TI_CEE_STUFF] | ||
235 | ldxa [%g0] ASI_AFSR, %g1 | ||
236 | membar #Sync | ||
237 | stxa %g1, [%g0] ASI_AFSR | ||
238 | membar #Sync | ||
239 | ldx [%g6 + TI_CEE_STUFF], %g1 | ||
240 | retry | ||
241 | |||
242 | tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) | 229 | tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) |
243 | tl1_damiss: | 230 | tl1_damiss: |
244 | #include "dtlb_backend.S" | 231 | #include "dtlb_miss.S" |
245 | tl1_daprot: | 232 | tl1_daprot: |
246 | #include "dtlb_prot.S" | 233 | #include "dtlb_prot.S" |
247 | tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ | 234 | tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ |
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 70faf630603b..001e8518331f 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c | |||
@@ -277,7 +277,7 @@ static void kernel_mna_trap_fault(void) | |||
277 | regs->tstate |= (ASI_AIUS << 24UL); | 277 | regs->tstate |= (ASI_AIUS << 24UL); |
278 | } | 278 | } |
279 | 279 | ||
280 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr) | 280 | asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) |
281 | { | 281 | { |
282 | enum direction dir = decode_direction(insn); | 282 | enum direction dir = decode_direction(insn); |
283 | int size = decode_access_size(insn); | 283 | int size = decode_access_size(insn); |
@@ -405,6 +405,9 @@ extern void do_privact(struct pt_regs *regs); | |||
405 | extern void spitfire_data_access_exception(struct pt_regs *regs, | 405 | extern void spitfire_data_access_exception(struct pt_regs *regs, |
406 | unsigned long sfsr, | 406 | unsigned long sfsr, |
407 | unsigned long sfar); | 407 | unsigned long sfar); |
408 | extern void sun4v_data_access_exception(struct pt_regs *regs, | ||
409 | unsigned long addr, | ||
410 | unsigned long type_ctx); | ||
408 | 411 | ||
409 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) | 412 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) |
410 | { | 413 | { |
@@ -447,14 +450,20 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
447 | break; | 450 | break; |
448 | } | 451 | } |
449 | default: | 452 | default: |
450 | spitfire_data_access_exception(regs, 0, addr); | 453 | if (tlb_type == hypervisor) |
454 | sun4v_data_access_exception(regs, addr, 0); | ||
455 | else | ||
456 | spitfire_data_access_exception(regs, 0, addr); | ||
451 | return 1; | 457 | return 1; |
452 | } | 458 | } |
453 | if (put_user (first >> 32, (u32 __user *)addr) || | 459 | if (put_user (first >> 32, (u32 __user *)addr) || |
454 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || | 460 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || |
455 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || | 461 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || |
456 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { | 462 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { |
457 | spitfire_data_access_exception(regs, 0, addr); | 463 | if (tlb_type == hypervisor) |
464 | sun4v_data_access_exception(regs, addr, 0); | ||
465 | else | ||
466 | spitfire_data_access_exception(regs, 0, addr); | ||
458 | return 1; | 467 | return 1; |
459 | } | 468 | } |
460 | } else { | 469 | } else { |
@@ -467,7 +476,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
467 | do_privact(regs); | 476 | do_privact(regs); |
468 | return 1; | 477 | return 1; |
469 | } else if (asi > ASI_SNFL) { | 478 | } else if (asi > ASI_SNFL) { |
470 | spitfire_data_access_exception(regs, 0, addr); | 479 | if (tlb_type == hypervisor) |
480 | sun4v_data_access_exception(regs, addr, 0); | ||
481 | else | ||
482 | spitfire_data_access_exception(regs, 0, addr); | ||
471 | return 1; | 483 | return 1; |
472 | } | 484 | } |
473 | switch (insn & 0x180000) { | 485 | switch (insn & 0x180000) { |
@@ -484,7 +496,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
484 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); | 496 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); |
485 | } | 497 | } |
486 | if (err && !(asi & 0x2 /* NF */)) { | 498 | if (err && !(asi & 0x2 /* NF */)) { |
487 | spitfire_data_access_exception(regs, 0, addr); | 499 | if (tlb_type == hypervisor) |
500 | sun4v_data_access_exception(regs, addr, 0); | ||
501 | else | ||
502 | spitfire_data_access_exception(regs, 0, addr); | ||
488 | return 1; | 503 | return 1; |
489 | } | 504 | } |
490 | if (asi & 0x8) /* Little */ { | 505 | if (asi & 0x8) /* Little */ { |
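The hypervisor-versus-spitfire dispatch introduced above is repeated verbatim at every fault site in this file. A minimal sketch of how the pattern could be factored out (the helper name is illustrative, not part of this patch); note that the two reporting functions take the address and SFSR in opposite order:

	static void report_data_access_exception(struct pt_regs *regs,
						 unsigned long sfsr,
						 unsigned long addr)
	{
		/* sun4v reports (addr, type_ctx); spitfire reports (sfsr, sfar) */
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, addr, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, addr);
	}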
@@ -548,7 +563,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
548 | u32 insn; | 563 | u32 insn; |
549 | u32 first, second; | 564 | u32 first, second; |
550 | u64 value; | 565 | u64 value; |
551 | u8 asi, freg; | 566 | u8 freg; |
552 | int flag; | 567 | int flag; |
553 | struct fpustate *f = FPUSTATE; | 568 | struct fpustate *f = FPUSTATE; |
554 | 569 | ||
@@ -557,7 +572,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
557 | if (test_thread_flag(TIF_32BIT)) | 572 | if (test_thread_flag(TIF_32BIT)) |
558 | pc = (u32)pc; | 573 | pc = (u32)pc; |
559 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 574 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
560 | asi = sfsr >> 16; | 575 | int asi = decode_asi(insn, regs); |
561 | if ((asi > ASI_SNFL) || | 576 | if ((asi > ASI_SNFL) || |
562 | (asi < ASI_P)) | 577 | (asi < ASI_P)) |
563 | goto daex; | 578 | goto daex; |
@@ -587,7 +602,11 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
587 | *(u64 *)(f->regs + freg) = value; | 602 | *(u64 *)(f->regs + freg) = value; |
588 | current_thread_info()->fpsaved[0] |= flag; | 603 | current_thread_info()->fpsaved[0] |= flag; |
589 | } else { | 604 | } else { |
590 | daex: spitfire_data_access_exception(regs, sfsr, sfar); | 605 | daex: |
606 | if (tlb_type == hypervisor) | ||
607 | sun4v_data_access_exception(regs, sfar, sfsr); | ||
608 | else | ||
609 | spitfire_data_access_exception(regs, sfsr, sfar); | ||
591 | return; | 610 | return; |
592 | } | 611 | } |
593 | advance(regs); | 612 | advance(regs); |
@@ -600,7 +619,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
600 | unsigned long tstate = regs->tstate; | 619 | unsigned long tstate = regs->tstate; |
601 | u32 insn; | 620 | u32 insn; |
602 | u64 value; | 621 | u64 value; |
603 | u8 asi, freg; | 622 | u8 freg; |
604 | int flag; | 623 | int flag; |
605 | struct fpustate *f = FPUSTATE; | 624 | struct fpustate *f = FPUSTATE; |
606 | 625 | ||
@@ -609,8 +628,8 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
609 | if (test_thread_flag(TIF_32BIT)) | 628 | if (test_thread_flag(TIF_32BIT)) |
610 | pc = (u32)pc; | 629 | pc = (u32)pc; |
611 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 630 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
631 | int asi = decode_asi(insn, regs); | ||
612 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); | 632 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); |
613 | asi = sfsr >> 16; | ||
614 | value = 0; | 633 | value = 0; |
615 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; | 634 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; |
616 | if ((asi > ASI_SNFL) || | 635 | if ((asi > ASI_SNFL) || |
@@ -631,7 +650,11 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
631 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) | 650 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) |
632 | goto daex; | 651 | goto daex; |
633 | } else { | 652 | } else { |
634 | daex: spitfire_data_access_exception(regs, sfsr, sfar); | 653 | daex: |
654 | if (tlb_type == hypervisor) | ||
655 | sun4v_data_access_exception(regs, sfar, sfsr); | ||
656 | else | ||
657 | spitfire_data_access_exception(regs, sfsr, sfar); | ||
635 | return; | 658 | return; |
636 | } | 659 | } |
637 | advance(regs); | 660 | advance(regs); |
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index b35dc8dc995a..1f83fe6a82d6 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ b/arch/sparc64/kernel/us2e_cpufreq.c | |||
@@ -346,6 +346,9 @@ static int __init us2e_freq_init(void) | |||
346 | unsigned long manuf, impl, ver; | 346 | unsigned long manuf, impl, ver; |
347 | int ret; | 347 | int ret; |
348 | 348 | ||
349 | if (tlb_type != spitfire) | ||
350 | return -ENODEV; | ||
351 | |||
349 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | 352 | __asm__("rdpr %%ver, %0" : "=r" (ver)); |
350 | manuf = ((ver >> 48) & 0xffff); | 353 | manuf = ((ver >> 48) & 0xffff); |
351 | impl = ((ver >> 32) & 0xffff); | 354 | impl = ((ver >> 32) & 0xffff); |
@@ -354,20 +357,16 @@ static int __init us2e_freq_init(void) | |||
354 | struct cpufreq_driver *driver; | 357 | struct cpufreq_driver *driver; |
355 | 358 | ||
356 | ret = -ENOMEM; | 359 | ret = -ENOMEM; |
357 | driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | 360 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); |
358 | if (!driver) | 361 | if (!driver) |
359 | goto err_out; | 362 | goto err_out; |
360 | memset(driver, 0, sizeof(*driver)); | ||
361 | 363 | ||
362 | us2e_freq_table = kmalloc( | 364 | us2e_freq_table = kzalloc( |
363 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), | 365 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), |
364 | GFP_KERNEL); | 366 | GFP_KERNEL); |
365 | if (!us2e_freq_table) | 367 | if (!us2e_freq_table) |
366 | goto err_out; | 368 | goto err_out; |
367 | 369 | ||
368 | memset(us2e_freq_table, 0, | ||
369 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); | ||
370 | |||
371 | driver->init = us2e_freq_cpu_init; | 370 | driver->init = us2e_freq_cpu_init; |
372 | driver->verify = us2e_freq_verify; | 371 | driver->verify = us2e_freq_verify; |
373 | driver->target = us2e_freq_target; | 372 | driver->target = us2e_freq_target; |
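The kzalloc() conversions in this hunk (and in the us3_cpufreq.c hunk below) are behavior-preserving: on success, kzalloc(n, flags) is equivalent to kmalloc(n, flags) followed by memset(p, 0, n). Schematically, with p and sz standing in for the driver and table allocations:

	p = kmalloc(sz, GFP_KERNEL);	/* before */
	if (!p)
		goto err_out;
	memset(p, 0, sz);

	p = kzalloc(sz, GFP_KERNEL);	/* after: one call, already zeroed */
	if (!p)
		goto err_out;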
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 6d1f9a3c464f..47e3acafb5be 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c | |||
@@ -203,6 +203,9 @@ static int __init us3_freq_init(void) | |||
203 | unsigned long manuf, impl, ver; | 203 | unsigned long manuf, impl, ver; |
204 | int ret; | 204 | int ret; |
205 | 205 | ||
206 | if (tlb_type != cheetah && tlb_type != cheetah_plus) | ||
207 | return -ENODEV; | ||
208 | |||
206 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | 209 | __asm__("rdpr %%ver, %0" : "=r" (ver)); |
207 | manuf = ((ver >> 48) & 0xffff); | 210 | manuf = ((ver >> 48) & 0xffff); |
208 | impl = ((ver >> 32) & 0xffff); | 211 | impl = ((ver >> 32) & 0xffff); |
@@ -215,20 +218,16 @@ static int __init us3_freq_init(void) | |||
215 | struct cpufreq_driver *driver; | 218 | struct cpufreq_driver *driver; |
216 | 219 | ||
217 | ret = -ENOMEM; | 220 | ret = -ENOMEM; |
218 | driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | 221 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); |
219 | if (!driver) | 222 | if (!driver) |
220 | goto err_out; | 223 | goto err_out; |
221 | memset(driver, 0, sizeof(*driver)); | ||
222 | 224 | ||
223 | us3_freq_table = kmalloc( | 225 | us3_freq_table = kzalloc( |
224 | (NR_CPUS * sizeof(struct us3_freq_percpu_info)), | 226 | (NR_CPUS * sizeof(struct us3_freq_percpu_info)), |
225 | GFP_KERNEL); | 227 | GFP_KERNEL); |
226 | if (!us3_freq_table) | 228 | if (!us3_freq_table) |
227 | goto err_out; | 229 | goto err_out; |
228 | 230 | ||
229 | memset(us3_freq_table, 0, | ||
230 | (NR_CPUS * sizeof(struct us3_freq_percpu_info))); | ||
231 | |||
232 | driver->init = us3_freq_cpu_init; | 231 | driver->init = us3_freq_cpu_init; |
233 | driver->verify = us3_freq_verify; | 232 | driver->verify = us3_freq_verify; |
234 | driver->target = us3_freq_target; | 233 | driver->target = us3_freq_target; |
diff --git a/arch/sparc64/kernel/visemul.c b/arch/sparc64/kernel/visemul.c new file mode 100644 index 000000000000..84fedaa38aae --- /dev/null +++ b/arch/sparc64/kernel/visemul.c | |||
@@ -0,0 +1,894 @@ | |||
1 | /* visemul.c: Emulation of VIS instructions. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/thread_info.h> | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/pstate.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/fpumacro.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* OPF field of various VIS instructions. */ | ||
16 | |||
17 | /* 000111011 - four 16-bit packs */ | ||
18 | #define FPACK16_OPF 0x03b | ||
19 | |||
20 | /* 000111010 - two 32-bit packs */ | ||
21 | #define FPACK32_OPF 0x03a | ||
22 | |||
23 | /* 000111101 - two 32-bit to 16-bit fixed packs */ | ||
24 | #define FPACKFIX_OPF 0x03d | ||
25 | |||
26 | /* 001001101 - four 16-bit expands */ | ||
27 | #define FEXPAND_OPF 0x04d | ||
28 | |||
29 | /* 001001011 - two 32-bit merges */ | ||
30 | #define FPMERGE_OPF 0x04b | ||
31 | |||
32 | /* 000110001 - 8-by-16-bit partitioned product */ | ||
33 | #define FMUL8x16_OPF 0x031 | ||
34 | |||
35 | /* 000110011 - 8-by-16-bit upper alpha partitioned product */ | ||
36 | #define FMUL8x16AU_OPF 0x033 | ||
37 | |||
38 | /* 000110101 - 8-by-16-bit lower alpha partitioned product */ | ||
39 | #define FMUL8x16AL_OPF 0x035 | ||
40 | |||
41 | /* 000110110 - upper 8-by-16-bit partitioned product */ | ||
42 | #define FMUL8SUx16_OPF 0x036 | ||
43 | |||
44 | /* 000110111 - lower 8-by-16-bit partitioned product */ | ||
45 | #define FMUL8ULx16_OPF 0x037 | ||
46 | |||
47 | /* 000111000 - upper 8-by-16-bit partitioned product */ | ||
48 | #define FMULD8SUx16_OPF 0x038 | ||
49 | |||
50 | /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ | ||
51 | #define FMULD8ULx16_OPF 0x039 | ||
52 | |||
53 | /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ | ||
54 | #define FCMPGT16_OPF 0x028 | ||
55 | |||
56 | /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ | ||
57 | #define FCMPGT32_OPF 0x02c | ||
58 | |||
59 | /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ | ||
60 | #define FCMPLE16_OPF 0x020 | ||
61 | |||
62 | /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ | ||
63 | #define FCMPLE32_OPF 0x024 | ||
64 | |||
65 | /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ | ||
66 | #define FCMPNE16_OPF 0x022 | ||
67 | |||
68 | /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ | ||
69 | #define FCMPNE32_OPF 0x026 | ||
70 | |||
71 | /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ | ||
72 | #define FCMPEQ16_OPF 0x02a | ||
73 | |||
74 | /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ | ||
75 | #define FCMPEQ32_OPF 0x02e | ||
76 | |||
77 | /* 000000000 - Eight 8-bit edge boundary processing */ | ||
78 | #define EDGE8_OPF 0x000 | ||
79 | |||
80 | /* 000000001 - Eight 8-bit edge boundary processing, no CC */ | ||
81 | #define EDGE8N_OPF 0x001 | ||
82 | |||
83 | /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ | ||
84 | #define EDGE8L_OPF 0x002 | ||
85 | |||
86 | /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ | ||
87 | #define EDGE8LN_OPF 0x003 | ||
88 | |||
89 | /* 000000100 - Four 16-bit edge boundary processing */ | ||
90 | #define EDGE16_OPF 0x004 | ||
91 | |||
92 | /* 000000101 - Four 16-bit edge boundary processing, no CC */ | ||
93 | #define EDGE16N_OPF 0x005 | ||
94 | |||
95 | /* 000000110 - Four 16-bit edge boundary processing, little-endian */ | ||
96 | #define EDGE16L_OPF 0x006 | ||
97 | |||
98 | /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ | ||
99 | #define EDGE16LN_OPF 0x007 | ||
100 | |||
101 | /* 000001000 - Two 32-bit edge boundary processing */ | ||
102 | #define EDGE32_OPF 0x008 | ||
103 | |||
104 | /* 000001001 - Two 32-bit edge boundary processing, no CC */ | ||
105 | #define EDGE32N_OPF 0x009 | ||
106 | |||
107 | /* 000001010 - Two 32-bit edge boundary processing, little-endian */ | ||
108 | #define EDGE32L_OPF 0x00a | ||
109 | |||
110 | /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ | ||
111 | #define EDGE32LN_OPF 0x00b | ||
112 | |||
113 | /* 000111110 - distance between 8 8-bit components */ | ||
114 | #define PDIST_OPF 0x03e | ||
115 | |||
116 | /* 000010000 - convert 8-bit 3-D address to blocked byte address */ | ||
117 | #define ARRAY8_OPF 0x010 | ||
118 | |||
119 | /* 000010010 - convert 16-bit 3-D address to blocked byte address */ | ||
120 | #define ARRAY16_OPF 0x012 | ||
121 | |||
122 | /* 000010100 - convert 32-bit 3-D address to blocked byte address */ | ||
123 | #define ARRAY32_OPF 0x014 | ||
124 | |||
125 | /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ | ||
126 | #define BMASK_OPF 0x019 | ||
127 | |||
128 | /* 001001100 - Permute bytes as specified by GSR.MASK */ | ||
129 | #define BSHUFFLE_OPF 0x04c | ||
130 | |||
131 | #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) | ||
132 | #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) | ||
133 | |||
134 | #define VIS_OPF_SHIFT 5 | ||
135 | #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) | ||
136 | |||
137 | #define RS1(INSN) (((INSN) >> 14) & 0x1f) | ||
138 | #define RS2(INSN) (((INSN) >> 0) & 0x1f) | ||
139 | #define RD(INSN) (((INSN) >> 25) & 0x1f) | ||
140 | |||
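For orientation, a worked decode under the macros above; the field values are illustrative, not taken from the patch. VIS ops are SPARC format-3 instructions: op in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14, opf in 13:5, rs2 in 4:0.

	u32 insn = (2u << 30) | (4u << 25) | (0x36u << 19) |
		   (1u << 14) | (FPACK16_OPF << 5) | 2u;

	/* (insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL holds, and:
	 *   (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT == FPACK16_OPF
	 *   RD(insn) == 4, RS1(insn) == 1, RS2(insn) == 2
	 */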
141 | static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, | ||
142 | unsigned int rd, int from_kernel) | ||
143 | { | ||
144 | if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { | ||
145 | if (from_kernel != 0) | ||
146 | __asm__ __volatile__("flushw"); | ||
147 | else | ||
148 | flushw_user(); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | ||
153 | { | ||
154 | unsigned long value; | ||
155 | |||
156 | if (reg < 16) | ||
157 | return (!reg ? 0 : regs->u_regs[reg]); | ||
158 | if (regs->tstate & TSTATE_PRIV) { | ||
159 | struct reg_window *win; | ||
160 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
161 | value = win->locals[reg - 16]; | ||
162 | } else if (test_thread_flag(TIF_32BIT)) { | ||
163 | struct reg_window32 __user *win32; | ||
164 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | ||
165 | get_user(value, &win32->locals[reg - 16]); | ||
166 | } else { | ||
167 | struct reg_window __user *win; | ||
168 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
169 | get_user(value, &win->locals[reg - 16]); | ||
170 | } | ||
171 | return value; | ||
172 | } | ||
173 | |||
174 | static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, | ||
175 | struct pt_regs *regs) | ||
176 | { | ||
177 | BUG_ON(reg < 16); | ||
178 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
179 | |||
180 | if (test_thread_flag(TIF_32BIT)) { | ||
181 | struct reg_window32 __user *win32; | ||
182 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | ||
183 | return (unsigned long __user *)&win32->locals[reg - 16]; | ||
184 | } else { | ||
185 | struct reg_window __user *win; | ||
186 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | ||
187 | return &win->locals[reg - 16]; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, | ||
192 | struct pt_regs *regs) | ||
193 | { | ||
194 | BUG_ON(reg >= 16); | ||
195 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
196 | |||
197 | return &regs->u_regs[reg]; | ||
198 | } | ||
199 | |||
200 | static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) | ||
201 | { | ||
202 | if (rd < 16) { | ||
203 | unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); | ||
204 | |||
205 | *rd_kern = val; | ||
206 | } else { | ||
207 | unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); | ||
208 | |||
209 | if (test_thread_flag(TIF_32BIT)) | ||
210 | __put_user((u32)val, (u32 __user *)rd_user); | ||
211 | else | ||
212 | __put_user(val, rd_user); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static inline unsigned long fpd_regval(struct fpustate *f, | ||
217 | unsigned int insn_regnum) | ||
218 | { | ||
219 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
220 | (insn_regnum & 0x1e)); | ||
221 | |||
222 | return *(unsigned long *) &f->regs[insn_regnum]; | ||
223 | } | ||
224 | |||
225 | static inline unsigned long *fpd_regaddr(struct fpustate *f, | ||
226 | unsigned int insn_regnum) | ||
227 | { | ||
228 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
229 | (insn_regnum & 0x1e)); | ||
230 | |||
231 | return (unsigned long *) &f->regs[insn_regnum]; | ||
232 | } | ||
233 | |||
234 | static inline unsigned int fps_regval(struct fpustate *f, | ||
235 | unsigned int insn_regnum) | ||
236 | { | ||
237 | return f->regs[insn_regnum]; | ||
238 | } | ||
239 | |||
240 | static inline unsigned int *fps_regaddr(struct fpustate *f, | ||
241 | unsigned int insn_regnum) | ||
242 | { | ||
243 | return &f->regs[insn_regnum]; | ||
244 | } | ||
245 | |||
246 | struct edge_tab { | ||
247 | u16 left, right; | ||
248 | }; | ||
249 | struct edge_tab edge8_tab[8] = { | ||
250 | { 0xff, 0x80 }, | ||
251 | { 0x7f, 0xc0 }, | ||
252 | { 0x3f, 0xe0 }, | ||
253 | { 0x1f, 0xf0 }, | ||
254 | { 0x0f, 0xf8 }, | ||
255 | { 0x07, 0xfc }, | ||
256 | { 0x03, 0xfe }, | ||
257 | { 0x01, 0xff }, | ||
258 | }; | ||
259 | struct edge_tab edge8_tab_l[8] = { | ||
260 | { 0xff, 0x01 }, | ||
261 | { 0xfe, 0x03 }, | ||
262 | { 0xfc, 0x07 }, | ||
263 | { 0xf8, 0x0f }, | ||
264 | { 0xf0, 0x1f }, | ||
265 | { 0xe0, 0x3f }, | ||
266 | { 0xc0, 0x7f }, | ||
267 | { 0x80, 0xff }, | ||
268 | }; | ||
269 | struct edge_tab edge16_tab[4] = { | ||
270 | { 0xf, 0x8 }, | ||
271 | { 0x7, 0xc }, | ||
272 | { 0x3, 0xe }, | ||
273 | { 0x1, 0xf }, | ||
274 | }; | ||
275 | struct edge_tab edge16_tab_l[4] = { | ||
276 | { 0xf, 0x1 }, | ||
277 | { 0xe, 0x3 }, | ||
278 | { 0xc, 0x7 }, | ||
279 | { 0x8, 0xf }, | ||
280 | }; | ||
281 | struct edge_tab edge32_tab[2] = { | ||
282 | { 0x3, 0x2 }, | ||
283 | { 0x1, 0x3 }, | ||
284 | }; | ||
285 | struct edge_tab edge32_tab_l[2] = { | ||
286 | { 0x3, 0x1 }, | ||
287 | { 0x2, 0x3 }, | ||
288 | }; | ||
289 | |||
290 | static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
291 | { | ||
292 | unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; | ||
293 | u16 left, right; | ||
294 | |||
295 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
296 | orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); | ||
297 | orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); | ||
298 | |||
299 | if (test_thread_flag(TIF_32BIT)) { | ||
300 | rs1 = rs1 & 0xffffffff; | ||
301 | rs2 = rs2 & 0xffffffff; | ||
302 | } | ||
303 | switch (opf) { | ||
304 | default: | ||
305 | case EDGE8_OPF: | ||
306 | case EDGE8N_OPF: | ||
307 | left = edge8_tab[rs1 & 0x7].left; | ||
308 | right = edge8_tab[rs2 & 0x7].right; | ||
309 | break; | ||
310 | case EDGE8L_OPF: | ||
311 | case EDGE8LN_OPF: | ||
312 | left = edge8_tab_l[rs1 & 0x7].left; | ||
313 | right = edge8_tab_l[rs2 & 0x7].right; | ||
314 | break; | ||
315 | |||
316 | case EDGE16_OPF: | ||
317 | case EDGE16N_OPF: | ||
318 | left = edge16_tab[(rs1 >> 1) & 0x3].left; | ||
319 | right = edge16_tab[(rs2 >> 1) & 0x3].right; | ||
320 | break; | ||
321 | |||
322 | case EDGE16L_OPF: | ||
323 | case EDGE16LN_OPF: | ||
324 | left = edge16_tab_l[(rs1 >> 1) & 0x3].left; | ||
325 | right = edge16_tab_l[(rs2 >> 1) & 0x3].right; | ||
326 | break; | ||
327 | |||
328 | case EDGE32_OPF: | ||
329 | case EDGE32N_OPF: | ||
330 | left = edge32_tab[(rs1 >> 2) & 0x1].left; | ||
331 | right = edge32_tab[(rs2 >> 2) & 0x1].right; | ||
332 | break; | ||
333 | |||
334 | case EDGE32L_OPF: | ||
335 | case EDGE32LN_OPF: | ||
336 | left = edge32_tab_l[(rs1 >> 2) & 0x1].left; | ||
337 | right = edge32_tab_l[(rs2 >> 2) & 0x1].right; | ||
338 | break; | ||
339 | } | ||
340 | |||
341 | if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) | ||
342 | rd_val = right & left; | ||
343 | else | ||
344 | rd_val = left; | ||
345 | |||
346 | store_reg(regs, rd_val, RD(insn)); | ||
347 | |||
348 | switch (opf) { | ||
349 | case EDGE8_OPF: | ||
350 | case EDGE8L_OPF: | ||
351 | case EDGE16_OPF: | ||
352 | case EDGE16L_OPF: | ||
353 | case EDGE32_OPF: | ||
354 | case EDGE32L_OPF: { | ||
355 | unsigned long ccr, tstate; | ||
356 | |||
357 | __asm__ __volatile__("subcc %1, %2, %%g0\n\t" | ||
358 | "rd %%ccr, %0" | ||
359 | : "=r" (ccr) | ||
360 | : "r" (orig_rs1), "r" (orig_rs2) | ||
361 | : "cc"); | ||
362 | tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); | ||
363 | regs->tstate = tstate | (ccr << 32UL); | ||
364 | } | ||
365 | } | ||
366 | } | ||
367 | |||
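A worked EDGE8 case under the tables above (the addresses are illustrative). The mask bit for byte 0 is the most significant bit:

	/* rs1 = 0x1003, rs2 = 0x1006 */
	u16 left  = edge8_tab[0x1003 & 0x7].left;	/* 0x1f: bytes 3..7 */
	u16 right = edge8_tab[0x1006 & 0x7].right;	/* 0xfe: bytes 0..6 */
	/* both addresses fall in the same 8-byte block, so
	 * rd_val = left & right = 0x1e, i.e. bytes 3..6 inclusive */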
368 | static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
369 | { | ||
370 | unsigned long rs1, rs2, rd_val; | ||
371 | unsigned int bits, bits_mask; | ||
372 | |||
373 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
374 | rs1 = fetch_reg(RS1(insn), regs); | ||
375 | rs2 = fetch_reg(RS2(insn), regs); | ||
376 | |||
377 | bits = (rs2 > 5 ? 5 : rs2); | ||
378 | bits_mask = (1UL << bits) - 1UL; | ||
379 | |||
380 | rd_val = ((((rs1 >> 11) & 0x3) << 0) | | ||
381 | (((rs1 >> 33) & 0x3) << 2) | | ||
382 | (((rs1 >> 55) & 0x1) << 4) | | ||
383 | (((rs1 >> 13) & 0xf) << 5) | | ||
384 | (((rs1 >> 35) & 0xf) << 9) | | ||
385 | (((rs1 >> 56) & 0xf) << 13) | | ||
386 | (((rs1 >> 17) & bits_mask) << 17) | | ||
387 | (((rs1 >> 39) & bits_mask) << (17 + bits)) | | ||
388 | (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); | ||
389 | |||
390 | switch (opf) { | ||
391 | case ARRAY16_OPF: | ||
392 | rd_val <<= 1; | ||
393 | break; | ||
394 | |||
395 | case ARRAY32_OPF: | ||
396 | rd_val <<= 2; | ||
397 | }; | ||
398 | |||
399 | store_reg(regs, rd_val, RD(insn)); | ||
400 | } | ||
401 | |||
402 | static void bmask(struct pt_regs *regs, unsigned int insn) | ||
403 | { | ||
404 | unsigned long rs1, rs2, rd_val, gsr; | ||
405 | |||
406 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
407 | rs1 = fetch_reg(RS1(insn), regs); | ||
408 | rs2 = fetch_reg(RS2(insn), regs); | ||
409 | rd_val = rs1 + rs2; | ||
410 | |||
411 | store_reg(regs, rd_val, RD(insn)); | ||
412 | |||
413 | gsr = current_thread_info()->gsr[0] & 0xffffffff; | ||
414 | gsr |= rd_val << 32UL; | ||
415 | current_thread_info()->gsr[0] = gsr; | ||
416 | } | ||
417 | |||
418 | static void bshuffle(struct pt_regs *regs, unsigned int insn) | ||
419 | { | ||
420 | struct fpustate *f = FPUSTATE; | ||
421 | unsigned long rs1, rs2, rd_val; | ||
422 | unsigned long bmask, i; | ||
423 | |||
424 | bmask = current_thread_info()->gsr[0] >> 32UL; | ||
425 | |||
426 | rs1 = fpd_regval(f, RS1(insn)); | ||
427 | rs2 = fpd_regval(f, RS2(insn)); | ||
428 | |||
429 | rd_val = 0UL; | ||
430 | for (i = 0; i < 8; i++) { | ||
431 | unsigned long which = (bmask >> (i * 4)) & 0xf; | ||
432 | unsigned long byte; | ||
433 | |||
434 | if (which < 8) | ||
435 | byte = (rs1 >> (which * 8)) & 0xff; | ||
436 | else | ||
437 | byte = (rs2 >> ((which-8)*8)) & 0xff; | ||
438 | rd_val |= (byte << (i * 8)); | ||
439 | } | ||
440 | |||
441 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
442 | } | ||
443 | |||
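A worked BSHUFFLE example: nibble i of GSR.mask (as stored by bmask() above) selects output byte i, with values 0-7 indexing bytes of rs1 and 8-15 indexing bytes of rs2. The mask values are illustrative:

	/* bmask = 0x76543210: nibble i == i,     so rd == rs1 unchanged */
	/* bmask = 0xfedcba98: nibble i == i + 8, so rd == rs2           */
	unsigned long which = (0x76543210UL >> (3 * 4)) & 0xf;	/* 3 */
	/* output byte 3 = (rs1 >> (3 * 8)) & 0xff */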
444 | static void pdist(struct pt_regs *regs, unsigned int insn) | ||
445 | { | ||
446 | struct fpustate *f = FPUSTATE; | ||
447 | unsigned long rs1, rs2, *rd, rd_val; | ||
448 | unsigned long i; | ||
449 | |||
450 | rs1 = fpd_regval(f, RS1(insn)); | ||
451 | rs2 = fpd_regval(f, RS2(insn)); | ||
452 | rd = fpd_regaddr(f, RD(insn)); | ||
453 | |||
454 | rd_val = *rd; | ||
455 | |||
456 | for (i = 0; i < 8; i++) { | ||
457 | s16 s1, s2; | ||
458 | |||
459 | s1 = (rs1 >> (56 - (i * 8))) & 0xff; | ||
460 | s2 = (rs2 >> (56 - (i * 8))) & 0xff; | ||
461 | |||
462 | /* Absolute value of difference. */ | ||
463 | s1 -= s2; | ||
464 | if (s1 < 0) | ||
465 | s1 = ~s1 + 1; | ||
466 | |||
467 | rd_val += s1; | ||
468 | } | ||
469 | |||
470 | *rd = rd_val; | ||
471 | } | ||
472 | |||
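pdist() computes a sum of absolute byte differences and, unlike the other emulations here, accumulates into the existing rd value rather than replacing it. One lane, worked (values illustrative):

	s16 s1 = 0x10, s2 = 0x80;	/* bytes, zero-extended */
	s1 -= s2;			/* -0x70 */
	if (s1 < 0)
		s1 = ~s1 + 1;		/* two's complement negate: 0x70 */
	/* rd_val += 0x70 for this lane; eight lanes in total */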
473 | static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
474 | { | ||
475 | struct fpustate *f = FPUSTATE; | ||
476 | unsigned long rs1, rs2, gsr, scale, rd_val; | ||
477 | |||
478 | gsr = current_thread_info()->gsr[0]; | ||
479 | scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); | ||
480 | switch (opf) { | ||
481 | case FPACK16_OPF: { | ||
482 | unsigned long byte; | ||
483 | |||
484 | rs2 = fpd_regval(f, RS2(insn)); | ||
485 | rd_val = 0; | ||
486 | for (byte = 0; byte < 4; byte++) { | ||
487 | unsigned int val; | ||
488 | s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; | ||
489 | int scaled = src << scale; | ||
490 | int from_fixed = scaled >> 7; | ||
491 | |||
492 | val = ((from_fixed < 0) ? | ||
493 | 0 : | ||
494 | (from_fixed > 255) ? | ||
495 | 255 : from_fixed); | ||
496 | |||
497 | rd_val |= (val << (8 * byte)); | ||
498 | } | ||
499 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
500 | break; | ||
501 | } | ||
502 | |||
503 | case FPACK32_OPF: { | ||
504 | unsigned long word; | ||
505 | |||
506 | rs1 = fpd_regval(f, RS1(insn)); | ||
507 | rs2 = fpd_regval(f, RS2(insn)); | ||
508 | rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); | ||
509 | for (word = 0; word < 2; word++) { | ||
510 | unsigned long val; | ||
511 | s32 src = (rs2 >> (word * 32UL)); | ||
512 | s64 scaled = src << scale; | ||
513 | s64 from_fixed = scaled >> 23; | ||
514 | |||
515 | val = ((from_fixed < 0) ? | ||
516 | 0 : | ||
517 | (from_fixed > 255) ? | ||
518 | 255 : from_fixed); | ||
519 | |||
520 | rd_val |= (val << (32 * word)); | ||
521 | } | ||
522 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
523 | break; | ||
524 | } | ||
525 | |||
526 | case FPACKFIX_OPF: { | ||
527 | unsigned long word; | ||
528 | |||
529 | rs2 = fpd_regval(f, RS2(insn)); | ||
530 | |||
531 | rd_val = 0; | ||
532 | for (word = 0; word < 2; word++) { | ||
533 | long val; | ||
534 | s32 src = (rs2 >> (word * 32UL)); | ||
535 | s64 scaled = src << scale; | ||
536 | s64 from_fixed = scaled >> 16; | ||
537 | |||
538 | val = ((from_fixed < -32768) ? | ||
539 | -32768 : | ||
540 | (from_fixed > 32767) ? | ||
541 | 32767 : from_fixed); | ||
542 | |||
543 | rd_val |= ((val & 0xffff) << (word * 16)); | ||
544 | } | ||
545 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
546 | break; | ||
547 | } | ||
548 | |||
549 | case FEXPAND_OPF: { | ||
550 | unsigned long byte; | ||
551 | |||
552 | rs2 = fps_regval(f, RS2(insn)); | ||
553 | |||
554 | rd_val = 0; | ||
555 | for (byte = 0; byte < 4; byte++) { | ||
556 | unsigned long val; | ||
557 | u8 src = (rs2 >> (byte * 8)) & 0xff; | ||
558 | |||
559 | val = src << 4; | ||
560 | |||
561 | rd_val |= (val << (byte * 16)); | ||
562 | } | ||
563 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
564 | break; | ||
565 | } | ||
566 | |||
567 | case FPMERGE_OPF: { | ||
568 | rs1 = fps_regval(f, RS1(insn)); | ||
569 | rs2 = fps_regval(f, RS2(insn)); | ||
570 | |||
571 | rd_val = (((rs2 & 0x000000ff) << 0) | | ||
572 | ((rs1 & 0x000000ff) << 8) | | ||
573 | ((rs2 & 0x0000ff00) << 8) | | ||
574 | ((rs1 & 0x0000ff00) << 16) | | ||
575 | ((rs2 & 0x00ff0000) << 16) | | ||
576 | ((rs1 & 0x00ff0000) << 24) | | ||
577 | ((rs2 & 0xff000000) << 24) | | ||
578 | ((rs1 & 0xff000000) << 32)); | ||
579 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
580 | break; | ||
581 | } | ||
582 | } | ||
583 | } | ||
584 | |||
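In the FPACK16 case above, each 16-bit lane is treated as fixed point: shifted left by GSR.scale, truncated at bit 7, then clamped to 0..255. One lane, worked (values illustrative):

	s16 src = 0x0123;		/* lane value */
	int scaled = src << 3;		/* GSR.scale = 3 -> 0x0918 */
	int from_fixed = scaled >> 7;	/* 0x12, inside 0..255: no clamp */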
585 | static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
586 | { | ||
587 | struct fpustate *f = FPUSTATE; | ||
588 | unsigned long rs1, rs2, rd_val; | ||
589 | |||
590 | switch (opf) { | ||
591 | case FMUL8x16_OPF: { | ||
592 | unsigned long byte; | ||
593 | |||
594 | rs1 = fps_regval(f, RS1(insn)); | ||
595 | rs2 = fpd_regval(f, RS2(insn)); | ||
596 | |||
597 | rd_val = 0; | ||
598 | for (byte = 0; byte < 4; byte++) { | ||
599 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
600 | s16 src2 = (rs2 >> (byte * 16)) & 0xffff; | ||
601 | u32 prod = src1 * src2; | ||
602 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
603 | |||
604 | /* Round up. */ | ||
605 | if (prod & 0x80) | ||
606 | scaled++; | ||
607 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
608 | } | ||
609 | |||
610 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
611 | break; | ||
612 | } | ||
613 | |||
614 | case FMUL8x16AU_OPF: | ||
615 | case FMUL8x16AL_OPF: { | ||
616 | unsigned long byte; | ||
617 | s16 src2; | ||
618 | |||
619 | rs1 = fps_regval(f, RS1(insn)); | ||
620 | rs2 = fps_regval(f, RS2(insn)); | ||
621 | |||
622 | rd_val = 0; | ||
623 | src2 = rs2 >> ((opf == FMUL8x16AU_OPF) ? 16 : 0); | ||
624 | for (byte = 0; byte < 4; byte++) { | ||
625 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
626 | u32 prod = src1 * src2; | ||
627 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
628 | |||
629 | /* Round up. */ | ||
630 | if (prod & 0x80) | ||
631 | scaled++; | ||
632 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
633 | } | ||
634 | |||
635 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
636 | break; | ||
637 | } | ||
638 | |||
639 | case FMUL8SUx16_OPF: | ||
640 | case FMUL8ULx16_OPF: { | ||
641 | unsigned long byte, ushift; | ||
642 | |||
643 | rs1 = fpd_regval(f, RS1(insn)); | ||
644 | rs2 = fpd_regval(f, RS2(insn)); | ||
645 | |||
646 | rd_val = 0; | ||
647 | ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; | ||
648 | for (byte = 0; byte < 4; byte++) { | ||
649 | u16 src1; | ||
650 | s16 src2; | ||
651 | u32 prod; | ||
652 | u16 scaled; | ||
653 | |||
654 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
655 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
656 | prod = src1 * src2; | ||
657 | scaled = ((prod & 0x00ffff00) >> 8); | ||
658 | |||
659 | /* Round up. */ | ||
660 | if (prod & 0x80) | ||
661 | scaled++; | ||
662 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
663 | } | ||
664 | |||
665 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
666 | break; | ||
667 | } | ||
668 | |||
669 | case FMULD8SUx16_OPF: | ||
670 | case FMULD8ULx16_OPF: { | ||
671 | unsigned long byte, ushift; | ||
672 | |||
673 | rs1 = fps_regval(f, RS1(insn)); | ||
674 | rs2 = fps_regval(f, RS2(insn)); | ||
675 | |||
676 | rd_val = 0; | ||
677 | ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; | ||
678 | for (byte = 0; byte < 2; byte++) { | ||
679 | u16 src1; | ||
680 | s16 src2; | ||
681 | u32 prod; | ||
682 | u16 scaled; | ||
683 | |||
684 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
685 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
686 | prod = src1 * src2; | ||
687 | scaled = ((prod & 0x00ffff00) >> 8); | ||
688 | |||
689 | /* Round up. */ | ||
690 | if (prod & 0x80) | ||
691 | scaled++; | ||
692 | rd_val |= ((scaled & 0xffffUL) << | ||
693 | ((byte * 32UL) + 7UL)); | ||
694 | } | ||
695 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
696 | break; | ||
697 | } | ||
698 | } | ||
699 | } | ||
700 | |||
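The prod & 0x80 test in the multiply cases above inspects the highest bit discarded by the >> 8 scaling step, giving round-to-nearest. Worked (values illustrative):

	u32 prod = 0x180;			/* 1.5 in the scaled domain */
	u16 scaled = (prod & 0x00ffff00) >> 8;	/* 1 */
	if (prod & 0x80)			/* dropped half-bit is set */
		scaled++;			/* rounds 1.5 up to 2 */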
701 | static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
702 | { | ||
703 | struct fpustate *f = FPUSTATE; | ||
704 | unsigned long rs1, rs2, rd_val, i; | ||
705 | |||
706 | rs1 = fpd_regval(f, RS1(insn)); | ||
707 | rs2 = fpd_regval(f, RS2(insn)); | ||
708 | |||
709 | rd_val = 0; | ||
710 | |||
711 | switch (opf) { | ||
712 | case FCMPGT16_OPF: | ||
713 | for (i = 0; i < 4; i++) { | ||
714 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
715 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
716 | |||
717 | if (a > b) | ||
718 | rd_val |= 1 << i; | ||
719 | } | ||
720 | break; | ||
721 | |||
722 | case FCMPGT32_OPF: | ||
723 | for (i = 0; i < 2; i++) { | ||
724 | s32 a = (rs1 >> (i * 32)) & 0xffffffff; | ||
725 | s32 b = (rs2 >> (i * 32)) & 0xffffffff; | ||
726 | |||
727 | if (a > b) | ||
728 | rd_val |= 1 << i; | ||
729 | } | ||
730 | break; | ||
731 | |||
732 | case FCMPLE16_OPF: | ||
733 | for (i = 0; i < 4; i++) { | ||
734 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
735 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
736 | |||
737 | if (a <= b) | ||
738 | rd_val |= 1 << i; | ||
739 | } | ||
740 | break; | ||
741 | |||
742 | case FCMPLE32_OPF: | ||
743 | for (i = 0; i < 2; i++) { | ||
744 | s32 a = (rs1 >> (i * 32)) & 0xffffffff; | ||
745 | s32 b = (rs2 >> (i * 32)) & 0xffffffff; | ||
746 | |||
747 | if (a <= b) | ||
748 | rd_val |= 1 << i; | ||
749 | } | ||
750 | break; | ||
751 | |||
752 | case FCMPNE16_OPF: | ||
753 | for (i = 0; i < 4; i++) { | ||
754 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
755 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
756 | |||
757 | if (a != b) | ||
758 | rd_val |= 1 << i; | ||
759 | } | ||
760 | break; | ||
761 | |||
762 | case FCMPNE32_OPF: | ||
763 | for (i = 0; i < 2; i++) { | ||
764 | s32 a = (rs1 >> (i * 32)) & 0xffffffff; | ||
765 | s32 b = (rs2 >> (i * 32)) & 0xffffffff; | ||
766 | |||
767 | if (a != b) | ||
768 | rd_val |= 1 << i; | ||
769 | } | ||
770 | break; | ||
771 | |||
772 | case FCMPEQ16_OPF: | ||
773 | for (i = 0; i < 4; i++) { | ||
774 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
775 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
776 | |||
777 | if (a == b) | ||
778 | rd_val |= 1 << i; | ||
779 | } | ||
780 | break; | ||
781 | |||
782 | case FCMPEQ32_OPF: | ||
783 | for (i = 0; i < 2; i++) { | ||
784 | s32 a = (rs1 >> (i * 32)) & 0xffffffff; | ||
785 | s32 b = (rs2 >> (i * 32)) & 0xffffffff; | ||
786 | |||
787 | if (a == b) | ||
788 | rd_val |= 1 << i; | ||
789 | } | ||
790 | break; | ||
791 | } | ||
792 | |||
793 | maybe_flush_windows(0, 0, RD(insn), 0); | ||
794 | store_reg(regs, rd_val, RD(insn)); | ||
795 | } | ||
796 | |||
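Each compare above produces a per-lane bitmask in an integer register, bit i reflecting lane i (lane 0 is the least significant 16 or 32 bits). A worked FCMPGT16 (operand values illustrative):

	/* rs1 = 0x0002ffff00020003, rs2 = 0x0002000000050001
	 * lane0:  3 >  1 -> bit 0   lane1:  2 >  5 -> no
	 * lane2: -1 >  0 -> no      lane3:  2 >  2 -> no
	 * rd_val == 0x1
	 */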
797 | /* Emulate the VIS instructions which are not implemented in | ||
798 | * hardware on Niagara. | ||
799 | */ | ||
800 | int vis_emul(struct pt_regs *regs, unsigned int insn) | ||
801 | { | ||
802 | unsigned long pc = regs->tpc; | ||
803 | unsigned int opf; | ||
804 | |||
805 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
806 | |||
807 | if (test_thread_flag(TIF_32BIT)) | ||
808 | pc = (u32)pc; | ||
809 | |||
810 | if (get_user(insn, (u32 __user *) pc)) | ||
811 | return -EFAULT; | ||
812 | |||
813 | if ((insn & VIS_OPCODE_MASK) != VIS_OPCODE_VAL) | ||
814 | return -EINVAL; | ||
815 | |||
816 | opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; | ||
817 | switch (opf) { | ||
818 | default: | ||
819 | return -EINVAL; | ||
820 | |||
821 | /* Pixel Formatting Instructions. */ | ||
822 | case FPACK16_OPF: | ||
823 | case FPACK32_OPF: | ||
824 | case FPACKFIX_OPF: | ||
825 | case FEXPAND_OPF: | ||
826 | case FPMERGE_OPF: | ||
827 | pformat(regs, insn, opf); | ||
828 | break; | ||
829 | |||
830 | /* Partitioned Multiply Instructions */ | ||
831 | case FMUL8x16_OPF: | ||
832 | case FMUL8x16AU_OPF: | ||
833 | case FMUL8x16AL_OPF: | ||
834 | case FMUL8SUx16_OPF: | ||
835 | case FMUL8ULx16_OPF: | ||
836 | case FMULD8SUx16_OPF: | ||
837 | case FMULD8ULx16_OPF: | ||
838 | pmul(regs, insn, opf); | ||
839 | break; | ||
840 | |||
841 | /* Pixel Compare Instructions */ | ||
842 | case FCMPGT16_OPF: | ||
843 | case FCMPGT32_OPF: | ||
844 | case FCMPLE16_OPF: | ||
845 | case FCMPLE32_OPF: | ||
846 | case FCMPNE16_OPF: | ||
847 | case FCMPNE32_OPF: | ||
848 | case FCMPEQ16_OPF: | ||
849 | case FCMPEQ32_OPF: | ||
850 | pcmp(regs, insn, opf); | ||
851 | break; | ||
852 | |||
853 | /* Edge Handling Instructions */ | ||
854 | case EDGE8_OPF: | ||
855 | case EDGE8N_OPF: | ||
856 | case EDGE8L_OPF: | ||
857 | case EDGE8LN_OPF: | ||
858 | case EDGE16_OPF: | ||
859 | case EDGE16N_OPF: | ||
860 | case EDGE16L_OPF: | ||
861 | case EDGE16LN_OPF: | ||
862 | case EDGE32_OPF: | ||
863 | case EDGE32N_OPF: | ||
864 | case EDGE32L_OPF: | ||
865 | case EDGE32LN_OPF: | ||
866 | edge(regs, insn, opf); | ||
867 | break; | ||
868 | |||
869 | /* Pixel Component Distance */ | ||
870 | case PDIST_OPF: | ||
871 | pdist(regs, insn); | ||
872 | break; | ||
873 | |||
874 | /* Three-Dimensional Array Addressing Instructions */ | ||
875 | case ARRAY8_OPF: | ||
876 | case ARRAY16_OPF: | ||
877 | case ARRAY32_OPF: | ||
878 | array(regs, insn, opf); | ||
879 | break; | ||
880 | |||
881 | /* Byte Mask and Shuffle Instructions */ | ||
882 | case BMASK_OPF: | ||
883 | bmask(regs, insn); | ||
884 | break; | ||
885 | |||
886 | case BSHUFFLE_OPF: | ||
887 | bshuffle(regs, insn); | ||
888 | break; | ||
889 | } | ||
890 | |||
891 | regs->tpc = regs->tnpc; | ||
892 | regs->tnpc += 4; | ||
893 | return 0; | ||
894 | } | ||
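vis_emul() advances %tpc/%tnpc itself on success, so a caller must not re-advance them. A sketch of the expected call pattern from the illegal-instruction trap path (the surrounding handler and signal delivery are illustrative, not part of this file):

	if (vis_emul(regs, insn) == 0)
		return;		/* emulated; tpc/tnpc already advanced */
	/* otherwise fall back to delivering SIGILL */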
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 467d13a0d5c1..b097379a49a8 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -70,6 +70,22 @@ SECTIONS | |||
70 | .con_initcall.init : { *(.con_initcall.init) } | 70 | .con_initcall.init : { *(.con_initcall.init) } |
71 | __con_initcall_end = .; | 71 | __con_initcall_end = .; |
72 | SECURITY_INIT | 72 | SECURITY_INIT |
73 | . = ALIGN(4); | ||
74 | __tsb_ldquad_phys_patch = .; | ||
75 | .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) } | ||
76 | __tsb_ldquad_phys_patch_end = .; | ||
77 | __tsb_phys_patch = .; | ||
78 | .tsb_phys_patch : { *(.tsb_phys_patch) } | ||
79 | __tsb_phys_patch_end = .; | ||
80 | __cpuid_patch = .; | ||
81 | .cpuid_patch : { *(.cpuid_patch) } | ||
82 | __cpuid_patch_end = .; | ||
83 | __sun4v_1insn_patch = .; | ||
84 | .sun4v_1insn_patch : { *(.sun4v_1insn_patch) } | ||
85 | __sun4v_1insn_patch_end = .; | ||
86 | __sun4v_2insn_patch = .; | ||
87 | .sun4v_2insn_patch : { *(.sun4v_2insn_patch) } | ||
88 | __sun4v_2insn_patch_end = .; | ||
73 | . = ALIGN(8192); | 89 | . = ALIGN(8192); |
74 | __initramfs_start = .; | 90 | __initramfs_start = .; |
75 | .init.ramfs : { *(.init.ramfs) } | 91 | .init.ramfs : { *(.init.ramfs) } |
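Each new section collects fixed-size patch entries emitted by assembler macros; early boot walks each start/end symbol pair and rewrites the tagged instructions in place for the detected CPU. A sketch of the one- and two-instruction sun4v entry layouts (field names follow the sparc64 headers' convention; treat the details as illustrative here):

	struct sun4v_1insn_patch_entry {
		unsigned int	addr;		/* instruction to patch */
		unsigned int	insn;		/* sun4v replacement */
	};

	struct sun4v_2insn_patch_entry {
		unsigned int	addr;
		unsigned int	insns[2];	/* two replacements */
	};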
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 39160926267b..c4aa110a10e5 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S | |||
@@ -1,8 +1,6 @@ | |||
1 | /* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $ | 1 | /* winfixup.S: Handle cases where user stack pointer is found to be bogus. |
2 | * | 2 | * |
3 | * winfixup.S: Handle cases where user stack pointer is found to be bogus. | 3 | * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net) |
4 | * | ||
5 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | 4 | */ |
7 | 5 | ||
8 | #include <asm/asi.h> | 6 | #include <asm/asi.h> |
@@ -15,374 +13,144 @@ | |||
15 | 13 | ||
16 | .text | 14 | .text |
17 | 15 | ||
18 | set_pcontext: | 16 | /* It used to be the case that these register window fault |
19 | sethi %hi(sparc64_kern_pri_context), %l1 | 17 | * handlers could run via the save and restore instructions |
20 | ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 | 18 | * done by the trap entry and exit code. They now do the |
21 | mov PRIMARY_CONTEXT, %g1 | 19 | * window spill/fill by hand, so that case no longer can occur. |
22 | stxa %l1, [%g1] ASI_DMMU | 20 | */ |
23 | flush %g6 | ||
24 | retl | ||
25 | nop | ||
26 | 21 | ||
27 | .align 32 | 22 | .align 32 |
28 | |||
29 | /* Here are the rules, pay attention. | ||
30 | * | ||
31 | * The kernel is disallowed from touching user space while | ||
32 | * the trap level is greater than zero, except for from within | ||
33 | * the window spill/fill handlers. This must be followed | ||
34 | * so that we can easily detect the case where we tried to | ||
35 | * spill/fill with a bogus (or unmapped) user stack pointer. | ||
36 | * | ||
37 | * These are laid out in a special way for cache reasons, | ||
38 | * don't touch... | ||
39 | */ | ||
40 | .globl fill_fixup, spill_fixup | ||
41 | fill_fixup: | 23 | fill_fixup: |
42 | rdpr %tstate, %g1 | 24 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
43 | andcc %g1, TSTATE_PRIV, %g0 | 25 | rdpr %tstate, %g1 |
44 | or %g4, FAULT_CODE_WINFIXUP, %g4 | 26 | and %g1, TSTATE_CWP, %g1 |
45 | be,pt %xcc, window_scheisse_from_user_common | 27 | or %g4, FAULT_CODE_WINFIXUP, %g4 |
46 | and %g1, TSTATE_CWP, %g1 | 28 | stb %g4, [%g6 + TI_FAULT_CODE] |
47 | 29 | stx %g5, [%g6 + TI_FAULT_ADDR] | |
48 | /* This is the extremely complex case, but it does happen from | 30 | wrpr %g1, %cwp |
49 | * time to time if things are just right. Essentially the restore | 31 | ba,pt %xcc, etrap |
50 | * done in rtrap right before going back to user mode, with tl=1 | 32 | rd %pc, %g7 |
51 | * and that levels trap stack registers all setup, took a fill trap, | 33 | call do_sparc64_fault |
52 | * the user stack was not mapped in the tlb, and tlb miss occurred, | 34 | add %sp, PTREGS_OFF, %o0 |
53 | * the pte found was not valid, and a simple ref bit watch update | 35 | ba,pt %xcc, rtrap_clr_l6 |
54 | * could not satisfy the miss, so we got here. | ||
55 | * | ||
56 | * We must carefully unwind the state so we get back to tl=0, preserve | ||
57 | * all the register values we were going to give to the user. Luckily | ||
58 | * most things are where they need to be, we also have the address | ||
59 | * which triggered the fault handy as well. | ||
60 | * | ||
61 | * Also note that we must preserve %l5 and %l6. If the user was | ||
62 | * returning from a system call, we must make it look this way | ||
63 | * after we process the fill fault on the users stack. | ||
64 | * | ||
65 | * First, get into the window where the original restore was executed. | ||
66 | */ | ||
67 | |||
68 | rdpr %wstate, %g2 ! Grab user mode wstate. | ||
69 | wrpr %g1, %cwp ! Get into the right window. | ||
70 | sll %g2, 3, %g2 ! NORMAL-->OTHER | ||
71 | |||
72 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | ||
73 | wrpr %g2, 0x0, %wstate ! This must be consistent. | ||
74 | wrpr %g0, 0x0, %otherwin ! We know this. | ||
75 | call set_pcontext ! Change contexts... | ||
76 | nop | 36 | nop |
77 | rdpr %pstate, %l1 ! Prepare to change globals. | ||
78 | mov %g6, %o7 ! Get current. | ||
79 | |||
80 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | ||
81 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
82 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
83 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
84 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
85 | mov %o7, %g6 | ||
86 | ldx [%g6 + TI_TASK], %g4 | ||
87 | #ifdef CONFIG_SMP | ||
88 | mov TSB_REG, %g1 | ||
89 | ldxa [%g1] ASI_IMMU, %g5 | ||
90 | #endif | ||
91 | 37 | ||
92 | /* This is the same as below, except we handle this a bit special | 38 | /* Be very careful about usage of the trap globals here. |
93 | * since we must preserve %l5 and %l6, see comment above. | 39 | * You cannot touch %g5 as that has the fault information. |
94 | */ | ||
95 | call do_sparc64_fault | ||
96 | add %sp, PTREGS_OFF, %o0 | ||
97 | ba,pt %xcc, rtrap | ||
98 | nop ! yes, nop is correct | ||
99 | |||
100 | /* Be very careful about usage of the alternate globals here. | ||
101 | * You cannot touch %g4/%g5 as that has the fault information | ||
102 | * should this be from usermode. Also be careful for the case | ||
103 | * where we get here from the save instruction in etrap.S when | ||
104 | * coming from either user or kernel (does not matter which, it | ||
105 | * is the same problem in both cases). Essentially this means | ||
106 | * do not touch %g7 or %g2 so we handle the two cases fine. | ||
107 | */ | 40 | */ |
108 | spill_fixup: | 41 | spill_fixup: |
109 | ldx [%g6 + TI_FLAGS], %g1 | 42 | spill_fixup_mna: |
110 | andcc %g1, _TIF_32BIT, %g0 | 43 | spill_fixup_dax: |
111 | ldub [%g6 + TI_WSAVED], %g1 | 44 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
112 | 45 | ldx [%g6 + TI_FLAGS], %g1 | |
113 | sll %g1, 3, %g3 | 46 | andcc %g1, _TIF_32BIT, %g0 |
114 | add %g6, %g3, %g3 | 47 | ldub [%g6 + TI_WSAVED], %g1 |
115 | stx %sp, [%g3 + TI_RWIN_SPTRS] | 48 | sll %g1, 3, %g3 |
116 | sll %g1, 7, %g3 | 49 | add %g6, %g3, %g3 |
117 | bne,pt %xcc, 1f | 50 | stx %sp, [%g3 + TI_RWIN_SPTRS] |
118 | add %g6, %g3, %g3 | 51 | sll %g1, 7, %g3 |
119 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | 52 | bne,pt %xcc, 1f |
120 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | 53 | add %g6, %g3, %g3 |
121 | 54 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | |
122 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | 55 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] |
123 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | 56 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] |
124 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | 57 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] |
125 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | 58 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] |
126 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | 59 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] |
127 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | 60 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] |
128 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | 61 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] |
129 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | 62 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] |
130 | 63 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | |
131 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | 64 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] |
132 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | 65 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] |
133 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | 66 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] |
134 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | 67 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] |
135 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | 68 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] |
136 | b,pt %xcc, 2f | 69 | ba,pt %xcc, 2f |
137 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | 70 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] |
138 | 1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] | 71 | 1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] |
139 | 72 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04] | |
140 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04] | 73 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08] |
141 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08] | 74 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] |
142 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] | 75 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10] |
143 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10] | 76 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14] |
144 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14] | 77 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18] |
145 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18] | 78 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] |
146 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] | 79 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20] |
147 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20] | 80 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24] |
148 | 81 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28] | |
149 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24] | 82 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] |
150 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28] | 83 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30] |
151 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] | 84 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34] |
152 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30] | 85 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38] |
153 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34] | 86 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] |
154 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38] | 87 | 2: add %g1, 1, %g1 |
155 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] | 88 | stb %g1, [%g6 + TI_WSAVED] |
156 | 2: add %g1, 1, %g1 | 89 | rdpr %tstate, %g1 |
157 | 90 | andcc %g1, TSTATE_PRIV, %g0 | |
158 | stb %g1, [%g6 + TI_WSAVED] | ||
159 | rdpr %tstate, %g1 | ||
160 | andcc %g1, TSTATE_PRIV, %g0 | ||
161 | saved | 91 | saved |
162 | and %g1, TSTATE_CWP, %g1 | 92 | be,pn %xcc, 1f |
163 | be,pn %xcc, window_scheisse_from_user_common | 93 | and %g1, TSTATE_CWP, %g1 |
164 | mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 | ||
165 | retry | 94 | retry |
95 | 1: mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 | ||
96 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
97 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
98 | wrpr %g1, %cwp | ||
99 | ba,pt %xcc, etrap | ||
100 | rd %pc, %g7 | ||
101 | call do_sparc64_fault | ||
102 | add %sp, PTREGS_OFF, %o0 | ||
103 | ba,a,pt %xcc, rtrap_clr_l6 | ||
166 | 104 | ||
167 | window_scheisse_from_user_common: | ||
168 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
169 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
170 | wrpr %g1, %cwp | ||
171 | ba,pt %xcc, etrap | ||
172 | rd %pc, %g7 | ||
173 | call do_sparc64_fault | ||
174 | add %sp, PTREGS_OFF, %o0 | ||
175 | ba,a,pt %xcc, rtrap_clr_l6 | ||
176 | |||
177 | .globl winfix_mna, fill_fixup_mna, spill_fixup_mna | ||
178 | winfix_mna: | 105 | winfix_mna: |
179 | andn %g3, 0x7f, %g3 | 106 | andn %g3, 0x7f, %g3 |
180 | add %g3, 0x78, %g3 | 107 | add %g3, 0x78, %g3 |
181 | wrpr %g3, %tnpc | 108 | wrpr %g3, %tnpc |
182 | done | 109 | done |
183 | fill_fixup_mna: | ||
184 | rdpr %tstate, %g1 | ||
185 | andcc %g1, TSTATE_PRIV, %g0 | ||
186 | be,pt %xcc, window_mna_from_user_common | ||
187 | and %g1, TSTATE_CWP, %g1 | ||
188 | 110 | ||
189 | /* Please, see fill_fixup commentary about why we must preserve | 111 | fill_fixup_mna: |
190 | * %l5 and %l6 to preserve absolute correct semantics. | 112 | rdpr %tstate, %g1 |
191 | */ | 113 | and %g1, TSTATE_CWP, %g1 |
192 | rdpr %wstate, %g2 ! Grab user mode wstate. | 114 | wrpr %g1, %cwp |
193 | wrpr %g1, %cwp ! Get into the right window. | 115 | ba,pt %xcc, etrap |
194 | sll %g2, 3, %g2 ! NORMAL-->OTHER | 116 | rd %pc, %g7 |
195 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | 117 | sethi %hi(tlb_type), %g1 |
196 | 118 | lduw [%g1 + %lo(tlb_type)], %g1 | |
197 | wrpr %g2, 0x0, %wstate ! This must be consistent. | 119 | cmp %g1, 3 |
198 | wrpr %g0, 0x0, %otherwin ! We know this. | 120 | bne,pt %icc, 1f |
199 | call set_pcontext ! Change contexts... | 121 | add %sp, PTREGS_OFF, %o0 |
122 | mov %l4, %o2 | ||
123 | call sun4v_do_mna | ||
124 | mov %l5, %o1 | ||
125 | ba,a,pt %xcc, rtrap_clr_l6 | ||
126 | 1: mov %l4, %o1 | ||
127 | mov %l5, %o2 | ||
128 | call mem_address_unaligned | ||
200 | nop | 129 | nop |
201 | rdpr %pstate, %l1 ! Prepare to change globals. | 130 | ba,a,pt %xcc, rtrap_clr_l6 |
202 | mov %g4, %o2 ! Setup args for | ||
203 | mov %g5, %o1 ! final call to mem_address_unaligned. | ||
204 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | ||
205 | 131 | ||
206 | mov %g6, %o7 ! Stash away current. | ||
207 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
208 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
209 | mov %o7, %g6 ! Get current back. | ||
210 | ldx [%g6 + TI_TASK], %g4 ! Finish it. | ||
211 | #ifdef CONFIG_SMP | ||
212 | mov TSB_REG, %g1 | ||
213 | ldxa [%g1] ASI_IMMU, %g5 | ||
214 | #endif | ||
215 | call mem_address_unaligned | ||
216 | add %sp, PTREGS_OFF, %o0 | ||
217 | |||
218 | b,pt %xcc, rtrap | ||
219 | nop ! yes, the nop is correct | ||
220 | spill_fixup_mna: | ||
221 | ldx [%g6 + TI_FLAGS], %g1 | ||
222 | andcc %g1, _TIF_32BIT, %g0 | ||
223 | ldub [%g6 + TI_WSAVED], %g1 | ||
224 | sll %g1, 3, %g3 | ||
225 | add %g6, %g3, %g3 | ||
226 | stx %sp, [%g3 + TI_RWIN_SPTRS] | ||
227 | |||
228 | sll %g1, 7, %g3 | ||
229 | bne,pt %xcc, 1f | ||
230 | add %g6, %g3, %g3 | ||
231 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
232 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | ||
233 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | ||
234 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | ||
235 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | ||
236 | |||
237 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | ||
238 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | ||
239 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | ||
240 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | ||
241 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | ||
242 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | ||
243 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | ||
244 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | ||
245 | |||
246 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | ||
247 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | ||
248 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | ||
249 | b,pt %xcc, 2f | ||
250 | add %g1, 1, %g1 | ||
251 | 1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
252 | std %l2, [%g3 + TI_REG_WINDOW + 0x08] | ||
253 | std %l4, [%g3 + TI_REG_WINDOW + 0x10] | ||
254 | |||
255 | std %l6, [%g3 + TI_REG_WINDOW + 0x18] | ||
256 | std %i0, [%g3 + TI_REG_WINDOW + 0x20] | ||
257 | std %i2, [%g3 + TI_REG_WINDOW + 0x28] | ||
258 | std %i4, [%g3 + TI_REG_WINDOW + 0x30] | ||
259 | std %i6, [%g3 + TI_REG_WINDOW + 0x38] | ||
260 | add %g1, 1, %g1 | ||
261 | 2: stb %g1, [%g6 + TI_WSAVED] | ||
262 | rdpr %tstate, %g1 | ||
263 | |||
264 | andcc %g1, TSTATE_PRIV, %g0 | ||
265 | saved | ||
266 | be,pn %xcc, window_mna_from_user_common | ||
267 | and %g1, TSTATE_CWP, %g1 | ||
268 | retry | ||
269 | window_mna_from_user_common: | ||
270 | wrpr %g1, %cwp | ||
271 | sethi %hi(109f), %g7 | ||
272 | ba,pt %xcc, etrap | ||
273 | 109: or %g7, %lo(109b), %g7 | ||
274 | mov %l4, %o2 | ||
275 | mov %l5, %o1 | ||
276 | call mem_address_unaligned | ||
277 | add %sp, PTREGS_OFF, %o0 | ||
278 | ba,pt %xcc, rtrap | ||
279 | clr %l6 | ||
280 | |||
281 | /* These are only needed for 64-bit mode processes which | ||
282 | * put their stack pointer into the VPTE area and there | ||
283 | * happens to be a VPTE tlb entry mapped there during | ||
284 | * a spill/fill trap to that stack frame. | ||
285 | */ | ||
286 | .globl winfix_dax, fill_fixup_dax, spill_fixup_dax | ||
287 | winfix_dax: | 132 | winfix_dax: |
288 | andn %g3, 0x7f, %g3 | 133 | andn %g3, 0x7f, %g3 |
289 | add %g3, 0x74, %g3 | 134 | add %g3, 0x74, %g3 |
290 | wrpr %g3, %tnpc | 135 | wrpr %g3, %tnpc |
291 | done | 136 | done |
292 | fill_fixup_dax: | ||
293 | rdpr %tstate, %g1 | ||
294 | andcc %g1, TSTATE_PRIV, %g0 | ||
295 | be,pt %xcc, window_dax_from_user_common | ||
296 | and %g1, TSTATE_CWP, %g1 | ||
297 | |||
298 | /* Please, see fill_fixup commentary about why we must preserve | ||
299 | * %l5 and %l6 to preserve absolute correct semantics. | ||
300 | */ | ||
301 | rdpr %wstate, %g2 ! Grab user mode wstate. | ||
302 | wrpr %g1, %cwp ! Get into the right window. | ||
303 | sll %g2, 3, %g2 ! NORMAL-->OTHER | ||
304 | wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. | ||
305 | 137 | ||
306 | wrpr %g2, 0x0, %wstate ! This must be consistent. | 138 | fill_fixup_dax: |
307 | wrpr %g0, 0x0, %otherwin ! We know this. | 139 | rdpr %tstate, %g1 |
308 | call set_pcontext ! Change contexts... | 140 | and %g1, TSTATE_CWP, %g1 |
141 | wrpr %g1, %cwp | ||
142 | ba,pt %xcc, etrap | ||
143 | rd %pc, %g7 | ||
144 | sethi %hi(tlb_type), %g1 | ||
145 | mov %l4, %o1 | ||
146 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
147 | mov %l5, %o2 | ||
148 | cmp %g1, 3 | ||
149 | bne,pt %icc, 1f | ||
150 | add %sp, PTREGS_OFF, %o0 | ||
151 | call sun4v_data_access_exception | ||
309 | nop | 152 | nop |
310 | rdpr %pstate, %l1 ! Prepare to change globals. | 153 | ba,a,pt %xcc, rtrap_clr_l6 |
311 | mov %g4, %o1 ! Setup args for | 154 | 1: call spitfire_data_access_exception |
312 | mov %g5, %o2 ! final call to spitfire_data_access_exception. | 155 | nop |
313 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | 156 | ba,a,pt %xcc, rtrap_clr_l6 |
314 | |||
315 | mov %g6, %o7 ! Stash away current. | ||
316 | wrpr %g0, 0x0, %tl ! Out of trap levels. | ||
317 | wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate | ||
318 | mov %o7, %g6 ! Get current back. | ||
319 | ldx [%g6 + TI_TASK], %g4 ! Finish it. | ||
320 | #ifdef CONFIG_SMP | ||
321 | mov TSB_REG, %g1 | ||
322 | ldxa [%g1] ASI_IMMU, %g5 | ||
323 | #endif | ||
324 | call spitfire_data_access_exception | ||
325 | add %sp, PTREGS_OFF, %o0 | ||
326 | |||
327 | b,pt %xcc, rtrap | ||
328 | nop ! yes, the nop is correct | ||
329 | spill_fixup_dax: | ||
330 | ldx [%g6 + TI_FLAGS], %g1 | ||
331 | andcc %g1, _TIF_32BIT, %g0 | ||
332 | ldub [%g6 + TI_WSAVED], %g1 | ||
333 | sll %g1, 3, %g3 | ||
334 | add %g6, %g3, %g3 | ||
335 | stx %sp, [%g3 + TI_RWIN_SPTRS] | ||
336 | |||
337 | sll %g1, 7, %g3 | ||
338 | bne,pt %xcc, 1f | ||
339 | add %g6, %g3, %g3 | ||
340 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
341 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08] | ||
342 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10] | ||
343 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18] | ||
344 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20] | ||
345 | |||
346 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28] | ||
347 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30] | ||
348 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38] | ||
349 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40] | ||
350 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48] | ||
351 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50] | ||
352 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58] | ||
353 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60] | ||
354 | |||
355 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68] | ||
356 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70] | ||
357 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78] | ||
358 | b,pt %xcc, 2f | ||
359 | add %g1, 1, %g1 | ||
360 | 1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] | ||
361 | std %l2, [%g3 + TI_REG_WINDOW + 0x08] | ||
362 | std %l4, [%g3 + TI_REG_WINDOW + 0x10] | ||
363 | |||
364 | std %l6, [%g3 + TI_REG_WINDOW + 0x18] | ||
365 | std %i0, [%g3 + TI_REG_WINDOW + 0x20] | ||
366 | std %i2, [%g3 + TI_REG_WINDOW + 0x28] | ||
367 | std %i4, [%g3 + TI_REG_WINDOW + 0x30] | ||
368 | std %i6, [%g3 + TI_REG_WINDOW + 0x38] | ||
369 | add %g1, 1, %g1 | ||
370 | 2: stb %g1, [%g6 + TI_WSAVED] | ||
371 | rdpr %tstate, %g1 | ||
372 | |||
373 | andcc %g1, TSTATE_PRIV, %g0 | ||
374 | saved | ||
375 | be,pn %xcc, window_dax_from_user_common | ||
376 | and %g1, TSTATE_CWP, %g1 | ||
377 | retry | ||
378 | window_dax_from_user_common: | ||
379 | wrpr %g1, %cwp | ||
380 | sethi %hi(109f), %g7 | ||
381 | ba,pt %xcc, etrap | ||
382 | 109: or %g7, %lo(109b), %g7 | ||
383 | mov %l4, %o1 | ||
384 | mov %l5, %o2 | ||
385 | call spitfire_data_access_exception | ||
386 | add %sp, PTREGS_OFF, %o0 | ||
387 | ba,pt %xcc, rtrap | ||
388 | clr %l6 | ||
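
The rewritten fill_fixup_dax above drops the old manual global-register dance; it etraps and then dispatches on the runtime CPU type, where a tlb_type value of 3 selects the sun4v hypervisor handler and anything else falls back to the spitfire handler. A minimal C sketch of that dispatch, assuming the sparc64 tlb_type enumeration of this era (the two handlers are the entry points named in the patch; dax_dispatch itself is an illustrative name):

struct pt_regs;

/* Mirrors enum ultra_tlb_layout: spitfire = 0 ... hypervisor = 3. */
enum sparc64_tlb_type { spitfire, cheetah, cheetah_plus, hypervisor };
extern enum sparc64_tlb_type tlb_type;

extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr, unsigned long ctx);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr, unsigned long sfar);

static void dax_dispatch(struct pt_regs *regs, unsigned long a0, unsigned long a1)
{
	if (tlb_type == hypervisor)	/* the "cmp %g1, 3" in the asm */
		sun4v_data_access_exception(regs, a0, a1);
	else
		spitfire_data_access_exception(regs, a0, a1);
}
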
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index c295806500f7..8812ded19f01 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile | |||
@@ -11,6 +11,8 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ | |||
11 | VISsave.o atomic.o bitops.o \ | 11 | VISsave.o atomic.o bitops.o \ |
12 | U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ | 12 | U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ |
13 | U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ | 13 | U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ |
14 | NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ | ||
15 | NGpage.o NGbzero.o \ | ||
14 | copy_in_user.o user_fixup.o memmove.o \ | 16 | copy_in_user.o user_fixup.o memmove.o \ |
15 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o | 17 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o |
16 | 18 | ||
diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S new file mode 100644 index 000000000000..e86baece5cc8 --- /dev/null +++ b/arch/sparc64/lib/NGbzero.S | |||
@@ -0,0 +1,163 @@ | |||
1 | /* NGbzero.S: Niagara optimized memset/clear_user. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <asm/asi.h> | ||
6 | |||
7 | #define EX_ST(x,y) \ | ||
8 | 98: x,y; \ | ||
9 | .section .fixup; \ | ||
10 | .align 4; \ | ||
11 | 99: retl; \ | ||
12 | mov %o1, %o0; \ | ||
13 | .section __ex_table; \ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | .text | ||
20 | |||
21 | .globl NGmemset | ||
22 | .type NGmemset, #function | ||
23 | NGmemset: /* %o0=buf, %o1=pat, %o2=len */ | ||
24 | and %o1, 0xff, %o3 | ||
25 | mov %o2, %o1 | ||
26 | sllx %o3, 8, %g1 | ||
27 | or %g1, %o3, %o2 | ||
28 | sllx %o2, 16, %g1 | ||
29 | or %g1, %o2, %o2 | ||
30 | sllx %o2, 32, %g1 | ||
31 | ba,pt %xcc, 1f | ||
32 | or %g1, %o2, %o2 | ||
33 | |||
34 | .globl NGbzero | ||
35 | .type NGbzero, #function | ||
36 | NGbzero: | ||
37 | clr %o2 | ||
38 | 1: brz,pn %o1, NGbzero_return | ||
39 | mov %o0, %o3 | ||
40 | |||
41 | /* %o5: saved %asi, restored at NGbzero_done | ||
42 | * %g7: store-init %asi to use | ||
43 | * %o4: non-store-init %asi to use | ||
44 | */ | ||
45 | rd %asi, %o5 | ||
46 | mov ASI_BLK_INIT_QUAD_LDD_P, %g7 | ||
47 | mov ASI_P, %o4 | ||
48 | wr %o4, 0x0, %asi | ||
49 | |||
50 | NGbzero_from_clear_user: | ||
51 | cmp %o1, 15 | ||
52 | bl,pn %icc, NGbzero_tiny | ||
53 | andcc %o0, 0x7, %g1 | ||
54 | be,pt %xcc, 2f | ||
55 | mov 8, %g2 | ||
56 | sub %g2, %g1, %g1 | ||
57 | sub %o1, %g1, %o1 | ||
58 | 1: EX_ST(stba %o2, [%o0 + 0x00] %asi) | ||
59 | subcc %g1, 1, %g1 | ||
60 | bne,pt %xcc, 1b | ||
61 | add %o0, 1, %o0 | ||
62 | 2: cmp %o1, 128 | ||
63 | bl,pn %icc, NGbzero_medium | ||
64 | andcc %o0, (64 - 1), %g1 | ||
65 | be,pt %xcc, NGbzero_pre_loop | ||
66 | mov 64, %g2 | ||
67 | sub %g2, %g1, %g1 | ||
68 | sub %o1, %g1, %o1 | ||
69 | 1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
70 | subcc %g1, 8, %g1 | ||
71 | bne,pt %xcc, 1b | ||
72 | add %o0, 8, %o0 | ||
73 | |||
74 | NGbzero_pre_loop: | ||
75 | wr %g7, 0x0, %asi | ||
76 | andn %o1, (64 - 1), %g1 | ||
77 | sub %o1, %g1, %o1 | ||
78 | NGbzero_loop: | ||
79 | EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
80 | EX_ST(stxa %o2, [%o0 + 0x08] %asi) | ||
81 | EX_ST(stxa %o2, [%o0 + 0x10] %asi) | ||
82 | EX_ST(stxa %o2, [%o0 + 0x18] %asi) | ||
83 | EX_ST(stxa %o2, [%o0 + 0x20] %asi) | ||
84 | EX_ST(stxa %o2, [%o0 + 0x28] %asi) | ||
85 | EX_ST(stxa %o2, [%o0 + 0x30] %asi) | ||
86 | EX_ST(stxa %o2, [%o0 + 0x38] %asi) | ||
87 | subcc %g1, 64, %g1 | ||
88 | bne,pt %xcc, NGbzero_loop | ||
89 | add %o0, 64, %o0 | ||
90 | |||
91 | wr %o4, 0x0, %asi | ||
92 | brz,pn %o1, NGbzero_done | ||
93 | NGbzero_medium: | ||
94 | andncc %o1, 0x7, %g1 | ||
95 | be,pn %xcc, 2f | ||
96 | sub %o1, %g1, %o1 | ||
97 | 1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) | ||
98 | subcc %g1, 8, %g1 | ||
99 | bne,pt %xcc, 1b | ||
100 | add %o0, 8, %o0 | ||
101 | 2: brz,pt %o1, NGbzero_done | ||
102 | nop | ||
103 | |||
104 | NGbzero_tiny: | ||
105 | 1: EX_ST(stba %o2, [%o0 + 0x00] %asi) | ||
106 | subcc %o1, 1, %o1 | ||
107 | bne,pt %icc, 1b | ||
108 | add %o0, 1, %o0 | ||
109 | |||
110 | /* fallthrough */ | ||
111 | |||
112 | NGbzero_done: | ||
113 | wr %o5, 0x0, %asi | ||
114 | |||
115 | NGbzero_return: | ||
116 | retl | ||
117 | mov %o3, %o0 | ||
118 | .size NGbzero, .-NGbzero | ||
119 | .size NGmemset, .-NGmemset | ||
120 | |||
121 | .globl NGclear_user | ||
122 | .type NGclear_user, #function | ||
123 | NGclear_user: /* %o0=buf, %o1=len */ | ||
124 | rd %asi, %o5 | ||
125 | brz,pn %o1, NGbzero_done | ||
126 | clr %o3 | ||
127 | cmp %o5, ASI_AIUS | ||
128 | bne,pn %icc, NGbzero | ||
129 | clr %o2 | ||
130 | mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7 | ||
131 | ba,pt %xcc, NGbzero_from_clear_user | ||
132 | mov ASI_AIUS, %o4 | ||
133 | .size NGclear_user, .-NGclear_user | ||
134 | |||
135 | #define BRANCH_ALWAYS 0x10680000 | ||
136 | #define NOP 0x01000000 | ||
137 | #define NG_DO_PATCH(OLD, NEW) \ | ||
138 | sethi %hi(NEW), %g1; \ | ||
139 | or %g1, %lo(NEW), %g1; \ | ||
140 | sethi %hi(OLD), %g2; \ | ||
141 | or %g2, %lo(OLD), %g2; \ | ||
142 | sub %g1, %g2, %g1; \ | ||
143 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
144 | sll %g1, 11, %g1; \ | ||
145 | srl %g1, 11 + 2, %g1; \ | ||
146 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
147 | or %g3, %g1, %g3; \ | ||
148 | stw %g3, [%g2]; \ | ||
149 | sethi %hi(NOP), %g3; \ | ||
150 | or %g3, %lo(NOP), %g3; \ | ||
151 | stw %g3, [%g2 + 0x4]; \ | ||
152 | flush %g2; | ||
153 | |||
154 | .globl niagara_patch_bzero | ||
155 | .type niagara_patch_bzero,#function | ||
156 | niagara_patch_bzero: | ||
157 | NG_DO_PATCH(memset, NGmemset) | ||
158 | NG_DO_PATCH(__bzero, NGbzero) | ||
159 | NG_DO_PATCH(__clear_user, NGclear_user) | ||
160 | NG_DO_PATCH(tsb_init, NGtsb_init) | ||
161 | retl | ||
162 | nop | ||
163 | .size niagara_patch_bzero,.-niagara_patch_bzero | ||
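
NG_DO_PATCH redirects an existing routine by overwriting its first two instructions with a branch to the Niagara version plus a delay-slot nop. The sll-by-11 / srl-by-13 pair turns the byte distance into a word distance and truncates it to the 19-bit signed displacement field of the SPARC BPcc format, which BRANCH_ALWAYS (0x10680000, a "ba,pt %xcc") carries in its low bits. A C sketch of the same encoding (ng_patch_words is a hypothetical helper, not kernel code):

#include <stdint.h>

static void ng_patch_words(uint64_t old_fn, uint64_t new_fn, uint32_t out[2])
{
	int64_t delta = (int64_t)(new_fn - old_fn);	/* byte offset, +/- */

	/* "sll %g1, 11; srl %g1, 11 + 2": word offset, low 19 bits only. */
	uint32_t disp19 = (uint32_t)(((uint64_t)delta >> 2) & 0x7ffff);

	out[0] = 0x10680000u | disp19;	/* ba,pt %xcc, new_fn */
	out[1] = 0x01000000u;		/* nop for the delay slot */
}

After the two stw instructions land, the flush makes the rewritten text visible to instruction fetch, so the next caller of memset/__bzero/__clear_user falls straight into the NG variant.
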
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S new file mode 100644 index 000000000000..2d93456f76dd --- /dev/null +++ b/arch/sparc64/lib/NGcopy_from_user.S | |||
@@ -0,0 +1,37 @@ | |||
1 | /* NGcopy_from_user.S: Niagara optimized copy from userspace. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #define EX_LD(x) \ | ||
7 | 98: x; \ | ||
8 | .section .fixup; \ | ||
9 | .align 4; \ | ||
10 | 99: wr %g0, ASI_AIUS, %asi;\ | ||
11 | retl; \ | ||
12 | mov 1, %o0; \ | ||
13 | .section __ex_table,"a";\ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | #ifndef ASI_AIUS | ||
20 | #define ASI_AIUS 0x11 | ||
21 | #endif | ||
22 | |||
23 | #define FUNC_NAME NGcopy_from_user | ||
24 | #define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest | ||
25 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ | ||
26 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 | ||
27 | #define EX_RETVAL(x) 0 | ||
28 | |||
29 | #ifdef __KERNEL__ | ||
30 | #define PREAMBLE \ | ||
31 | rd %asi, %g1; \ | ||
32 | cmp %g1, ASI_AIUS; \ | ||
33 | bne,pn %icc, memcpy_user_stub; \ | ||
34 | nop | ||
35 | #endif | ||
36 | |||
37 | #include "NGmemcpy.S" | ||
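
The EX_LD wrapper above is the usual sparc64 user-access pattern: each guarded load gets a numbered local label, a recovery stub in .fixup that restores %asi and returns an error, and an __ex_table record pairing the two addresses so the fault handler can redirect a faulting access to its stub. Schematically, each ".word 98b, 99b" pair populates a record like this sketch (32-bit fields match the era's sparc64 layout, where kernel text sits below 4GB; simplified):

/* One record per EX_LD/EX_ST site. */
struct exception_table_entry {
	unsigned int insn;	/* .word 98b: address of the guarded access */
	unsigned int fixup;	/* .word 99b: recovery code in .fixup */
};
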
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S new file mode 100644 index 000000000000..34112d5054ef --- /dev/null +++ b/arch/sparc64/lib/NGcopy_to_user.S | |||
@@ -0,0 +1,40 @@ | |||
1 | /* NGcopy_to_user.S: Niagara optimized copy to userspace. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #define EX_ST(x) \ | ||
7 | 98: x; \ | ||
8 | .section .fixup; \ | ||
9 | .align 4; \ | ||
10 | 99: wr %g0, ASI_AIUS, %asi;\ | ||
11 | retl; \ | ||
12 | mov 1, %o0; \ | ||
13 | .section __ex_table,"a";\ | ||
14 | .align 4; \ | ||
15 | .word 98b, 99b; \ | ||
16 | .text; \ | ||
17 | .align 4; | ||
18 | |||
19 | #ifndef ASI_AIUS | ||
20 | #define ASI_AIUS 0x11 | ||
21 | #endif | ||
22 | |||
23 | #define FUNC_NAME NGcopy_to_user | ||
24 | #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS | ||
25 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS | ||
26 | #define EX_RETVAL(x) 0 | ||
27 | |||
28 | #ifdef __KERNEL__ | ||
29 | /* Writing to %asi is _expensive_ so we hardcode it. | ||
30 | * Reading %asi to check for KERNEL_DS is comparatively | ||
31 | * cheap. | ||
32 | */ | ||
33 | #define PREAMBLE \ | ||
34 | rd %asi, %g1; \ | ||
35 | cmp %g1, ASI_AIUS; \ | ||
36 | bne,pn %icc, memcpy_user_stub; \ | ||
37 | nop | ||
38 | #endif | ||
39 | |||
40 | #include "NGmemcpy.S" | ||
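
The PREAMBLE comment above states the trade-off directly: writing %asi per call would be slow, so every access in the fast path is assembled with ASI_AIUS hardcoded, and the entry merely reads %asi once to detect a KERNEL_DS caller and bounce it to memcpy_user_stub. Roughly, in C (a sketch; read_asi_register and ng_fast_copy are hypothetical stand-ins, memcpy_user_stub is the fallback named in the macro):

#include <stddef.h>

#define ASI_AIUS 0x11	/* secondary user ASI, as in the patch */

extern unsigned int read_asi_register(void);
extern long memcpy_user_stub(void *dst, const void *src, size_t len);
extern long ng_fast_copy(void *dst, const void *src, size_t len);

static long ng_copy_entry(void *dst, const void *src, size_t len)
{
	if (read_asi_register() != ASI_AIUS)	/* "rd %asi; cmp; bne,pn" */
		return memcpy_user_stub(dst, src, len);
	return ng_fast_copy(dst, src, len);	/* ASI_AIUS hardcoded inside */
}
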
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S new file mode 100644 index 000000000000..8e522b3dc095 --- /dev/null +++ b/arch/sparc64/lib/NGmemcpy.S | |||
@@ -0,0 +1,368 @@ | |||
1 | /* NGmemcpy.S: Niagara optimized memcpy. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | #include <asm/asi.h> | ||
8 | #include <asm/thread_info.h> | ||
9 | #define GLOBAL_SPARE %g7 | ||
10 | #define RESTORE_ASI(TMP) \ | ||
11 | ldub [%g6 + TI_CURRENT_DS], TMP; \ | ||
12 | wr TMP, 0x0, %asi; | ||
13 | #else | ||
14 | #define GLOBAL_SPARE %g5 | ||
15 | #define RESTORE_ASI(TMP) \ | ||
16 | wr %g0, ASI_PNF, %asi | ||
17 | #endif | ||
18 | |||
19 | #ifndef STORE_ASI | ||
20 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P | ||
21 | #endif | ||
22 | |||
23 | #ifndef EX_LD | ||
24 | #define EX_LD(x) x | ||
25 | #endif | ||
26 | |||
27 | #ifndef EX_ST | ||
28 | #define EX_ST(x) x | ||
29 | #endif | ||
30 | |||
31 | #ifndef EX_RETVAL | ||
32 | #define EX_RETVAL(x) x | ||
33 | #endif | ||
34 | |||
35 | #ifndef LOAD | ||
36 | #ifndef MEMCPY_DEBUG | ||
37 | #define LOAD(type,addr,dest) type [addr], dest | ||
38 | #else | ||
39 | #define LOAD(type,addr,dest) type##a [addr] 0x80, dest | ||
40 | #endif | ||
41 | #endif | ||
42 | |||
43 | #ifndef LOAD_TWIN | ||
44 | #define LOAD_TWIN(addr_reg,dest0,dest1) \ | ||
45 | ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0 | ||
46 | #endif | ||
47 | |||
48 | #ifndef STORE | ||
49 | #define STORE(type,src,addr) type src, [addr] | ||
50 | #endif | ||
51 | |||
52 | #ifndef STORE_INIT | ||
53 | #define STORE_INIT(src,addr) stxa src, [addr] %asi | ||
54 | #endif | ||
55 | |||
56 | #ifndef FUNC_NAME | ||
57 | #define FUNC_NAME NGmemcpy | ||
58 | #endif | ||
59 | |||
60 | #ifndef PREAMBLE | ||
61 | #define PREAMBLE | ||
62 | #endif | ||
63 | |||
64 | #ifndef XCC | ||
65 | #define XCC xcc | ||
66 | #endif | ||
67 | |||
68 | .register %g2,#scratch | ||
69 | .register %g3,#scratch | ||
70 | |||
71 | .text | ||
72 | .align 64 | ||
73 | |||
74 | .globl FUNC_NAME | ||
75 | .type FUNC_NAME,#function | ||
76 | FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | ||
77 | srlx %o2, 31, %g2 | ||
78 | cmp %g2, 0 | ||
79 | tne %xcc, 5 | ||
80 | PREAMBLE | ||
81 | mov %o0, GLOBAL_SPARE | ||
82 | cmp %o2, 0 | ||
83 | be,pn %XCC, 85f | ||
84 | or %o0, %o1, %o3 | ||
85 | cmp %o2, 16 | ||
86 | blu,a,pn %XCC, 80f | ||
87 | or %o3, %o2, %o3 | ||
88 | |||
89 | /* 2 blocks (128 bytes) is the minimum we can do the block | ||
90 | * copy with. We need to ensure that we'll iterate at least | ||
91 | * once in the block copy loop. At worst we'll need to align | ||
92 | * the destination to a 64-byte boundary which can chew up | ||
93 | * to (64 - 1) bytes from the length before we perform the | ||
94 | * block copy loop. | ||
95 | */ | ||
96 | cmp %o2, (2 * 64) | ||
97 | blu,pt %XCC, 70f | ||
98 | andcc %o3, 0x7, %g0 | ||
99 | |||
100 | /* %o0: dst | ||
101 | * %o1: src | ||
102 | * %o2: len (known to be >= 128) | ||
103 | * | ||
104 | * The block copy loops will use %o4/%o5,%g2/%g3 as | ||
105 | * temporaries while copying the data. | ||
106 | */ | ||
107 | |||
108 | LOAD(prefetch, %o1, #one_read) | ||
109 | wr %g0, STORE_ASI, %asi | ||
110 | |||
111 | /* Align destination on 64-byte boundary. */ | ||
112 | andcc %o0, (64 - 1), %o4 | ||
113 | be,pt %XCC, 2f | ||
114 | sub %o4, 64, %o4 | ||
115 | sub %g0, %o4, %o4 ! bytes to align dst | ||
116 | sub %o2, %o4, %o2 | ||
117 | 1: subcc %o4, 1, %o4 | ||
118 | EX_LD(LOAD(ldub, %o1, %g1)) | ||
119 | EX_ST(STORE(stb, %g1, %o0)) | ||
120 | add %o1, 1, %o1 | ||
121 | bne,pt %XCC, 1b | ||
122 | add %o0, 1, %o0 | ||
123 | |||
124 | /* If the source is on a 16-byte boundary we can do | ||
125 | * the direct block copy loop. If it is 8-byte aligned | ||
126 | * we can do the 16-byte loads offset by -8 bytes and the | ||
127 | * init stores offset by one register. | ||
128 | * | ||
129 | * If the source is not even 8-byte aligned, we need to do | ||
130 | * shifting and masking (basically integer faligndata). | ||
131 | * | ||
132 | * The careful bit with init stores is that if we store | ||
133 | * to any part of the cache line we have to store the whole | ||
134 | * cacheline, else we can end up with corrupt L2 cache line | ||
135 | * contents. Since the loop works on 64 bytes of 64-byte | ||
136 | * aligned store data at a time, this is easy to ensure. | ||
137 | */ | ||
138 | 2: | ||
139 | andcc %o1, (16 - 1), %o4 | ||
140 | andn %o2, (64 - 1), %g1 ! block copy loop iterator | ||
141 | sub %o2, %g1, %o2 ! final sub-block copy bytes | ||
142 | be,pt %XCC, 50f | ||
143 | cmp %o4, 8 | ||
144 | be,a,pt %XCC, 10f | ||
145 | sub %o1, 0x8, %o1 | ||
146 | |||
147 | /* Neither 8-byte nor 16-byte aligned, shift and mask. */ | ||
148 | mov %g1, %o4 | ||
149 | and %o1, 0x7, %g1 | ||
150 | sll %g1, 3, %g1 | ||
151 | mov 64, %o3 | ||
152 | andn %o1, 0x7, %o1 | ||
153 | EX_LD(LOAD(ldx, %o1, %g2)) | ||
154 | sub %o3, %g1, %o3 | ||
155 | sllx %g2, %g1, %g2 | ||
156 | |||
157 | #define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ | ||
158 | EX_LD(LOAD(ldx, SRC, TMP1)); \ | ||
159 | srlx TMP1, PRE_SHIFT, TMP2; \ | ||
160 | or TMP2, PRE_VAL, TMP2; \ | ||
161 | EX_ST(STORE_INIT(TMP2, DST)); \ | ||
162 | sllx TMP1, POST_SHIFT, PRE_VAL; | ||
163 | |||
164 | 1: add %o1, 0x8, %o1 | ||
165 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) | ||
166 | add %o1, 0x8, %o1 | ||
167 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) | ||
168 | add %o1, 0x8, %o1 | ||
169 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) | ||
170 | add %o1, 0x8, %o1 | ||
171 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) | ||
172 | add %o1, 32, %o1 | ||
173 | LOAD(prefetch, %o1, #one_read) | ||
174 | sub %o1, 32 - 8, %o1 | ||
175 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) | ||
176 | add %o1, 8, %o1 | ||
177 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) | ||
178 | add %o1, 8, %o1 | ||
179 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) | ||
180 | add %o1, 8, %o1 | ||
181 | SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) | ||
182 | subcc %o4, 64, %o4 | ||
183 | bne,pt %XCC, 1b | ||
184 | add %o0, 64, %o0 | ||
185 | |||
186 | #undef SWIVEL_ONE_DWORD | ||
187 | |||
188 | srl %g1, 3, %g1 | ||
189 | ba,pt %XCC, 60f | ||
190 | add %o1, %g1, %o1 | ||
191 | |||
192 | 10: /* Destination is 64-byte aligned, source was only 8-byte | ||
193 | * aligned but it has been subtracted by 8 and we perform | ||
194 | * one twin load ahead, then add 8 back into source when | ||
195 | * we finish the loop. | ||
196 | */ | ||
197 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
198 | 1: add %o1, 16, %o1 | ||
199 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
200 | add %o1, 16 + 32, %o1 | ||
201 | LOAD(prefetch, %o1, #one_read) | ||
202 | sub %o1, 32, %o1 | ||
203 | EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line | ||
204 | EX_ST(STORE_INIT(%g2, %o0 + 0x08)) | ||
205 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
206 | add %o1, 16, %o1 | ||
207 | EX_ST(STORE_INIT(%g3, %o0 + 0x10)) | ||
208 | EX_ST(STORE_INIT(%o4, %o0 + 0x18)) | ||
209 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
210 | add %o1, 16, %o1 | ||
211 | EX_ST(STORE_INIT(%o5, %o0 + 0x20)) | ||
212 | EX_ST(STORE_INIT(%g2, %o0 + 0x28)) | ||
213 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
214 | EX_ST(STORE_INIT(%g3, %o0 + 0x30)) | ||
215 | EX_ST(STORE_INIT(%o4, %o0 + 0x38)) | ||
216 | subcc %g1, 64, %g1 | ||
217 | bne,pt %XCC, 1b | ||
218 | add %o0, 64, %o0 | ||
219 | |||
220 | ba,pt %XCC, 60f | ||
221 | add %o1, 0x8, %o1 | ||
222 | |||
223 | 50: /* Destination is 64-byte aligned, and source is 16-byte | ||
224 | * aligned. | ||
225 | */ | ||
226 | 1: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
227 | add %o1, 16, %o1 | ||
228 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
229 | add %o1, 16 + 32, %o1 | ||
230 | LOAD(prefetch, %o1, #one_read) | ||
231 | sub %o1, 32, %o1 | ||
232 | EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line | ||
233 | EX_ST(STORE_INIT(%o5, %o0 + 0x08)) | ||
234 | EX_LD(LOAD_TWIN(%o1, %o4, %o5)) | ||
235 | add %o1, 16, %o1 | ||
236 | EX_ST(STORE_INIT(%g2, %o0 + 0x10)) | ||
237 | EX_ST(STORE_INIT(%g3, %o0 + 0x18)) | ||
238 | EX_LD(LOAD_TWIN(%o1, %g2, %g3)) | ||
239 | add %o1, 16, %o1 | ||
240 | EX_ST(STORE_INIT(%o4, %o0 + 0x20)) | ||
241 | EX_ST(STORE_INIT(%o5, %o0 + 0x28)) | ||
242 | EX_ST(STORE_INIT(%g2, %o0 + 0x30)) | ||
243 | EX_ST(STORE_INIT(%g3, %o0 + 0x38)) | ||
244 | subcc %g1, 64, %g1 | ||
245 | bne,pt %XCC, 1b | ||
246 | add %o0, 64, %o0 | ||
247 | /* fall through */ | ||
248 | |||
249 | 60: | ||
250 | /* %o2 contains any final bytes still needed to be copied | ||
251 | * over. If anything is left, we copy it one byte at a time. | ||
252 | */ | ||
253 | RESTORE_ASI(%o3) | ||
254 | brz,pt %o2, 85f | ||
255 | sub %o0, %o1, %o3 | ||
256 | ba,a,pt %XCC, 90f | ||
257 | |||
258 | .align 64 | ||
259 | 70: /* 16 < len <= 64 */ | ||
260 | bne,pn %XCC, 75f | ||
261 | sub %o0, %o1, %o3 | ||
262 | |||
263 | 72: | ||
264 | andn %o2, 0xf, %o4 | ||
265 | and %o2, 0xf, %o2 | ||
266 | 1: subcc %o4, 0x10, %o4 | ||
267 | EX_LD(LOAD(ldx, %o1, %o5)) | ||
268 | add %o1, 0x08, %o1 | ||
269 | EX_LD(LOAD(ldx, %o1, %g1)) | ||
270 | sub %o1, 0x08, %o1 | ||
271 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | ||
272 | add %o1, 0x8, %o1 | ||
273 | EX_ST(STORE(stx, %g1, %o1 + %o3)) | ||
274 | bgu,pt %XCC, 1b | ||
275 | add %o1, 0x8, %o1 | ||
276 | 73: andcc %o2, 0x8, %g0 | ||
277 | be,pt %XCC, 1f | ||
278 | nop | ||
279 | sub %o2, 0x8, %o2 | ||
280 | EX_LD(LOAD(ldx, %o1, %o5)) | ||
281 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | ||
282 | add %o1, 0x8, %o1 | ||
283 | 1: andcc %o2, 0x4, %g0 | ||
284 | be,pt %XCC, 1f | ||
285 | nop | ||
286 | sub %o2, 0x4, %o2 | ||
287 | EX_LD(LOAD(lduw, %o1, %o5)) | ||
288 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | ||
289 | add %o1, 0x4, %o1 | ||
290 | 1: cmp %o2, 0 | ||
291 | be,pt %XCC, 85f | ||
292 | nop | ||
293 | ba,pt %xcc, 90f | ||
294 | nop | ||
295 | |||
296 | 75: | ||
297 | andcc %o0, 0x7, %g1 | ||
298 | sub %g1, 0x8, %g1 | ||
299 | be,pn %icc, 2f | ||
300 | sub %g0, %g1, %g1 | ||
301 | sub %o2, %g1, %o2 | ||
302 | |||
303 | 1: subcc %g1, 1, %g1 | ||
304 | EX_LD(LOAD(ldub, %o1, %o5)) | ||
305 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | ||
306 | bgu,pt %icc, 1b | ||
307 | add %o1, 1, %o1 | ||
308 | |||
309 | 2: add %o1, %o3, %o0 | ||
310 | andcc %o1, 0x7, %g1 | ||
311 | bne,pt %icc, 8f | ||
312 | sll %g1, 3, %g1 | ||
313 | |||
314 | cmp %o2, 16 | ||
315 | bgeu,pt %icc, 72b | ||
316 | nop | ||
317 | ba,a,pt %xcc, 73b | ||
318 | |||
319 | 8: mov 64, %o3 | ||
320 | andn %o1, 0x7, %o1 | ||
321 | EX_LD(LOAD(ldx, %o1, %g2)) | ||
322 | sub %o3, %g1, %o3 | ||
323 | andn %o2, 0x7, %o4 | ||
324 | sllx %g2, %g1, %g2 | ||
325 | 1: add %o1, 0x8, %o1 | ||
326 | EX_LD(LOAD(ldx, %o1, %g3)) | ||
327 | subcc %o4, 0x8, %o4 | ||
328 | srlx %g3, %o3, %o5 | ||
329 | or %o5, %g2, %o5 | ||
330 | EX_ST(STORE(stx, %o5, %o0)) | ||
331 | add %o0, 0x8, %o0 | ||
332 | bgu,pt %icc, 1b | ||
333 | sllx %g3, %g1, %g2 | ||
334 | |||
335 | srl %g1, 3, %g1 | ||
336 | andcc %o2, 0x7, %o2 | ||
337 | be,pn %icc, 85f | ||
338 | add %o1, %g1, %o1 | ||
339 | ba,pt %xcc, 90f | ||
340 | sub %o0, %o1, %o3 | ||
341 | |||
342 | .align 64 | ||
343 | 80: /* 0 < len <= 16 */ | ||
344 | andcc %o3, 0x3, %g0 | ||
345 | bne,pn %XCC, 90f | ||
346 | sub %o0, %o1, %o3 | ||
347 | |||
348 | 1: | ||
349 | subcc %o2, 4, %o2 | ||
350 | EX_LD(LOAD(lduw, %o1, %g1)) | ||
351 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | ||
352 | bgu,pt %XCC, 1b | ||
353 | add %o1, 4, %o1 | ||
354 | |||
355 | 85: retl | ||
356 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | ||
357 | |||
358 | .align 32 | ||
359 | 90: | ||
360 | subcc %o2, 1, %o2 | ||
361 | EX_LD(LOAD(ldub, %o1, %g1)) | ||
362 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | ||
363 | bgu,pt %XCC, 90b | ||
364 | add %o1, 1, %o1 | ||
365 | retl | ||
366 | mov EX_RETVAL(GLOBAL_SPARE), %o0 | ||
367 | |||
368 | .size FUNC_NAME, .-FUNC_NAME | ||
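
Two pieces of NGmemcpy's reasoning deserve a concrete restatement. First, the 128-byte floor on the block path: with len = 128 and the destination one byte past a 64-byte boundary, alignment eats 63 bytes and 65 remain, so the block loop is still guaranteed one full iteration. Second, the "integer faligndata" used when the source is not even 8-byte aligned (SWIVEL_ONE_DWORD and the loop at label 8): only aligned 8-byte loads are issued, and each output dword is spliced from two neighbouring words with shifts. A C sketch of the splice (copy_from_misaligned is a hypothetical helper; it assumes big-endian byte order as on SPARC and a nonzero misalignment, since aligned sources take a different path):

#include <stdint.h>
#include <stddef.h>

static void copy_from_misaligned(uint64_t *dst, const uint8_t *src, size_t ndwords)
{
	unsigned int lsh = ((uintptr_t)src & 7) * 8;	/* "sll %g1, 3, %g1" */
	unsigned int rsh = 64 - lsh;			/* "sub %o3, %g1, %o3" */
	const uint64_t *s = (const uint64_t *)((uintptr_t)src & ~(uintptr_t)7);
	uint64_t prev = *s++ << lsh;			/* prime the pipeline */

	while (ndwords--) {
		uint64_t cur = *s++;			/* aligned load only */
		*dst++ = prev | (cur >> rsh);		/* splice two words */
		prev = cur << lsh;
	}
}
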
diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S new file mode 100644 index 000000000000..7d7c3bb8dcbf --- /dev/null +++ b/arch/sparc64/lib/NGpage.S | |||
@@ -0,0 +1,96 @@ | |||
1 | /* NGpage.S: Niagara optimized clear and copy page. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <asm/asi.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | .text | ||
10 | .align 32 | ||
11 | |||
12 | /* This is heavily simplified from the sun4u variants | ||
13 | * because Niagara does not have any D-cache aliasing issues | ||
14 | * and we don't need to use the FPU in order to implement | ||
15 | * an optimal page copy/clear. | ||
16 | */ | ||
17 | |||
18 | NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ | ||
19 | prefetch [%o1 + 0x00], #one_read | ||
20 | mov 8, %g1 | ||
21 | mov 16, %g2 | ||
22 | mov 24, %g3 | ||
23 | set PAGE_SIZE, %g7 | ||
24 | |||
25 | 1: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 | ||
26 | ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 | ||
27 | prefetch [%o1 + 0x40], #one_read | ||
28 | add %o1, 32, %o1 | ||
29 | stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
30 | stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
31 | ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 | ||
32 | stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
33 | stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
34 | ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 | ||
35 | add %o1, 32, %o1 | ||
36 | add %o0, 32, %o0 | ||
37 | stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
38 | stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
39 | stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
40 | stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
41 | subcc %g7, 64, %g7 | ||
42 | bne,pt %xcc, 1b | ||
43 | add %o0, 32, %o0 | ||
44 | retl | ||
45 | nop | ||
46 | |||
47 | NGclear_page: /* %o0=dest */ | ||
48 | NGclear_user_page: /* %o0=dest, %o1=vaddr */ | ||
49 | mov 8, %g1 | ||
50 | mov 16, %g2 | ||
51 | mov 24, %g3 | ||
52 | set PAGE_SIZE, %g7 | ||
53 | |||
54 | 1: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
55 | stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
56 | stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
57 | stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
58 | add %o0, 32, %o0 | ||
59 | stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P | ||
60 | stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P | ||
61 | stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P | ||
62 | stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P | ||
63 | subcc %g7, 64, %g7 | ||
64 | bne,pt %xcc, 1b | ||
65 | add %o0, 32, %o0 | ||
66 | retl | ||
67 | nop | ||
68 | |||
69 | #define BRANCH_ALWAYS 0x10680000 | ||
70 | #define NOP 0x01000000 | ||
71 | #define NG_DO_PATCH(OLD, NEW) \ | ||
72 | sethi %hi(NEW), %g1; \ | ||
73 | or %g1, %lo(NEW), %g1; \ | ||
74 | sethi %hi(OLD), %g2; \ | ||
75 | or %g2, %lo(OLD), %g2; \ | ||
76 | sub %g1, %g2, %g1; \ | ||
77 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
78 | sll %g1, 11, %g1; \ | ||
79 | srl %g1, 11 + 2, %g1; \ | ||
80 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
81 | or %g3, %g1, %g3; \ | ||
82 | stw %g3, [%g2]; \ | ||
83 | sethi %hi(NOP), %g3; \ | ||
84 | or %g3, %lo(NOP), %g3; \ | ||
85 | stw %g3, [%g2 + 0x4]; \ | ||
86 | flush %g2; | ||
87 | |||
88 | .globl niagara_patch_pageops | ||
89 | .type niagara_patch_pageops,#function | ||
90 | niagara_patch_pageops: | ||
91 | NG_DO_PATCH(copy_user_page, NGcopy_user_page) | ||
92 | NG_DO_PATCH(_clear_page, NGclear_page) | ||
93 | NG_DO_PATCH(clear_user_page, NGclear_user_page) | ||
94 | retl | ||
95 | nop | ||
96 | .size niagara_patch_pageops,.-niagara_patch_pageops | ||
diff --git a/arch/sparc64/lib/NGpatch.S b/arch/sparc64/lib/NGpatch.S new file mode 100644 index 000000000000..3b0674fc3366 --- /dev/null +++ b/arch/sparc64/lib/NGpatch.S | |||
@@ -0,0 +1,33 @@ | |||
1 | /* NGpatch.S: Patch Ultra-I routines with Niagara variant. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #define BRANCH_ALWAYS 0x10680000 | ||
7 | #define NOP 0x01000000 | ||
8 | #define NG_DO_PATCH(OLD, NEW) \ | ||
9 | sethi %hi(NEW), %g1; \ | ||
10 | or %g1, %lo(NEW), %g1; \ | ||
11 | sethi %hi(OLD), %g2; \ | ||
12 | or %g2, %lo(OLD), %g2; \ | ||
13 | sub %g1, %g2, %g1; \ | ||
14 | sethi %hi(BRANCH_ALWAYS), %g3; \ | ||
15 | sll %g1, 11, %g1; \ | ||
16 | srl %g1, 11 + 2, %g1; \ | ||
17 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | ||
18 | or %g3, %g1, %g3; \ | ||
19 | stw %g3, [%g2]; \ | ||
20 | sethi %hi(NOP), %g3; \ | ||
21 | or %g3, %lo(NOP), %g3; \ | ||
22 | stw %g3, [%g2 + 0x4]; \ | ||
23 | flush %g2; | ||
24 | |||
25 | .globl niagara_patch_copyops | ||
26 | .type niagara_patch_copyops,#function | ||
27 | niagara_patch_copyops: | ||
28 | NG_DO_PATCH(memcpy, NGmemcpy) | ||
29 | NG_DO_PATCH(___copy_from_user, NGcopy_from_user) | ||
30 | NG_DO_PATCH(___copy_to_user, NGcopy_to_user) | ||
31 | retl | ||
32 | nop | ||
33 | .size niagara_patch_copyops,.-niagara_patch_copyops | ||
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S index e2b6c5e4b95a..ecc302619a6e 100644 --- a/arch/sparc64/lib/U3patch.S +++ b/arch/sparc64/lib/U3patch.S | |||
@@ -12,7 +12,8 @@ | |||
12 | or %g2, %lo(OLD), %g2; \ | 12 | or %g2, %lo(OLD), %g2; \ |
13 | sub %g1, %g2, %g1; \ | 13 | sub %g1, %g2, %g1; \ |
14 | sethi %hi(BRANCH_ALWAYS), %g3; \ | 14 | sethi %hi(BRANCH_ALWAYS), %g3; \ |
15 | srl %g1, 2, %g1; \ | 15 | sll %g1, 11, %g1; \ |
16 | srl %g1, 11 + 2, %g1; \ | ||
16 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ | 17 | or %g3, %lo(BRANCH_ALWAYS), %g3; \ |
17 | or %g3, %g1, %g3; \ | 18 | or %g3, %g1, %g3; \ |
18 | stw %g3, [%g2]; \ | 19 | stw %g3, [%g2]; \ |
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S index 1d2abcfa4e52..c7bbae8c590f 100644 --- a/arch/sparc64/lib/bzero.S +++ b/arch/sparc64/lib/bzero.S | |||
@@ -98,12 +98,12 @@ __bzero_done: | |||
98 | .text; \ | 98 | .text; \ |
99 | .align 4; | 99 | .align 4; |
100 | 100 | ||
101 | .globl __bzero_noasi | 101 | .globl __clear_user |
102 | .type __bzero_noasi, #function | 102 | .type __clear_user, #function |
103 | __bzero_noasi: /* %o0=buf, %o1=len */ | 103 | __clear_user: /* %o0=buf, %o1=len */ |
104 | brz,pn %o1, __bzero_noasi_done | 104 | brz,pn %o1, __clear_user_done |
105 | cmp %o1, 16 | 105 | cmp %o1, 16 |
106 | bl,pn %icc, __bzero_noasi_tiny | 106 | bl,pn %icc, __clear_user_tiny |
107 | EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) | 107 | EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) |
108 | andcc %o0, 0x3, %g0 | 108 | andcc %o0, 0x3, %g0 |
109 | be,pt %icc, 2f | 109 | be,pt %icc, 2f |
@@ -145,14 +145,14 @@ __bzero_noasi: /* %o0=buf, %o1=len */ | |||
145 | subcc %g1, 8, %g1 | 145 | subcc %g1, 8, %g1 |
146 | bne,pt %icc, 5b | 146 | bne,pt %icc, 5b |
147 | add %o0, 0x8, %o0 | 147 | add %o0, 0x8, %o0 |
148 | 6: brz,pt %o1, __bzero_noasi_done | 148 | 6: brz,pt %o1, __clear_user_done |
149 | nop | 149 | nop |
150 | __bzero_noasi_tiny: | 150 | __clear_user_tiny: |
151 | 1: EX_ST(stba %g0, [%o0 + 0x00] %asi) | 151 | 1: EX_ST(stba %g0, [%o0 + 0x00] %asi) |
152 | subcc %o1, 1, %o1 | 152 | subcc %o1, 1, %o1 |
153 | bne,pt %icc, 1b | 153 | bne,pt %icc, 1b |
154 | add %o0, 1, %o0 | 154 | add %o0, 1, %o0 |
155 | __bzero_noasi_done: | 155 | __clear_user_done: |
156 | retl | 156 | retl |
157 | clr %o0 | 157 | clr %o0 |
158 | .size __bzero_noasi, .-__bzero_noasi | 158 | .size __clear_user, .-__clear_user |
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S index b59884ef051d..77e531f6c2a7 100644 --- a/arch/sparc64/lib/clear_page.S +++ b/arch/sparc64/lib/clear_page.S | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/spitfire.h> | 11 | #include <asm/spitfire.h> |
12 | #include <asm/head.h> | ||
12 | 13 | ||
13 | /* What we used to do was lock a TLB entry into a specific | 14 | /* What we used to do was lock a TLB entry into a specific |
14 | * TLB slot, clear the page with interrupts disabled, then | 15 | * TLB slot, clear the page with interrupts disabled, then |
@@ -22,9 +23,6 @@ | |||
22 | * disable preemption during the clear. | 23 | * disable preemption during the clear. |
23 | */ | 24 | */ |
24 | 25 | ||
25 | #define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) | ||
26 | #define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) | ||
27 | |||
28 | .text | 26 | .text |
29 | 27 | ||
30 | .globl _clear_page | 28 | .globl _clear_page |
@@ -43,12 +41,11 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ | |||
43 | sethi %hi(PAGE_SIZE), %o4 | 41 | sethi %hi(PAGE_SIZE), %o4 |
44 | 42 | ||
45 | sllx %g2, 32, %g2 | 43 | sllx %g2, 32, %g2 |
46 | sethi %uhi(TTE_BITS_TOP), %g3 | 44 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
47 | 45 | ||
48 | sllx %g3, 32, %g3 | 46 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 |
49 | sub %o0, %g2, %g1 ! paddr | 47 | sub %o0, %g2, %g1 ! paddr |
50 | 48 | ||
51 | or %g3, TTE_BITS_BOTTOM, %g3 | ||
52 | and %o1, %o4, %o0 ! vaddr D-cache alias bit | 49 | and %o1, %o4, %o0 ! vaddr D-cache alias bit |
53 | 50 | ||
54 | or %g1, %g3, %g1 ! TTE data | 51 | or %g1, %g3, %g1 ! TTE data |
@@ -66,7 +63,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ | |||
66 | wrpr %o4, PSTATE_IE, %pstate | 63 | wrpr %o4, PSTATE_IE, %pstate |
67 | stxa %o0, [%g3] ASI_DMMU | 64 | stxa %o0, [%g3] ASI_DMMU |
68 | stxa %g1, [%g0] ASI_DTLB_DATA_IN | 65 | stxa %g1, [%g0] ASI_DTLB_DATA_IN |
69 | flush %g6 | 66 | sethi %hi(KERNBASE), %g1 |
67 | flush %g1 | ||
70 | wrpr %o4, 0x0, %pstate | 68 | wrpr %o4, 0x0, %pstate |
71 | 69 | ||
72 | mov 1, %o4 | 70 | mov 1, %o4 |
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S index feebb14fd27a..37460666a5c3 100644 --- a/arch/sparc64/lib/copy_page.S +++ b/arch/sparc64/lib/copy_page.S | |||
@@ -23,8 +23,6 @@ | |||
23 | * disable preemption during the clear. | 23 | * disable preemption during the clear. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) | ||
27 | #define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) | ||
28 | #define DCACHE_SIZE (PAGE_SIZE * 2) | 26 | #define DCACHE_SIZE (PAGE_SIZE * 2) |
29 | 27 | ||
30 | #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) | 28 | #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) |
@@ -52,13 +50,12 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ | |||
52 | sethi %hi(PAGE_SIZE), %o3 | 50 | sethi %hi(PAGE_SIZE), %o3 |
53 | 51 | ||
54 | sllx %g2, 32, %g2 | 52 | sllx %g2, 32, %g2 |
55 | sethi %uhi(TTE_BITS_TOP), %g3 | 53 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
56 | 54 | ||
57 | sllx %g3, 32, %g3 | 55 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 |
58 | sub %o0, %g2, %g1 ! dest paddr | 56 | sub %o0, %g2, %g1 ! dest paddr |
59 | 57 | ||
60 | sub %o1, %g2, %g2 ! src paddr | 58 | sub %o1, %g2, %g2 ! src paddr |
61 | or %g3, TTE_BITS_BOTTOM, %g3 | ||
62 | 59 | ||
63 | and %o2, %o3, %o0 ! vaddr D-cache alias bit | 60 | and %o2, %o3, %o0 ! vaddr D-cache alias bit |
64 | or %g1, %g3, %g1 ! dest TTE data | 61 | or %g1, %g3, %g1 ! dest TTE data |
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c index e8808727617a..fb27e54a03ee 100644 --- a/arch/sparc64/lib/delay.c +++ b/arch/sparc64/lib/delay.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* delay.c: Delay loops for sparc64 | 1 | /* delay.c: Delay loops for sparc64 |
2 | * | 2 | * |
3 | * Copyright (C) 2004 David S. Miller <davem@redhat.com> | 3 | * Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net> |
4 | * | 4 | * |
5 | * Based heavily upon x86 variant which is: | 5 | * Based heavily upon x86 variant which is: |
6 | * Copyright (C) 1993 Linus Torvalds | 6 | * Copyright (C) 1993 Linus Torvalds |
@@ -8,19 +8,16 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <asm/timer.h> | ||
11 | 12 | ||
12 | void __delay(unsigned long loops) | 13 | void __delay(unsigned long loops) |
13 | { | 14 | { |
14 | __asm__ __volatile__( | 15 | unsigned long bclock, now; |
15 | " b,pt %%xcc, 1f\n" | 16 | |
16 | " cmp %0, 0\n" | 17 | bclock = tick_ops->get_tick(); |
17 | " .align 32\n" | 18 | do { |
18 | "1:\n" | 19 | now = tick_ops->get_tick(); |
19 | " bne,pt %%xcc, 1b\n" | 20 | } while ((now-bclock) < loops); |
20 | " subcc %0, 1, %0\n" | ||
21 | : "=&r" (loops) | ||
22 | : "0" (loops) | ||
23 | : "cc"); | ||
24 | } | 21 | } |
25 | 22 | ||
26 | /* We used to multiply by HZ after shifting down by 32 bits | 23 | /* We used to multiply by HZ after shifting down by 32 bits |
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S index 4cd5d2be1ae1..a79c8888170d 100644 --- a/arch/sparc64/lib/xor.S +++ b/arch/sparc64/lib/xor.S | |||
@@ -2,9 +2,10 @@ | |||
2 | * arch/sparc64/lib/xor.S | 2 | * arch/sparc64/lib/xor.S |
3 | * | 3 | * |
4 | * High speed xor_block operation for RAID4/5 utilizing the | 4 | * High speed xor_block operation for RAID4/5 utilizing the |
5 | * UltraSparc Visual Instruction Set. | 5 | * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. |
6 | * | 6 | * |
7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) | 7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) |
8 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <asm/visasm.h> | 11 | #include <asm/visasm.h> |
@@ -19,6 +20,8 @@ | |||
19 | */ | 20 | */ |
20 | .text | 21 | .text |
21 | .align 32 | 22 | .align 32 |
23 | |||
24 | /* VIS versions. */ | ||
22 | .globl xor_vis_2 | 25 | .globl xor_vis_2 |
23 | .type xor_vis_2,#function | 26 | .type xor_vis_2,#function |
24 | xor_vis_2: | 27 | xor_vis_2: |
@@ -352,3 +355,298 @@ xor_vis_5: | |||
352 | ret | 355 | ret |
353 | restore | 356 | restore |
354 | .size xor_vis_5, .-xor_vis_5 | 357 | .size xor_vis_5, .-xor_vis_5 |
358 | |||
359 | /* Niagara versions. */ | ||
360 | .globl xor_niagara_2 | ||
361 | .type xor_niagara_2,#function | ||
362 | xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */ | ||
363 | save %sp, -192, %sp | ||
364 | prefetch [%i1], #n_writes | ||
365 | prefetch [%i2], #one_read | ||
366 | rd %asi, %g7 | ||
367 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
368 | srlx %i0, 6, %g1 | ||
369 | mov %i1, %i0 | ||
370 | mov %i2, %i1 | ||
371 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ | ||
372 | ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ | ||
373 | ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ | ||
374 | ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ | ||
375 | prefetch [%i1 + 0x40], #one_read | ||
376 | ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ | ||
377 | ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ | ||
378 | ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ | ||
379 | ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ | ||
380 | prefetch [%i0 + 0x40], #n_writes | ||
381 | xor %o0, %i2, %o0 | ||
382 | xor %o1, %i3, %o1 | ||
383 | stxa %o0, [%i0 + 0x00] %asi | ||
384 | stxa %o1, [%i0 + 0x08] %asi | ||
385 | xor %o2, %i4, %o2 | ||
386 | xor %o3, %i5, %o3 | ||
387 | stxa %o2, [%i0 + 0x10] %asi | ||
388 | stxa %o3, [%i0 + 0x18] %asi | ||
389 | xor %o4, %g2, %o4 | ||
390 | xor %o5, %g3, %o5 | ||
391 | stxa %o4, [%i0 + 0x20] %asi | ||
392 | stxa %o5, [%i0 + 0x28] %asi | ||
393 | xor %l2, %l0, %l2 | ||
394 | xor %l3, %l1, %l3 | ||
395 | stxa %l2, [%i0 + 0x30] %asi | ||
396 | stxa %l3, [%i0 + 0x38] %asi | ||
397 | add %i0, 0x40, %i0 | ||
398 | subcc %g1, 1, %g1 | ||
399 | bne,pt %xcc, 1b | ||
400 | add %i1, 0x40, %i1 | ||
401 | membar #Sync | ||
402 | wr %g7, 0x0, %asi | ||
403 | ret | ||
404 | restore | ||
405 | .size xor_niagara_2, .-xor_niagara_2 | ||
406 | |||
407 | .globl xor_niagara_3 | ||
408 | .type xor_niagara_3,#function | ||
409 | xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ | ||
410 | save %sp, -192, %sp | ||
411 | prefetch [%i1], #n_writes | ||
412 | prefetch [%i2], #one_read | ||
413 | prefetch [%i3], #one_read | ||
414 | rd %asi, %g7 | ||
415 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
416 | srlx %i0, 6, %g1 | ||
417 | mov %i1, %i0 | ||
418 | mov %i2, %i1 | ||
419 | mov %i3, %l7 | ||
420 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
421 | ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ | ||
422 | ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ | ||
423 | ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ | ||
424 | ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ | ||
425 | ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ | ||
426 | xor %g2, %i2, %g2 | ||
427 | xor %g3, %i3, %g3 | ||
428 | xor %o0, %g2, %o0 | ||
429 | xor %o1, %g3, %o1 | ||
430 | stxa %o0, [%i0 + 0x00] %asi | ||
431 | stxa %o1, [%i0 + 0x08] %asi | ||
432 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
433 | ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ | ||
434 | ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ | ||
435 | xor %l0, %i4, %l0 | ||
436 | xor %l1, %i5, %l1 | ||
437 | xor %o2, %l0, %o2 | ||
438 | xor %o3, %l1, %o3 | ||
439 | stxa %o2, [%i0 + 0x10] %asi | ||
440 | stxa %o3, [%i0 + 0x18] %asi | ||
441 | ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ | ||
442 | ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ | ||
443 | ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ | ||
444 | prefetch [%i1 + 0x40], #one_read | ||
445 | prefetch [%l7 + 0x40], #one_read | ||
446 | prefetch [%i0 + 0x40], #n_writes | ||
447 | xor %g2, %i2, %g2 | ||
448 | xor %g3, %i3, %g3 | ||
449 | xor %o0, %g2, %o0 | ||
450 | xor %o1, %g3, %o1 | ||
451 | stxa %o0, [%i0 + 0x20] %asi | ||
452 | stxa %o1, [%i0 + 0x28] %asi | ||
453 | xor %l0, %i4, %l0 | ||
454 | xor %l1, %i5, %l1 | ||
455 | xor %o2, %l0, %o2 | ||
456 | xor %o3, %l1, %o3 | ||
457 | stxa %o2, [%i0 + 0x30] %asi | ||
458 | stxa %o3, [%i0 + 0x38] %asi | ||
459 | add %i0, 0x40, %i0 | ||
460 | add %i1, 0x40, %i1 | ||
461 | subcc %g1, 1, %g1 | ||
462 | bne,pt %xcc, 1b | ||
463 | add %l7, 0x40, %l7 | ||
464 | membar #Sync | ||
465 | wr %g7, 0x0, %asi | ||
466 | ret | ||
467 | restore | ||
468 | .size xor_niagara_3, .-xor_niagara_3 | ||
469 | |||
470 | .globl xor_niagara_4 | ||
471 | .type xor_niagara_4,#function | ||
472 | xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ | ||
473 | save %sp, -192, %sp | ||
474 | prefetch [%i1], #n_writes | ||
475 | prefetch [%i2], #one_read | ||
476 | prefetch [%i3], #one_read | ||
477 | prefetch [%i4], #one_read | ||
478 | rd %asi, %g7 | ||
479 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
480 | srlx %i0, 6, %g1 | ||
481 | mov %i1, %i0 | ||
482 | mov %i2, %i1 | ||
483 | mov %i3, %l7 | ||
484 | mov %i4, %l6 | ||
485 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
486 | ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ | ||
487 | ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ | ||
488 | ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ | ||
489 | xor %i4, %i2, %i4 | ||
490 | xor %i5, %i3, %i5 | ||
491 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ | ||
492 | xor %g2, %i4, %g2 | ||
493 | xor %g3, %i5, %g3 | ||
494 | ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ | ||
495 | xor %l0, %g2, %l0 | ||
496 | xor %l1, %g3, %l1 | ||
497 | stxa %l0, [%i0 + 0x00] %asi | ||
498 | stxa %l1, [%i0 + 0x08] %asi | ||
499 | ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ | ||
500 | ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ | ||
501 | |||
502 | xor %i4, %i2, %i4 | ||
503 | xor %i5, %i3, %i5 | ||
504 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
505 | xor %g2, %i4, %g2 | ||
506 | xor %g3, %i5, %g3 | ||
507 | ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ | ||
508 | xor %l0, %g2, %l0 | ||
509 | xor %l1, %g3, %l1 | ||
510 | stxa %l0, [%i0 + 0x10] %asi | ||
511 | stxa %l1, [%i0 + 0x18] %asi | ||
512 | ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ | ||
513 | ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ | ||
514 | |||
515 | xor %i4, %i2, %i4 | ||
516 | xor %i5, %i3, %i5 | ||
517 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ | ||
518 | xor %g2, %i4, %g2 | ||
519 | xor %g3, %i5, %g3 | ||
520 | ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ | ||
521 | xor %l0, %g2, %l0 | ||
522 | xor %l1, %g3, %l1 | ||
523 | stxa %l0, [%i0 + 0x20] %asi | ||
524 | stxa %l1, [%i0 + 0x28] %asi | ||
525 | ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ | ||
526 | ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ | ||
527 | |||
528 | prefetch [%i1 + 0x40], #one_read | ||
529 | prefetch [%l7 + 0x40], #one_read | ||
530 | prefetch [%l6 + 0x40], #one_read | ||
531 | prefetch [%i0 + 0x40], #n_writes | ||
532 | |||
533 | xor %i4, %i2, %i4 | ||
534 | xor %i5, %i3, %i5 | ||
535 | xor %g2, %i4, %g2 | ||
536 | xor %g3, %i5, %g3 | ||
537 | xor %l0, %g2, %l0 | ||
538 | xor %l1, %g3, %l1 | ||
539 | stxa %l0, [%i0 + 0x30] %asi | ||
540 | stxa %l1, [%i0 + 0x38] %asi | ||
541 | |||
542 | add %i0, 0x40, %i0 | ||
543 | add %i1, 0x40, %i1 | ||
544 | add %l7, 0x40, %l7 | ||
545 | subcc %g1, 1, %g1 | ||
546 | bne,pt %xcc, 1b | ||
547 | add %l6, 0x40, %l6 | ||
548 | membar #Sync | ||
549 | wr %g7, 0x0, %asi | ||
550 | ret | ||
551 | restore | ||
552 | .size xor_niagara_4, .-xor_niagara_4 | ||
553 | |||
554 | .globl xor_niagara_5 | ||
555 | .type xor_niagara_5,#function | ||
556 | xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ | ||
557 | save %sp, -192, %sp | ||
558 | prefetch [%i1], #n_writes | ||
559 | prefetch [%i2], #one_read | ||
560 | prefetch [%i3], #one_read | ||
561 | prefetch [%i4], #one_read | ||
562 | prefetch [%i5], #one_read | ||
563 | rd %asi, %g7 | ||
564 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
565 | srlx %i0, 6, %g1 | ||
566 | mov %i1, %i0 | ||
567 | mov %i2, %i1 | ||
568 | mov %i3, %l7 | ||
569 | mov %i4, %l6 | ||
570 | mov %i5, %l5 | ||
571 | 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ | ||
572 | ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ | ||
573 | ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ | ||
574 | ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ | ||
575 | ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ | ||
576 | xor %i4, %i2, %i4 | ||
577 | xor %i5, %i3, %i5 | ||
578 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ | ||
579 | xor %g2, %i4, %g2 | ||
580 | xor %g3, %i5, %g3 | ||
581 | ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ | ||
582 | xor %l0, %g2, %l0 | ||
583 | xor %l1, %g3, %l1 | ||
584 | ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ | ||
585 | xor %l2, %l0, %l2 | ||
586 | xor %l3, %l1, %l3 | ||
587 | stxa %l2, [%i0 + 0x00] %asi | ||
588 | stxa %l3, [%i0 + 0x08] %asi | ||
589 | ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */ | ||
590 | ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ | ||
591 | |||
592 | xor %i4, %i2, %i4 | ||
593 | xor %i5, %i3, %i5 | ||
594 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | ||
595 | xor %g2, %i4, %g2 | ||
596 | xor %g3, %i5, %g3 | ||
597 | ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ | ||
598 | xor %l0, %g2, %l0 | ||
599 | xor %l1, %g3, %l1 | ||
600 | ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ | ||
601 | xor %l2, %l0, %l2 | ||
602 | xor %l3, %l1, %l3 | ||
603 | stxa %l2, [%i0 + 0x10] %asi | ||
604 | stxa %l3, [%i0 + 0x18] %asi | ||
605 | ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */ | ||
606 | ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ | ||
607 | |||
608 | xor %i4, %i2, %i4 | ||
609 | xor %i5, %i3, %i5 | ||
610 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ | ||
611 | xor %g2, %i4, %g2 | ||
612 | xor %g3, %i5, %g3 | ||
613 | ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ | ||
614 | xor %l0, %g2, %l0 | ||
615 | xor %l1, %g3, %l1 | ||
616 | ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ | ||
617 | xor %l2, %l0, %l2 | ||
618 | xor %l3, %l1, %l3 | ||
619 | stxa %l2, [%i0 + 0x20] %asi | ||
620 | stxa %l3, [%i0 + 0x28] %asi | ||
621 | ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ | ||
622 | ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ | ||
623 | |||
624 | prefetch [%i1 + 0x40], #one_read | ||
625 | prefetch [%l7 + 0x40], #one_read | ||
626 | prefetch [%l6 + 0x40], #one_read | ||
627 | prefetch [%l5 + 0x40], #one_read | ||
628 | prefetch [%i0 + 0x40], #n_writes | ||
629 | |||
630 | xor %i4, %i2, %i4 | ||
631 | xor %i5, %i3, %i5 | ||
632 | xor %g2, %i4, %g2 | ||
633 | xor %g3, %i5, %g3 | ||
634 | xor %l0, %g2, %l0 | ||
635 | xor %l1, %g3, %l1 | ||
636 | xor %l2, %l0, %l2 | ||
637 | xor %l3, %l1, %l3 | ||
638 | stxa %l2, [%i0 + 0x30] %asi | ||
639 | stxa %l3, [%i0 + 0x38] %asi | ||
640 | |||
641 | add %i0, 0x40, %i0 | ||
642 | add %i1, 0x40, %i1 | ||
643 | add %l7, 0x40, %l7 | ||
644 | add %l6, 0x40, %l6 | ||
645 | subcc %g1, 1, %g1 | ||
646 | bne,pt %xcc, 1b | ||
647 | add %l5, 0x40, %l5 | ||
648 | membar #Sync | ||
649 | wr %g7, 0x0, %asi | ||
650 | ret | ||
651 | restore | ||
652 | .size xor_niagara_5, .-xor_niagara_5 | ||
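
Stripped of the ASI plumbing, the Niagara XOR routines are plain xor-accumulate loops over 64-byte chunks; the point of the exercise is the ASI_BLK_INIT_QUAD_LDD_P accesses, which give 16-byte twin loads on the read side and cache-initializing stores on the write side, which is also why every iteration writes a whole 64-byte line. The two-source case reduces to this C shape (illustrative sketch only, with no ASI semantics):

#include <stdint.h>
#include <stddef.h>

static void xor_2_sketch(size_t bytes, uint64_t *dest, const uint64_t *src)
{
	size_t chunks = bytes >> 6;		/* "srlx %i0, 6, %g1" */

	while (chunks--) {
		for (int i = 0; i < 8; i++)	/* 8 dwords per 64-byte chunk */
			dest[i] ^= src[i];
		dest += 8;
		src += 8;
	}
}
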
diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c index 2ae05cd7b773..6ee496c2864a 100644 --- a/arch/sparc64/math-emu/math.c +++ b/arch/sparc64/math-emu/math.c | |||
@@ -206,9 +206,29 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) | |||
206 | case FSTOQ: TYPE(3,3,1,1,1,0,0); break; | 206 | case FSTOQ: TYPE(3,3,1,1,1,0,0); break; |
207 | case FDTOQ: TYPE(3,3,1,2,1,0,0); break; | 207 | case FDTOQ: TYPE(3,3,1,2,1,0,0); break; |
208 | case FQTOI: TYPE(3,1,0,3,1,0,0); break; | 208 | case FQTOI: TYPE(3,1,0,3,1,0,0); break; |
209 | |||
210 | /* We can get either unimplemented or unfinished | ||
211 | * for these cases. Pre-Niagara systems generate | ||
212 | * unfinished fpop for SUBNORMAL cases, and Niagara | ||
213 | * always gives unimplemented fpop for fsqrt{s,d}. | ||
214 | */ | ||
215 | case FSQRTS: { | ||
216 | unsigned long x = current_thread_info()->xfsr[0]; | ||
217 | |||
218 | x = (x >> 14) & 0x7; | ||
219 | TYPE(x,1,1,1,1,0,0); | ||
220 | break; | ||
221 | } | ||
222 | |||
223 | case FSQRTD: { | ||
224 | unsigned long x = current_thread_info()->xfsr[0]; | ||
225 | |||
226 | x = (x >> 14) & 0x7; | ||
227 | TYPE(x,2,1,2,1,0,0); | ||
228 | break; | ||
229 | } | ||
230 | |||
209 | /* SUBNORMAL - ftt == 2 */ | 231 | /* SUBNORMAL - ftt == 2 */ |
210 | case FSQRTS: TYPE(2,1,1,1,1,0,0); break; | ||
211 | case FSQRTD: TYPE(2,2,1,2,1,0,0); break; | ||
212 | case FADDD: | 232 | case FADDD: |
213 | case FSUBD: | 233 | case FSUBD: |
214 | case FMULD: | 234 | case FMULD: |
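
The new FSQRT cases read the ftt (floating-point trap type) field out of the saved %fsr image to decide which trap flavour arrived: ftt lives at bits 16:14, with value 2 meaning unfinished_FPop (the pre-Niagara subnormal case) and 3 meaning unimplemented_FPop (Niagara's fsqrt{s,d}). A minimal extraction sketch, assuming the architectural 3-bit field:

#define FSR_FTT_SHIFT		14
#define FTT_UNFINISHED		2	/* pre-Niagara, subnormal operands */
#define FTT_UNIMPLEMENTED	3	/* Niagara fsqrt{s,d} */

static unsigned int fsr_ftt(unsigned long fsr)
{
	return (fsr >> FSR_FTT_SHIFT) & 0x7;	/* bits 16:14 of %fsr */
}
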
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index 9d0960e69f48..e415bf942bcd 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile | |||
@@ -5,6 +5,6 @@ | |||
5 | EXTRA_AFLAGS := -ansi | 5 | EXTRA_AFLAGS := -ansi |
6 | EXTRA_CFLAGS := -Werror | 6 | EXTRA_CFLAGS := -Werror |
7 | 7 | ||
8 | obj-y := ultra.o tlb.o fault.o init.o generic.o | 8 | obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o |
9 | 9 | ||
10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 6f0539aa44d0..63b6cc0cd5d5 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/lsu.h> | 29 | #include <asm/lsu.h> |
30 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
31 | #include <asm/kdebug.h> | 31 | #include <asm/kdebug.h> |
32 | #include <asm/mmu_context.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * To debug kernel to catch accesses to certain virtual/physical addresses. | 35 | * To debug kernel to catch accesses to certain virtual/physical addresses. |
@@ -91,12 +92,13 @@ static void __kprobes unhandled_fault(unsigned long address, | |||
91 | die_if_kernel("Oops", regs); | 92 | die_if_kernel("Oops", regs); |
92 | } | 93 | } |
93 | 94 | ||
94 | static void bad_kernel_pc(struct pt_regs *regs) | 95 | static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) |
95 | { | 96 | { |
96 | unsigned long *ksp; | 97 | unsigned long *ksp; |
97 | 98 | ||
98 | printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", | 99 | printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", |
99 | regs->tpc); | 100 | regs->tpc); |
101 | printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); | ||
100 | __asm__("mov %%sp, %0" : "=r" (ksp)); | 102 | __asm__("mov %%sp, %0" : "=r" (ksp)); |
101 | show_stack(current, ksp); | 103 | show_stack(current, ksp); |
102 | unhandled_fault(regs->tpc, current, regs); | 104 | unhandled_fault(regs->tpc, current, regs); |
@@ -137,7 +139,7 @@ static unsigned int get_user_insn(unsigned long tpc) | |||
137 | if (!pte_present(pte)) | 139 | if (!pte_present(pte)) |
138 | goto out; | 140 | goto out; |
139 | 141 | ||
140 | pa = (pte_val(pte) & _PAGE_PADDR); | 142 | pa = (pte_pfn(pte) << PAGE_SHIFT); |
141 | pa += (tpc & ~PAGE_MASK); | 143 | pa += (tpc & ~PAGE_MASK); |
142 | 144 | ||
143 | /* Use phys bypass so we don't pollute dtlb/dcache. */ | 145 | /* Use phys bypass so we don't pollute dtlb/dcache. */ |
@@ -257,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
257 | struct vm_area_struct *vma; | 259 | struct vm_area_struct *vma; |
258 | unsigned int insn = 0; | 260 | unsigned int insn = 0; |
259 | int si_code, fault_code; | 261 | int si_code, fault_code; |
260 | unsigned long address; | 262 | unsigned long address, mm_rss; |
261 | 263 | ||
262 | fault_code = get_thread_fault_code(); | 264 | fault_code = get_thread_fault_code(); |
263 | 265 | ||
@@ -280,7 +282,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
280 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { | 282 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { |
281 | /* Valid, no problems... */ | 283 | /* Valid, no problems... */ |
282 | } else { | 284 | } else { |
283 | bad_kernel_pc(regs); | 285 | bad_kernel_pc(regs, address); |
284 | return; | 286 | return; |
285 | } | 287 | } |
286 | } | 288 | } |
@@ -406,6 +408,11 @@ good_area: | |||
406 | } | 408 | } |
407 | 409 | ||
408 | up_read(&mm->mmap_sem); | 410 | up_read(&mm->mmap_sem); |
411 | |||
412 | mm_rss = get_mm_rss(mm); | ||
413 | if (unlikely(mm_rss >= mm->context.tsb_rss_limit)) | ||
414 | tsb_grow(mm, mm_rss); | ||
415 | |||
409 | return; | 416 | return; |
410 | 417 | ||
411 | /* | 418 | /* |
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 580b63da836b..5fc5c579e35e 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c | |||
@@ -15,15 +15,6 @@ | |||
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
17 | 17 | ||
18 | static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) | ||
19 | { | ||
20 | pte_t pte; | ||
21 | pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) & | ||
22 | ~(unsigned long)_PAGE_CACHE); | ||
23 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
24 | return pte; | ||
25 | } | ||
26 | |||
27 | /* Remap IO memory, the same way as remap_pfn_range(), but use | 18 | /* Remap IO memory, the same way as remap_pfn_range(), but use |
28 | * the obio memory space. | 19 | * the obio memory space. |
29 | * | 20 | * |
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, | |||
48 | pte_t entry; | 39 | pte_t entry; |
49 | unsigned long curend = address + PAGE_SIZE; | 40 | unsigned long curend = address + PAGE_SIZE; |
50 | 41 | ||
51 | entry = mk_pte_io(offset, prot, space); | 42 | entry = mk_pte_io(offset, prot, space, PAGE_SIZE); |
52 | if (!(address & 0xffff)) { | 43 | if (!(address & 0xffff)) { |
53 | if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { | 44 | if (PAGE_SIZE < (4 * 1024 * 1024) && |
54 | entry = mk_pte_io(offset, | 45 | !(address & 0x3fffff) && |
55 | __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), | 46 | !(offset & 0x3ffffe) && |
56 | space); | 47 | end >= address + 0x400000) { |
48 | entry = mk_pte_io(offset, prot, space, | ||
49 | 4 * 1024 * 1024); | ||
57 | curend = address + 0x400000; | 50 | curend = address + 0x400000; |
58 | offset += 0x400000; | 51 | offset += 0x400000; |
59 | } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { | 52 | } else if (PAGE_SIZE < (512 * 1024) && |
60 | entry = mk_pte_io(offset, | 53 | !(address & 0x7ffff) && |
61 | __pgprot(pgprot_val (prot) | _PAGE_SZ512K), | 54 | !(offset & 0x7fffe) && |
62 | space); | 55 | end >= address + 0x80000) { |
56 | entry = mk_pte_io(offset, prot, space, | ||
57 | 512 * 1024); | ||
63 | curend = address + 0x80000; | 58 | curend = address + 0x80000; |
64 | offset += 0x80000; | 59 | offset += 0x80000; |
65 | } else if (!(offset & 0xfffe) && end >= address + 0x10000) { | 60 | } else if (PAGE_SIZE < (64 * 1024) && |
66 | entry = mk_pte_io(offset, | 61 | !(offset & 0xfffe) && |
67 | __pgprot(pgprot_val (prot) | _PAGE_SZ64K), | 62 | end >= address + 0x10000) { |
68 | space); | 63 | entry = mk_pte_io(offset, prot, space, |
64 | 64 * 1024); | ||
69 | curend = address + 0x10000; | 65 | curend = address + 0x10000; |
70 | offset += 0x10000; | 66 | offset += 0x10000; |
71 | } else | 67 | } else |
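The rewritten io_remap_pte_range() now passes the desired mapping size to mk_pte_io() and guards each case with PAGE_SIZE so a size at or below the base page is never selected. A sketch of that "largest size whose alignment holds" selection; note the kernel's offset masks (0x3ffffe and friends) differ slightly because of how the offset argument is encoded, while this sketch uses plain power-of-two alignment for clarity:

#include <stdio.h>

/* Pick the largest TTE size whose alignment both the virtual address
 * and the physical offset satisfy and that still fits before 'end'.
 * The size table and the page_size guard mirror the new tests above.
 */
static unsigned long pick_size(unsigned long addr, unsigned long off,
                               unsigned long end, unsigned long page_size)
{
        static const unsigned long sizes[] = {
                4UL * 1024 * 1024, 512UL * 1024, 64UL * 1024,
        };
        for (int i = 0; i < 3; i++) {
                unsigned long sz = sizes[i];
                if (page_size < sz && !(addr & (sz - 1)) &&
                    !(off & (sz - 1)) && end >= addr + sz)
                        return sz;
        }
        return page_size;
}

int main(void)
{
        /* 4MB-aligned VA and offset with 12MB of room: picks 4MB. */
        printf("%lu\n", pick_size(0x400000, 0x800000, 0x1000000, 8192));
        return 0;
}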
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 625cbb336a23..a7a24869d045 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * SPARC64 Huge TLB page support. | 2 | * SPARC64 Huge TLB page support. |
3 | * | 3 | * |
4 | * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net) |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
@@ -22,6 +22,175 @@ | |||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/mmu_context.h> | 23 | #include <asm/mmu_context.h> |
24 | 24 | ||
25 | /* Slightly simplified from the non-hugepage variant because by | ||
26 | * definition we don't have to worry about any page coloring stuff. | ||
27 | */ | ||
28 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) | ||
29 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) | ||
30 | |||
31 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, | ||
32 | unsigned long addr, | ||
33 | unsigned long len, | ||
34 | unsigned long pgoff, | ||
35 | unsigned long flags) | ||
36 | { | ||
37 | struct mm_struct *mm = current->mm; | ||
38 | struct vm_area_struct * vma; | ||
39 | unsigned long task_size = TASK_SIZE; | ||
40 | unsigned long start_addr; | ||
41 | |||
42 | if (test_thread_flag(TIF_32BIT)) | ||
43 | task_size = STACK_TOP32; | ||
44 | if (unlikely(len >= VA_EXCLUDE_START)) | ||
45 | return -ENOMEM; | ||
46 | |||
47 | if (len > mm->cached_hole_size) { | ||
48 | start_addr = addr = mm->free_area_cache; | ||
49 | } else { | ||
50 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
51 | mm->cached_hole_size = 0; | ||
52 | } | ||
53 | |||
54 | task_size -= len; | ||
55 | |||
56 | full_search: | ||
57 | addr = ALIGN(addr, HPAGE_SIZE); | ||
58 | |||
59 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | ||
60 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
61 | if (addr < VA_EXCLUDE_START && | ||
62 | (addr + len) >= VA_EXCLUDE_START) { | ||
63 | addr = VA_EXCLUDE_END; | ||
64 | vma = find_vma(mm, VA_EXCLUDE_END); | ||
65 | } | ||
66 | if (unlikely(task_size < addr)) { | ||
67 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
68 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
69 | mm->cached_hole_size = 0; | ||
70 | goto full_search; | ||
71 | } | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | if (likely(!vma || addr + len <= vma->vm_start)) { | ||
75 | /* | ||
76 | * Remember the place where we stopped the search: | ||
77 | */ | ||
78 | mm->free_area_cache = addr + len; | ||
79 | return addr; | ||
80 | } | ||
81 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
82 | mm->cached_hole_size = vma->vm_start - addr; | ||
83 | |||
84 | addr = ALIGN(vma->vm_end, HPAGE_SIZE); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | static unsigned long | ||
89 | hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
90 | const unsigned long len, | ||
91 | const unsigned long pgoff, | ||
92 | const unsigned long flags) | ||
93 | { | ||
94 | struct vm_area_struct *vma; | ||
95 | struct mm_struct *mm = current->mm; | ||
96 | unsigned long addr = addr0; | ||
97 | |||
98 | /* This should only ever run for 32-bit processes. */ | ||
99 | BUG_ON(!test_thread_flag(TIF_32BIT)); | ||
100 | |||
101 | /* check if free_area_cache is useful for us */ | ||
102 | if (len <= mm->cached_hole_size) { | ||
103 | mm->cached_hole_size = 0; | ||
104 | mm->free_area_cache = mm->mmap_base; | ||
105 | } | ||
106 | |||
107 | /* either no address requested or can't fit in requested address hole */ | ||
108 | addr = mm->free_area_cache & HPAGE_MASK; | ||
109 | |||
110 | /* make sure it can fit in the remaining address space */ | ||
111 | if (likely(addr > len)) { | ||
112 | vma = find_vma(mm, addr-len); | ||
113 | if (!vma || addr <= vma->vm_start) { | ||
114 | /* remember the address as a hint for next time */ | ||
115 | return (mm->free_area_cache = addr-len); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | if (unlikely(mm->mmap_base < len)) | ||
120 | goto bottomup; | ||
121 | |||
122 | addr = (mm->mmap_base-len) & HPAGE_MASK; | ||
123 | |||
124 | do { | ||
125 | /* | ||
126 | * Lookup failure means no vma is above this address, | ||
127 | * else if new region fits below vma->vm_start, | ||
128 | * return with success: | ||
129 | */ | ||
130 | vma = find_vma(mm, addr); | ||
131 | if (likely(!vma || addr+len <= vma->vm_start)) { | ||
132 | /* remember the address as a hint for next time */ | ||
133 | return (mm->free_area_cache = addr); | ||
134 | } | ||
135 | |||
136 | /* remember the largest hole we saw so far */ | ||
137 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
138 | mm->cached_hole_size = vma->vm_start - addr; | ||
139 | |||
140 | /* try just below the current vma->vm_start */ | ||
141 | addr = (vma->vm_start-len) & HPAGE_MASK; | ||
142 | } while (likely(len < vma->vm_start)); | ||
143 | |||
144 | bottomup: | ||
145 | /* | ||
146 | * A failed mmap() very likely causes application failure, | ||
147 | * so fall back to the bottom-up function here. This scenario | ||
148 | * can happen with large stack limits and large mmap() | ||
149 | * allocations. | ||
150 | */ | ||
151 | mm->cached_hole_size = ~0UL; | ||
152 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
153 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
154 | /* | ||
155 | * Restore the topdown base: | ||
156 | */ | ||
157 | mm->free_area_cache = mm->mmap_base; | ||
158 | mm->cached_hole_size = ~0UL; | ||
159 | |||
160 | return addr; | ||
161 | } | ||
162 | |||
163 | unsigned long | ||
164 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
165 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
166 | { | ||
167 | struct mm_struct *mm = current->mm; | ||
168 | struct vm_area_struct *vma; | ||
169 | unsigned long task_size = TASK_SIZE; | ||
170 | |||
171 | if (test_thread_flag(TIF_32BIT)) | ||
172 | task_size = STACK_TOP32; | ||
173 | |||
174 | if (len & ~HPAGE_MASK) | ||
175 | return -EINVAL; | ||
176 | if (len > task_size) | ||
177 | return -ENOMEM; | ||
178 | |||
179 | if (addr) { | ||
180 | addr = ALIGN(addr, HPAGE_SIZE); | ||
181 | vma = find_vma(mm, addr); | ||
182 | if (task_size - len >= addr && | ||
183 | (!vma || addr + len <= vma->vm_start)) | ||
184 | return addr; | ||
185 | } | ||
186 | if (mm->get_unmapped_area == arch_get_unmapped_area) | ||
187 | return hugetlb_get_unmapped_area_bottomup(file, addr, len, | ||
188 | pgoff, flags); | ||
189 | else | ||
190 | return hugetlb_get_unmapped_area_topdown(file, addr, len, | ||
191 | pgoff, flags); | ||
192 | } | ||
193 | |||
25 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | 194 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) |
26 | { | 195 | { |
27 | pgd_t *pgd; | 196 | pgd_t *pgd; |
@@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | |||
48 | pmd_t *pmd; | 217 | pmd_t *pmd; |
49 | pte_t *pte = NULL; | 218 | pte_t *pte = NULL; |
50 | 219 | ||
220 | addr &= HPAGE_MASK; | ||
221 | |||
51 | pgd = pgd_offset(mm, addr); | 222 | pgd = pgd_offset(mm, addr); |
52 | if (pgd) { | 223 | if (!pgd_none(*pgd)) { |
53 | pud = pud_offset(pgd, addr); | 224 | pud = pud_offset(pgd, addr); |
54 | if (pud) { | 225 | if (!pud_none(*pud)) { |
55 | pmd = pmd_offset(pud, addr); | 226 | pmd = pmd_offset(pud, addr); |
56 | if (pmd) | 227 | if (!pmd_none(*pmd)) |
57 | pte = pte_offset_map(pmd, addr); | 228 | pte = pte_offset_map(pmd, addr); |
58 | } | 229 | } |
59 | } | 230 | } |
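The new hugetlb_get_unmapped_area() variants above are essentially the generic VA search constrained to HPAGE_SIZE alignment. A minimal userspace sketch of the bottom-up shape, minus the VA-hole exclusion and free_area_cache bookkeeping; the VMA list and addresses are made up for the demo:

#include <stdio.h>

#define HPAGE_SHIFT 22UL                  /* 4MB huge pages on sparc64 */
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct vma { unsigned long start, end; };

/* Bottom-up search: keep the candidate huge-page aligned and slide it
 * past each occupied region until it fits in a hole.
 */
static unsigned long search(const struct vma *v, int n,
                            unsigned long addr, unsigned long len)
{
        addr = ALIGN(addr, HPAGE_SIZE);
        for (int i = 0; i < n; i++) {
                if (addr + len <= v[i].start)
                        return addr;                 /* fits in this hole */
                if (addr < v[i].end)
                        addr = ALIGN(v[i].end, HPAGE_SIZE);
        }
        return addr;                                 /* above the last vma */
}

int main(void)
{
        struct vma v[] = { { 0x400000, 0x800000 }, { 0xc00000, 0x1000000 } };
        printf("%#lx\n", search(v, 2, 0x400000, HPAGE_SIZE)); /* -> 0x800000 */
        return 0;
}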
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e44ee26cee8..c2b556106fc1 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <linux/module.h> | ||
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/string.h> | 12 | #include <linux/string.h> |
@@ -39,9 +40,27 @@ | |||
39 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
40 | #include <asm/spitfire.h> | 41 | #include <asm/spitfire.h> |
41 | #include <asm/sections.h> | 42 | #include <asm/sections.h> |
43 | #include <asm/tsb.h> | ||
44 | #include <asm/hypervisor.h> | ||
42 | 45 | ||
43 | extern void device_scan(void); | 46 | extern void device_scan(void); |
44 | 47 | ||
48 | #define MAX_PHYS_ADDRESS (1UL << 42UL) | ||
49 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) | ||
50 | #define KPTE_BITMAP_BYTES \ | ||
51 | ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) | ||
52 | |||
53 | unsigned long kern_linear_pte_xor[2] __read_mostly; | ||
54 | |||
55 | /* A bitmap, one bit for every 256MB of physical memory. If the bit | ||
56 | * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]); | ||
57 | * if it is set, we should use a 256MB page (via kern_linear_pte_xor[1]). | ||
58 | */ | ||
59 | unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | ||
60 | |||
61 | /* A special kernel TSB for 4MB and 256MB linear mappings. */ | ||
62 | struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; | ||
63 | |||
45 | #define MAX_BANKS 32 | 64 | #define MAX_BANKS 32 |
46 | 65 | ||
47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | 66 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; |
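The bitmap declared above is a simple one-bit-per-256MB lookup keyed by physical address. A runnable sketch of the indexing the TLB-miss path relies on; the bitmap size and addresses are demo assumptions:

#include <stdio.h>

#define CHUNK_SHIFT 28UL   /* 256MB granules, as in KPTE_BITMAP_CHUNK_SZ */

static unsigned long bitmap[64];   /* plenty of bits for this demo */

/* One bit per 256MB of physical memory: clear means "map with a 4MB
 * page via kern_linear_pte_xor[0]", set means "use a 256MB page via
 * kern_linear_pte_xor[1]".
 */
static int use_256mb_page(unsigned long paddr)
{
        unsigned long idx = paddr >> CHUNK_SHIFT;
        return (bitmap[idx / 64] >> (idx % 64)) & 1;
}

int main(void)
{
        bitmap[0] |= 1UL << 1;                /* mark the second 256MB chunk */
        printf("%d %d\n", use_256mb_page(0x00000000UL),
               use_256mb_page(0x10000000UL)); /* -> 0 1 */
        return 0;
}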
@@ -111,11 +130,9 @@ static void __init read_obp_memory(const char *property, | |||
111 | 130 | ||
112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | 131 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; |
113 | 132 | ||
114 | /* Ugly, but necessary... -DaveM */ | 133 | /* Kernel physical address base and size in bytes. */ |
115 | unsigned long phys_base __read_mostly; | ||
116 | unsigned long kern_base __read_mostly; | 134 | unsigned long kern_base __read_mostly; |
117 | unsigned long kern_size __read_mostly; | 135 | unsigned long kern_size __read_mostly; |
118 | unsigned long pfn_base __read_mostly; | ||
119 | 136 | ||
120 | /* get_new_mmu_context() uses "cache + 1". */ | 137 | /* get_new_mmu_context() uses "cache + 1". */ |
121 | DEFINE_SPINLOCK(ctx_alloc_lock); | 138 | DEFINE_SPINLOCK(ctx_alloc_lock); |
@@ -141,24 +158,28 @@ unsigned long sparc64_kern_sec_context __read_mostly; | |||
141 | 158 | ||
142 | int bigkernel = 0; | 159 | int bigkernel = 0; |
143 | 160 | ||
144 | /* XXX Tune this... */ | 161 | kmem_cache_t *pgtable_cache __read_mostly; |
145 | #define PGT_CACHE_LOW 25 | 162 | |
146 | #define PGT_CACHE_HIGH 50 | 163 | static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) |
164 | { | ||
165 | clear_page(addr); | ||
166 | } | ||
167 | |||
168 | extern void tsb_cache_init(void); | ||
147 | 169 | ||
148 | void check_pgt_cache(void) | 170 | void pgtable_cache_init(void) |
149 | { | 171 | { |
150 | preempt_disable(); | 172 | pgtable_cache = kmem_cache_create("pgtable_cache", |
151 | if (pgtable_cache_size > PGT_CACHE_HIGH) { | 173 | PAGE_SIZE, PAGE_SIZE, |
152 | do { | 174 | SLAB_HWCACHE_ALIGN | |
153 | if (pgd_quicklist) | 175 | SLAB_MUST_HWCACHE_ALIGN, |
154 | free_pgd_slow(get_pgd_fast()); | 176 | zero_ctor, |
155 | if (pte_quicklist[0]) | 177 | NULL); |
156 | free_pte_slow(pte_alloc_one_fast(NULL, 0)); | 178 | if (!pgtable_cache) { |
157 | if (pte_quicklist[1]) | 179 | prom_printf("Could not create pgtable_cache\n"); |
158 | free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); | 180 | prom_halt(); |
159 | } while (pgtable_cache_size > PGT_CACHE_LOW); | ||
160 | } | 181 | } |
161 | preempt_enable(); | 182 | tsb_cache_init(); |
162 | } | 183 | } |
163 | 184 | ||
164 | #ifdef CONFIG_DEBUG_DCFLUSH | 185 | #ifdef CONFIG_DEBUG_DCFLUSH |
@@ -168,8 +189,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); | |||
168 | #endif | 189 | #endif |
169 | #endif | 190 | #endif |
170 | 191 | ||
171 | __inline__ void flush_dcache_page_impl(struct page *page) | 192 | inline void flush_dcache_page_impl(struct page *page) |
172 | { | 193 | { |
194 | BUG_ON(tlb_type == hypervisor); | ||
173 | #ifdef CONFIG_DEBUG_DCFLUSH | 195 | #ifdef CONFIG_DEBUG_DCFLUSH |
174 | atomic_inc(&dcpage_flushes); | 196 | atomic_inc(&dcpage_flushes); |
175 | #endif | 197 | #endif |
@@ -186,8 +208,8 @@ __inline__ void flush_dcache_page_impl(struct page *page) | |||
186 | } | 208 | } |
187 | 209 | ||
188 | #define PG_dcache_dirty PG_arch_1 | 210 | #define PG_dcache_dirty PG_arch_1 |
189 | #define PG_dcache_cpu_shift 24 | 211 | #define PG_dcache_cpu_shift 24UL |
190 | #define PG_dcache_cpu_mask (256 - 1) | 212 | #define PG_dcache_cpu_mask (256UL - 1UL) |
191 | 213 | ||
192 | #if NR_CPUS > 256 | 214 | #if NR_CPUS > 256 |
193 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus | 215 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus |
@@ -243,32 +265,61 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c | |||
243 | : "g1", "g7"); | 265 | : "g1", "g7"); |
244 | } | 266 | } |
245 | 267 | ||
268 | static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) | ||
269 | { | ||
270 | unsigned long tsb_addr = (unsigned long) ent; | ||
271 | |||
272 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
273 | tsb_addr = __pa(tsb_addr); | ||
274 | |||
275 | __tsb_insert(tsb_addr, tag, pte); | ||
276 | } | ||
277 | |||
278 | unsigned long _PAGE_ALL_SZ_BITS __read_mostly; | ||
279 | unsigned long _PAGE_SZBITS __read_mostly; | ||
280 | |||
246 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | 281 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
247 | { | 282 | { |
248 | struct page *page; | 283 | struct mm_struct *mm; |
249 | unsigned long pfn; | 284 | struct tsb *tsb; |
250 | unsigned long pg_flags; | 285 | unsigned long tag, flags; |
251 | 286 | ||
252 | pfn = pte_pfn(pte); | 287 | if (tlb_type != hypervisor) { |
253 | if (pfn_valid(pfn) && | 288 | unsigned long pfn = pte_pfn(pte); |
254 | (page = pfn_to_page(pfn), page_mapping(page)) && | 289 | unsigned long pg_flags; |
255 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | 290 | struct page *page; |
256 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & | 291 | |
257 | PG_dcache_cpu_mask); | 292 | if (pfn_valid(pfn) && |
258 | int this_cpu = get_cpu(); | 293 | (page = pfn_to_page(pfn), page_mapping(page)) && |
259 | 294 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | |
260 | /* This is just to optimize away some function calls | 295 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & |
261 | * in the SMP case. | 296 | PG_dcache_cpu_mask); |
262 | */ | 297 | int this_cpu = get_cpu(); |
263 | if (cpu == this_cpu) | 298 | |
264 | flush_dcache_page_impl(page); | 299 | /* This is just to optimize away some function calls |
265 | else | 300 | * in the SMP case. |
266 | smp_flush_dcache_page_impl(page, cpu); | 301 | */ |
302 | if (cpu == this_cpu) | ||
303 | flush_dcache_page_impl(page); | ||
304 | else | ||
305 | smp_flush_dcache_page_impl(page, cpu); | ||
267 | 306 | ||
268 | clear_dcache_dirty_cpu(page, cpu); | 307 | clear_dcache_dirty_cpu(page, cpu); |
269 | 308 | ||
270 | put_cpu(); | 309 | put_cpu(); |
310 | } | ||
271 | } | 311 | } |
312 | |||
313 | mm = vma->vm_mm; | ||
314 | |||
315 | spin_lock_irqsave(&mm->context.lock, flags); | ||
316 | |||
317 | tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & | ||
318 | (mm->context.tsb_nentries - 1UL)]; | ||
319 | tag = (address >> 22UL); | ||
320 | tsb_insert(tsb, tag, pte_val(pte)); | ||
321 | |||
322 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
272 | } | 323 | } |
273 | 324 | ||
274 | void flush_dcache_page(struct page *page) | 325 | void flush_dcache_page(struct page *page) |
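The TSB preload added to update_mmu_cache() above picks the slot from the low VA bits and stores the high bits as the tag, so the miss handler can confirm a hit before using the PTE. The index and tag arithmetic in isolation; the address and TSB size are demo assumptions, and nentries must be a power of two for the mask to work:

#include <stdio.h>

#define PAGE_SHIFT 13UL

int main(void)
{
        unsigned long address = 0x70001234000UL;  /* hypothetical fault VA */
        unsigned long nentries = 512;             /* current TSB size */

        /* Same computation as the tsb/tag lines in update_mmu_cache(). */
        unsigned long index = (address >> PAGE_SHIFT) & (nentries - 1);
        unsigned long tag = address >> 22;

        printf("slot %lu, tag %#lx\n", index, tag);
        return 0;
}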
@@ -276,6 +327,9 @@ void flush_dcache_page(struct page *page) | |||
276 | struct address_space *mapping; | 327 | struct address_space *mapping; |
277 | int this_cpu; | 328 | int this_cpu; |
278 | 329 | ||
330 | if (tlb_type == hypervisor) | ||
331 | return; | ||
332 | |||
279 | /* Do not bother with the expensive D-cache flush if it | 333 | /* Do not bother with the expensive D-cache flush if it |
280 | * is merely the zero page. The 'bigcore' testcase in GDB | 334 | * is merely the zero page. The 'bigcore' testcase in GDB |
281 | * causes this case to run millions of times. | 335 | * causes this case to run millions of times. |
@@ -311,7 +365,7 @@ out: | |||
311 | 365 | ||
312 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) | 366 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) |
313 | { | 367 | { |
314 | /* Cheetah has coherent I-cache. */ | 368 | /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ |
315 | if (tlb_type == spitfire) { | 369 | if (tlb_type == spitfire) { |
316 | unsigned long kaddr; | 370 | unsigned long kaddr; |
317 | 371 | ||
@@ -320,16 +374,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) | |||
320 | } | 374 | } |
321 | } | 375 | } |
322 | 376 | ||
323 | unsigned long page_to_pfn(struct page *page) | ||
324 | { | ||
325 | return (unsigned long) ((page - mem_map) + pfn_base); | ||
326 | } | ||
327 | |||
328 | struct page *pfn_to_page(unsigned long pfn) | ||
329 | { | ||
330 | return (mem_map + (pfn - pfn_base)); | ||
331 | } | ||
332 | |||
333 | void show_mem(void) | 377 | void show_mem(void) |
334 | { | 378 | { |
335 | printk("Mem-info:\n"); | 379 | printk("Mem-info:\n"); |
@@ -338,7 +382,6 @@ void show_mem(void) | |||
338 | nr_swap_pages << (PAGE_SHIFT-10)); | 382 | nr_swap_pages << (PAGE_SHIFT-10)); |
339 | printk("%ld pages of RAM\n", num_physpages); | 383 | printk("%ld pages of RAM\n", num_physpages); |
340 | printk("%d free pages\n", nr_free_pages()); | 384 | printk("%d free pages\n", nr_free_pages()); |
341 | printk("%d pages in page table cache\n",pgtable_cache_size); | ||
342 | } | 385 | } |
343 | 386 | ||
344 | void mmu_info(struct seq_file *m) | 387 | void mmu_info(struct seq_file *m) |
@@ -349,6 +392,8 @@ void mmu_info(struct seq_file *m) | |||
349 | seq_printf(m, "MMU Type\t: Cheetah+\n"); | 392 | seq_printf(m, "MMU Type\t: Cheetah+\n"); |
350 | else if (tlb_type == spitfire) | 393 | else if (tlb_type == spitfire) |
351 | seq_printf(m, "MMU Type\t: Spitfire\n"); | 394 | seq_printf(m, "MMU Type\t: Spitfire\n"); |
395 | else if (tlb_type == hypervisor) | ||
396 | seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); | ||
352 | else | 397 | else |
353 | seq_printf(m, "MMU Type\t: ???\n"); | 398 | seq_printf(m, "MMU Type\t: ???\n"); |
354 | 399 | ||
@@ -371,45 +416,13 @@ struct linux_prom_translation { | |||
371 | /* Exported for kernel TLB miss handling in ktlb.S */ | 416 | /* Exported for kernel TLB miss handling in ktlb.S */ |
372 | struct linux_prom_translation prom_trans[512] __read_mostly; | 417 | struct linux_prom_translation prom_trans[512] __read_mostly; |
373 | unsigned int prom_trans_ents __read_mostly; | 418 | unsigned int prom_trans_ents __read_mostly; |
374 | unsigned int swapper_pgd_zero __read_mostly; | ||
375 | |||
376 | extern unsigned long prom_boot_page; | ||
377 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | ||
378 | extern int prom_get_mmu_ihandle(void); | ||
379 | extern void register_prom_callbacks(void); | ||
380 | 419 | ||
381 | /* Exported for SMP bootup purposes. */ | 420 | /* Exported for SMP bootup purposes. */ |
382 | unsigned long kern_locked_tte_data; | 421 | unsigned long kern_locked_tte_data; |
383 | 422 | ||
384 | /* | ||
385 | * Translate PROM's mapping we capture at boot time into physical address. | ||
386 | * The second parameter is only set from prom_callback() invocations. | ||
387 | */ | ||
388 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | ||
389 | { | ||
390 | int i; | ||
391 | |||
392 | for (i = 0; i < prom_trans_ents; i++) { | ||
393 | struct linux_prom_translation *p = &prom_trans[i]; | ||
394 | |||
395 | if (promva >= p->virt && | ||
396 | promva < (p->virt + p->size)) { | ||
397 | unsigned long base = p->data & _PAGE_PADDR; | ||
398 | |||
399 | if (error) | ||
400 | *error = 0; | ||
401 | return base + (promva & (8192 - 1)); | ||
402 | } | ||
403 | } | ||
404 | if (error) | ||
405 | *error = 1; | ||
406 | return 0UL; | ||
407 | } | ||
408 | |||
409 | /* The obp translations are saved based on 8k pagesize, since obp can | 423 | /* The obp translations are saved based on 8k pagesize, since obp can |
410 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | 424 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> |
411 | * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte | 425 | * HI_OBP_ADDRESS range are handled in ktlb.S. |
412 | * scheme (also, see rant in inherit_locked_prom_mappings()). | ||
413 | */ | 426 | */ |
414 | static inline int in_obp_range(unsigned long vaddr) | 427 | static inline int in_obp_range(unsigned long vaddr) |
415 | { | 428 | { |
@@ -490,6 +503,36 @@ static void __init read_obp_translations(void) | |||
490 | } | 503 | } |
491 | } | 504 | } |
492 | 505 | ||
506 | static void __init hypervisor_tlb_lock(unsigned long vaddr, | ||
507 | unsigned long pte, | ||
508 | unsigned long mmu) | ||
509 | { | ||
510 | register unsigned long func asm("%o5"); | ||
511 | register unsigned long arg0 asm("%o0"); | ||
512 | register unsigned long arg1 asm("%o1"); | ||
513 | register unsigned long arg2 asm("%o2"); | ||
514 | register unsigned long arg3 asm("%o3"); | ||
515 | |||
516 | func = HV_FAST_MMU_MAP_PERM_ADDR; | ||
517 | arg0 = vaddr; | ||
518 | arg1 = 0; | ||
519 | arg2 = pte; | ||
520 | arg3 = mmu; | ||
521 | __asm__ __volatile__("ta 0x80" | ||
522 | : "=&r" (func), "=&r" (arg0), | ||
523 | "=&r" (arg1), "=&r" (arg2), | ||
524 | "=&r" (arg3) | ||
525 | : "0" (func), "1" (arg0), "2" (arg1), | ||
526 | "3" (arg2), "4" (arg3)); | ||
527 | if (arg0 != 0) { | ||
528 | prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " | ||
529 | "errors with %lx\n", vaddr, 0, pte, mmu, arg0); | ||
530 | prom_halt(); | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static unsigned long kern_large_tte(unsigned long paddr); | ||
535 | |||
493 | static void __init remap_kernel(void) | 536 | static void __init remap_kernel(void) |
494 | { | 537 | { |
495 | unsigned long phys_page, tte_vaddr, tte_data; | 538 | unsigned long phys_page, tte_vaddr, tte_data; |
@@ -497,25 +540,34 @@ static void __init remap_kernel(void) | |||
497 | 540 | ||
498 | tte_vaddr = (unsigned long) KERNBASE; | 541 | tte_vaddr = (unsigned long) KERNBASE; |
499 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | 542 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
500 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | | 543 | tte_data = kern_large_tte(phys_page); |
501 | _PAGE_CP | _PAGE_CV | _PAGE_P | | ||
502 | _PAGE_L | _PAGE_W)); | ||
503 | 544 | ||
504 | kern_locked_tte_data = tte_data; | 545 | kern_locked_tte_data = tte_data; |
505 | 546 | ||
506 | /* Now lock us into the TLBs via OBP. */ | 547 | /* Now lock us into the TLBs via Hypervisor or OBP. */ |
507 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | 548 | if (tlb_type == hypervisor) { |
508 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | 549 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); |
509 | if (bigkernel) { | 550 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); |
510 | tlb_ent -= 1; | 551 | if (bigkernel) { |
511 | prom_dtlb_load(tlb_ent, | 552 | tte_vaddr += 0x400000; |
512 | tte_data + 0x400000, | 553 | tte_data += 0x400000; |
513 | tte_vaddr + 0x400000); | 554 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); |
514 | prom_itlb_load(tlb_ent, | 555 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); |
515 | tte_data + 0x400000, | 556 | } |
516 | tte_vaddr + 0x400000); | 557 | } else { |
558 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); | ||
559 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | ||
560 | if (bigkernel) { | ||
561 | tlb_ent -= 1; | ||
562 | prom_dtlb_load(tlb_ent, | ||
563 | tte_data + 0x400000, | ||
564 | tte_vaddr + 0x400000); | ||
565 | prom_itlb_load(tlb_ent, | ||
566 | tte_data + 0x400000, | ||
567 | tte_vaddr + 0x400000); | ||
568 | } | ||
569 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | ||
517 | } | 570 | } |
518 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | ||
519 | if (tlb_type == cheetah_plus) { | 571 | if (tlb_type == cheetah_plus) { |
520 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | 572 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | |
521 | CTX_CHEETAH_PLUS_NUC); | 573 | CTX_CHEETAH_PLUS_NUC); |
@@ -533,372 +585,14 @@ static void __init inherit_prom_mappings(void) | |||
533 | prom_printf("Remapping the kernel... "); | 585 | prom_printf("Remapping the kernel... "); |
534 | remap_kernel(); | 586 | remap_kernel(); |
535 | prom_printf("done.\n"); | 587 | prom_printf("done.\n"); |
536 | |||
537 | prom_printf("Registering callbacks... "); | ||
538 | register_prom_callbacks(); | ||
539 | prom_printf("done.\n"); | ||
540 | } | ||
541 | |||
542 | /* The OBP specifications for sun4u mark 0xfffffffc00000000 and | ||
543 | * upwards as reserved for use by the firmware (I wonder if this | ||
544 | * will be the same on Cheetah...). We use this virtual address | ||
545 | * range for the VPTE table mappings of the nucleus so we need | ||
546 | * to zap them when we enter the PROM. -DaveM | ||
547 | */ | ||
548 | static void __flush_nucleus_vptes(void) | ||
549 | { | ||
550 | unsigned long prom_reserved_base = 0xfffffffc00000000UL; | ||
551 | int i; | ||
552 | |||
553 | /* Only DTLB must be checked for VPTE entries. */ | ||
554 | if (tlb_type == spitfire) { | ||
555 | for (i = 0; i < 63; i++) { | ||
556 | unsigned long tag; | ||
557 | |||
558 | /* Spitfire Errata #32 workaround */ | ||
559 | /* NOTE: Always runs on spitfire, so no cheetah+ | ||
560 | * page size encodings. | ||
561 | */ | ||
562 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
563 | "flush %%g6" | ||
564 | : /* No outputs */ | ||
565 | : "r" (0), | ||
566 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
567 | |||
568 | tag = spitfire_get_dtlb_tag(i); | ||
569 | if (((tag & ~(PAGE_MASK)) == 0) && | ||
570 | ((tag & (PAGE_MASK)) >= prom_reserved_base)) { | ||
571 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
572 | "membar #Sync" | ||
573 | : /* no outputs */ | ||
574 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
575 | spitfire_put_dtlb_data(i, 0x0UL); | ||
576 | } | ||
577 | } | ||
578 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
579 | for (i = 0; i < 512; i++) { | ||
580 | unsigned long tag = cheetah_get_dtlb_tag(i, 2); | ||
581 | |||
582 | if ((tag & ~PAGE_MASK) == 0 && | ||
583 | (tag & PAGE_MASK) >= prom_reserved_base) { | ||
584 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
585 | "membar #Sync" | ||
586 | : /* no outputs */ | ||
587 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
588 | cheetah_put_dtlb_data(i, 0x0UL, 2); | ||
589 | } | ||
590 | |||
591 | if (tlb_type != cheetah_plus) | ||
592 | continue; | ||
593 | |||
594 | tag = cheetah_get_dtlb_tag(i, 3); | ||
595 | |||
596 | if ((tag & ~PAGE_MASK) == 0 && | ||
597 | (tag & PAGE_MASK) >= prom_reserved_base) { | ||
598 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
599 | "membar #Sync" | ||
600 | : /* no outputs */ | ||
601 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
602 | cheetah_put_dtlb_data(i, 0x0UL, 3); | ||
603 | } | ||
604 | } | ||
605 | } else { | ||
606 | /* Implement me :-) */ | ||
607 | BUG(); | ||
608 | } | ||
609 | } | 588 | } |
610 | 589 | ||
611 | static int prom_ditlb_set; | ||
612 | struct prom_tlb_entry { | ||
613 | int tlb_ent; | ||
614 | unsigned long tlb_tag; | ||
615 | unsigned long tlb_data; | ||
616 | }; | ||
617 | struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; | ||
618 | |||
619 | void prom_world(int enter) | 590 | void prom_world(int enter) |
620 | { | 591 | { |
621 | unsigned long pstate; | ||
622 | int i; | ||
623 | |||
624 | if (!enter) | 592 | if (!enter) |
625 | set_fs((mm_segment_t) { get_thread_current_ds() }); | 593 | set_fs((mm_segment_t) { get_thread_current_ds() }); |
626 | 594 | ||
627 | if (!prom_ditlb_set) | 595 | __asm__ __volatile__("flushw"); |
628 | return; | ||
629 | |||
630 | /* Make sure the following runs atomically. */ | ||
631 | __asm__ __volatile__("flushw\n\t" | ||
632 | "rdpr %%pstate, %0\n\t" | ||
633 | "wrpr %0, %1, %%pstate" | ||
634 | : "=r" (pstate) | ||
635 | : "i" (PSTATE_IE)); | ||
636 | |||
637 | if (enter) { | ||
638 | /* Kick out nucleus VPTEs. */ | ||
639 | __flush_nucleus_vptes(); | ||
640 | |||
641 | /* Install PROM world. */ | ||
642 | for (i = 0; i < 16; i++) { | ||
643 | if (prom_dtlb[i].tlb_ent != -1) { | ||
644 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
645 | "membar #Sync" | ||
646 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | ||
647 | "i" (ASI_DMMU)); | ||
648 | if (tlb_type == spitfire) | ||
649 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | ||
650 | prom_dtlb[i].tlb_data); | ||
651 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
652 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | ||
653 | prom_dtlb[i].tlb_data); | ||
654 | } | ||
655 | if (prom_itlb[i].tlb_ent != -1) { | ||
656 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
657 | "membar #Sync" | ||
658 | : : "r" (prom_itlb[i].tlb_tag), | ||
659 | "r" (TLB_TAG_ACCESS), | ||
660 | "i" (ASI_IMMU)); | ||
661 | if (tlb_type == spitfire) | ||
662 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | ||
663 | prom_itlb[i].tlb_data); | ||
664 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
665 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | ||
666 | prom_itlb[i].tlb_data); | ||
667 | } | ||
668 | } | ||
669 | } else { | ||
670 | for (i = 0; i < 16; i++) { | ||
671 | if (prom_dtlb[i].tlb_ent != -1) { | ||
672 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
673 | "membar #Sync" | ||
674 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
675 | if (tlb_type == spitfire) | ||
676 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | ||
677 | else | ||
678 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | ||
679 | } | ||
680 | if (prom_itlb[i].tlb_ent != -1) { | ||
681 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
682 | "membar #Sync" | ||
683 | : : "r" (TLB_TAG_ACCESS), | ||
684 | "i" (ASI_IMMU)); | ||
685 | if (tlb_type == spitfire) | ||
686 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); | ||
687 | else | ||
688 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); | ||
689 | } | ||
690 | } | ||
691 | } | ||
692 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
693 | : : "r" (pstate)); | ||
694 | } | ||
695 | |||
696 | void inherit_locked_prom_mappings(int save_p) | ||
697 | { | ||
698 | int i; | ||
699 | int dtlb_seen = 0; | ||
700 | int itlb_seen = 0; | ||
701 | |||
702 | /* Fucking losing PROM has more mappings in the TLB, but | ||
703 | * it (conveniently) fails to mention any of these in the | ||
704 | * translations property. The only ones that matter are | ||
705 | * the locked PROM tlb entries, so we impose the following | ||
706 | * irrecovable rule on the PROM, it is allowed 8 locked | ||
707 | * entries in the ITLB and 8 in the DTLB. | ||
708 | * | ||
709 | * Supposedly the upper 16GB of the address space is | ||
710 | * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED | ||
711 | * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface | ||
712 | * used between the client program and the firmware on sun5 | ||
713 | * systems to coordinate mmu mappings is also COMPLETELY | ||
714 | * UNDOCUMENTED!!!!!! Thanks S(t)un! | ||
715 | */ | ||
716 | if (save_p) { | ||
717 | for (i = 0; i < 16; i++) { | ||
718 | prom_itlb[i].tlb_ent = -1; | ||
719 | prom_dtlb[i].tlb_ent = -1; | ||
720 | } | ||
721 | } | ||
722 | if (tlb_type == spitfire) { | ||
723 | int high = sparc64_highest_unlocked_tlb_ent; | ||
724 | for (i = 0; i <= high; i++) { | ||
725 | unsigned long data; | ||
726 | |||
727 | /* Spitfire Errata #32 workaround */ | ||
728 | /* NOTE: Always runs on spitfire, so no cheetah+ | ||
729 | * page size encodings. | ||
730 | */ | ||
731 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
732 | "flush %%g6" | ||
733 | : /* No outputs */ | ||
734 | : "r" (0), | ||
735 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
736 | |||
737 | data = spitfire_get_dtlb_data(i); | ||
738 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
739 | unsigned long tag; | ||
740 | |||
741 | /* Spitfire Errata #32 workaround */ | ||
742 | /* NOTE: Always runs on spitfire, so no | ||
743 | * cheetah+ page size encodings. | ||
744 | */ | ||
745 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
746 | "flush %%g6" | ||
747 | : /* No outputs */ | ||
748 | : "r" (0), | ||
749 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
750 | |||
751 | tag = spitfire_get_dtlb_tag(i); | ||
752 | if (save_p) { | ||
753 | prom_dtlb[dtlb_seen].tlb_ent = i; | ||
754 | prom_dtlb[dtlb_seen].tlb_tag = tag; | ||
755 | prom_dtlb[dtlb_seen].tlb_data = data; | ||
756 | } | ||
757 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
758 | "membar #Sync" | ||
759 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
760 | spitfire_put_dtlb_data(i, 0x0UL); | ||
761 | |||
762 | dtlb_seen++; | ||
763 | if (dtlb_seen > 15) | ||
764 | break; | ||
765 | } | ||
766 | } | ||
767 | |||
768 | for (i = 0; i < high; i++) { | ||
769 | unsigned long data; | ||
770 | |||
771 | /* Spitfire Errata #32 workaround */ | ||
772 | /* NOTE: Always runs on spitfire, so no | ||
773 | * cheetah+ page size encodings. | ||
774 | */ | ||
775 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
776 | "flush %%g6" | ||
777 | : /* No outputs */ | ||
778 | : "r" (0), | ||
779 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
780 | |||
781 | data = spitfire_get_itlb_data(i); | ||
782 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
783 | unsigned long tag; | ||
784 | |||
785 | /* Spitfire Errata #32 workaround */ | ||
786 | /* NOTE: Always runs on spitfire, so no | ||
787 | * cheetah+ page size encodings. | ||
788 | */ | ||
789 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
790 | "flush %%g6" | ||
791 | : /* No outputs */ | ||
792 | : "r" (0), | ||
793 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
794 | |||
795 | tag = spitfire_get_itlb_tag(i); | ||
796 | if (save_p) { | ||
797 | prom_itlb[itlb_seen].tlb_ent = i; | ||
798 | prom_itlb[itlb_seen].tlb_tag = tag; | ||
799 | prom_itlb[itlb_seen].tlb_data = data; | ||
800 | } | ||
801 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
802 | "membar #Sync" | ||
803 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
804 | spitfire_put_itlb_data(i, 0x0UL); | ||
805 | |||
806 | itlb_seen++; | ||
807 | if (itlb_seen > 15) | ||
808 | break; | ||
809 | } | ||
810 | } | ||
811 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
812 | int high = sparc64_highest_unlocked_tlb_ent; | ||
813 | |||
814 | for (i = 0; i <= high; i++) { | ||
815 | unsigned long data; | ||
816 | |||
817 | data = cheetah_get_ldtlb_data(i); | ||
818 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
819 | unsigned long tag; | ||
820 | |||
821 | tag = cheetah_get_ldtlb_tag(i); | ||
822 | if (save_p) { | ||
823 | prom_dtlb[dtlb_seen].tlb_ent = i; | ||
824 | prom_dtlb[dtlb_seen].tlb_tag = tag; | ||
825 | prom_dtlb[dtlb_seen].tlb_data = data; | ||
826 | } | ||
827 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
828 | "membar #Sync" | ||
829 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
830 | cheetah_put_ldtlb_data(i, 0x0UL); | ||
831 | |||
832 | dtlb_seen++; | ||
833 | if (dtlb_seen > 15) | ||
834 | break; | ||
835 | } | ||
836 | } | ||
837 | |||
838 | for (i = 0; i < high; i++) { | ||
839 | unsigned long data; | ||
840 | |||
841 | data = cheetah_get_litlb_data(i); | ||
842 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | ||
843 | unsigned long tag; | ||
844 | |||
845 | tag = cheetah_get_litlb_tag(i); | ||
846 | if (save_p) { | ||
847 | prom_itlb[itlb_seen].tlb_ent = i; | ||
848 | prom_itlb[itlb_seen].tlb_tag = tag; | ||
849 | prom_itlb[itlb_seen].tlb_data = data; | ||
850 | } | ||
851 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
852 | "membar #Sync" | ||
853 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
854 | cheetah_put_litlb_data(i, 0x0UL); | ||
855 | |||
856 | itlb_seen++; | ||
857 | if (itlb_seen > 15) | ||
858 | break; | ||
859 | } | ||
860 | } | ||
861 | } else { | ||
862 | /* Implement me :-) */ | ||
863 | BUG(); | ||
864 | } | ||
865 | if (save_p) | ||
866 | prom_ditlb_set = 1; | ||
867 | } | ||
868 | |||
869 | /* Give PROM back his world, done during reboots... */ | ||
870 | void prom_reload_locked(void) | ||
871 | { | ||
872 | int i; | ||
873 | |||
874 | for (i = 0; i < 16; i++) { | ||
875 | if (prom_dtlb[i].tlb_ent != -1) { | ||
876 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
877 | "membar #Sync" | ||
878 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | ||
879 | "i" (ASI_DMMU)); | ||
880 | if (tlb_type == spitfire) | ||
881 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | ||
882 | prom_dtlb[i].tlb_data); | ||
883 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
884 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | ||
885 | prom_dtlb[i].tlb_data); | ||
886 | } | ||
887 | |||
888 | if (prom_itlb[i].tlb_ent != -1) { | ||
889 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
890 | "membar #Sync" | ||
891 | : : "r" (prom_itlb[i].tlb_tag), | ||
892 | "r" (TLB_TAG_ACCESS), | ||
893 | "i" (ASI_IMMU)); | ||
894 | if (tlb_type == spitfire) | ||
895 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | ||
896 | prom_itlb[i].tlb_data); | ||
897 | else | ||
898 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | ||
899 | prom_itlb[i].tlb_data); | ||
900 | } | ||
901 | } | ||
902 | } | 596 | } |
903 | 597 | ||
904 | #ifdef DCACHE_ALIASING_POSSIBLE | 598 | #ifdef DCACHE_ALIASING_POSSIBLE |
@@ -914,7 +608,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
914 | if (++n >= 512) | 608 | if (++n >= 512) |
915 | break; | 609 | break; |
916 | } | 610 | } |
917 | } else { | 611 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
918 | start = __pa(start); | 612 | start = __pa(start); |
919 | end = __pa(end); | 613 | end = __pa(end); |
920 | for (va = start; va < end; va += 32) | 614 | for (va = start; va < end; va += 32) |
@@ -927,63 +621,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
927 | } | 621 | } |
928 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 622 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
929 | 623 | ||
930 | /* If not locked, zap it. */ | ||
931 | void __flush_tlb_all(void) | ||
932 | { | ||
933 | unsigned long pstate; | ||
934 | int i; | ||
935 | |||
936 | __asm__ __volatile__("flushw\n\t" | ||
937 | "rdpr %%pstate, %0\n\t" | ||
938 | "wrpr %0, %1, %%pstate" | ||
939 | : "=r" (pstate) | ||
940 | : "i" (PSTATE_IE)); | ||
941 | if (tlb_type == spitfire) { | ||
942 | for (i = 0; i < 64; i++) { | ||
943 | /* Spitfire Errata #32 workaround */ | ||
944 | /* NOTE: Always runs on spitfire, so no | ||
945 | * cheetah+ page size encodings. | ||
946 | */ | ||
947 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
948 | "flush %%g6" | ||
949 | : /* No outputs */ | ||
950 | : "r" (0), | ||
951 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
952 | |||
953 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { | ||
954 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
955 | "membar #Sync" | ||
956 | : /* no outputs */ | ||
957 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
958 | spitfire_put_dtlb_data(i, 0x0UL); | ||
959 | } | ||
960 | |||
961 | /* Spitfire Errata #32 workaround */ | ||
962 | /* NOTE: Always runs on spitfire, so no | ||
963 | * cheetah+ page size encodings. | ||
964 | */ | ||
965 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
966 | "flush %%g6" | ||
967 | : /* No outputs */ | ||
968 | : "r" (0), | ||
969 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
970 | |||
971 | if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { | ||
972 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
973 | "membar #Sync" | ||
974 | : /* no outputs */ | ||
975 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
976 | spitfire_put_itlb_data(i, 0x0UL); | ||
977 | } | ||
978 | } | ||
979 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
980 | cheetah_flush_dtlb_all(); | ||
981 | cheetah_flush_itlb_all(); | ||
982 | } | ||
983 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
984 | : : "r" (pstate)); | ||
985 | } | ||
986 | |||
987 | /* Caller does TLB context flushing on local CPU if necessary. | 624 | /* Caller does TLB context flushing on local CPU if necessary. |
988 | * The caller also ensures that CTX_VALID(mm->context) is false. | 625 | * The caller also ensures that CTX_VALID(mm->context) is false. |
989 | * | 626 | * |
@@ -991,17 +628,21 @@ void __flush_tlb_all(void) | |||
991 | * let the user have CTX 0 (nucleus) or we ever use a CTX | 628 | * let the user have CTX 0 (nucleus) or we ever use a CTX |
992 | * version of zero (and thus NO_CONTEXT would not be caught | 629 | * version of zero (and thus NO_CONTEXT would not be caught |
993 | * by version mis-match tests in mmu_context.h). | 630 | * by version mis-match tests in mmu_context.h). |
631 | * | ||
632 | * Always invoked with interrupts disabled. | ||
994 | */ | 633 | */ |
995 | void get_new_mmu_context(struct mm_struct *mm) | 634 | void get_new_mmu_context(struct mm_struct *mm) |
996 | { | 635 | { |
997 | unsigned long ctx, new_ctx; | 636 | unsigned long ctx, new_ctx; |
998 | unsigned long orig_pgsz_bits; | 637 | unsigned long orig_pgsz_bits; |
999 | 638 | unsigned long flags; | |
639 | int new_version; | ||
1000 | 640 | ||
1001 | spin_lock(&ctx_alloc_lock); | 641 | spin_lock_irqsave(&ctx_alloc_lock, flags); |
1002 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | 642 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); |
1003 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | 643 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; |
1004 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | 644 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); |
645 | new_version = 0; | ||
1005 | if (new_ctx >= (1 << CTX_NR_BITS)) { | 646 | if (new_ctx >= (1 << CTX_NR_BITS)) { |
1006 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | 647 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); |
1007 | if (new_ctx >= ctx) { | 648 | if (new_ctx >= ctx) { |
@@ -1024,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm) | |||
1024 | mmu_context_bmap[i + 2] = 0; | 665 | mmu_context_bmap[i + 2] = 0; |
1025 | mmu_context_bmap[i + 3] = 0; | 666 | mmu_context_bmap[i + 3] = 0; |
1026 | } | 667 | } |
668 | new_version = 1; | ||
1027 | goto out; | 669 | goto out; |
1028 | } | 670 | } |
1029 | } | 671 | } |
@@ -1032,79 +674,10 @@ void get_new_mmu_context(struct mm_struct *mm) | |||
1032 | out: | 674 | out: |
1033 | tlb_context_cache = new_ctx; | 675 | tlb_context_cache = new_ctx; |
1034 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | 676 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; |
1035 | spin_unlock(&ctx_alloc_lock); | 677 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); |
1036 | } | ||
1037 | |||
1038 | #ifndef CONFIG_SMP | ||
1039 | struct pgtable_cache_struct pgt_quicklists; | ||
1040 | #endif | ||
1041 | |||
1042 | /* OK, we have to color these pages. The page tables are accessed | ||
1043 | * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S | ||
1044 | * code, as well as by PAGE_OFFSET range direct-mapped addresses by | ||
1045 | * other parts of the kernel. By coloring, we make sure that the tlbmiss | ||
1046 | * fast handlers do not get data from old/garbage dcache lines that | ||
1047 | * correspond to an old/stale virtual address (user/kernel) that | ||
1048 | * previously mapped the pagetable page while accessing vpte range | ||
1049 | * addresses. The idea is that if the vpte color and PAGE_OFFSET range | ||
1050 | * color is the same, then when the kernel initializes the pagetable | ||
1051 | * using the later address range, accesses with the first address | ||
1052 | * range will see the newly initialized data rather than the garbage. | ||
1053 | */ | ||
1054 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1055 | #define DC_ALIAS_SHIFT 1 | ||
1056 | #else | ||
1057 | #define DC_ALIAS_SHIFT 0 | ||
1058 | #endif | ||
1059 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
1060 | { | ||
1061 | struct page *page; | ||
1062 | unsigned long color; | ||
1063 | |||
1064 | { | ||
1065 | pte_t *ptep = pte_alloc_one_fast(mm, address); | ||
1066 | |||
1067 | if (ptep) | ||
1068 | return ptep; | ||
1069 | } | ||
1070 | 678 | ||
1071 | color = VPTE_COLOR(address); | 679 | if (unlikely(new_version)) |
1072 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); | 680 | smp_new_mmu_context_version(); |
1073 | if (page) { | ||
1074 | unsigned long *to_free; | ||
1075 | unsigned long paddr; | ||
1076 | pte_t *pte; | ||
1077 | |||
1078 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1079 | set_page_count(page, 1); | ||
1080 | ClearPageCompound(page); | ||
1081 | |||
1082 | set_page_count((page + 1), 1); | ||
1083 | ClearPageCompound(page + 1); | ||
1084 | #endif | ||
1085 | paddr = (unsigned long) page_address(page); | ||
1086 | memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); | ||
1087 | |||
1088 | if (!color) { | ||
1089 | pte = (pte_t *) paddr; | ||
1090 | to_free = (unsigned long *) (paddr + PAGE_SIZE); | ||
1091 | } else { | ||
1092 | pte = (pte_t *) (paddr + PAGE_SIZE); | ||
1093 | to_free = (unsigned long *) paddr; | ||
1094 | } | ||
1095 | |||
1096 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
1097 | /* Now free the other one up, adjust cache size. */ | ||
1098 | preempt_disable(); | ||
1099 | *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; | ||
1100 | pte_quicklist[color ^ 0x1] = to_free; | ||
1101 | pgtable_cache_size++; | ||
1102 | preempt_enable(); | ||
1103 | #endif | ||
1104 | |||
1105 | return pte; | ||
1106 | } | ||
1107 | return NULL; | ||
1108 | } | 681 | } |
1109 | 682 | ||
1110 | void sparc_ultra_dump_itlb(void) | 683 | void sparc_ultra_dump_itlb(void) |
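get_new_mmu_context() above is a round-robin bitmap allocator: scan forward from the last context handed out, wrap once, and only when even the wrap finds nothing free clear the map and bump the version so stale contexts elsewhere can be detected. A tiny model of that shape; the 16-entry space, the helpers, and the printf standing in for smp_new_mmu_context_version() are all demo assumptions:

#include <stdio.h>
#include <string.h>

#define NR_CTX 16UL   /* tiny context space so the wrap is visible */

static unsigned long bmap;   /* 1 bit per context, bit 0 reserved */
static unsigned long cache;  /* last context handed out */

static int test_and_set(unsigned long n)
{
        int was = (bmap >> n) & 1;
        bmap |= 1UL << n;
        return was;
}

static unsigned long get_new_ctx(void)
{
        unsigned long ctx = (cache + 1) % NR_CTX;

        for (unsigned long tries = 0; tries < NR_CTX; tries++) {
                unsigned long c = (ctx + tries) % NR_CTX;
                if (c != 0 && !test_and_set(c))
                        return cache = c;
        }
        bmap = 0;                        /* wrap: start a new version */
        printf("version bump\n");        /* smp_new_mmu_context_version() */
        test_and_set(cache = 1);
        return 1;
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                printf("ctx %lu\n", get_new_ctx());
        return 0;
}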
@@ -1196,9 +769,78 @@ void sparc_ultra_dump_dtlb(void) | |||
1196 | 769 | ||
1197 | extern unsigned long cmdline_memory_size; | 770 | extern unsigned long cmdline_memory_size; |
1198 | 771 | ||
1199 | unsigned long __init bootmem_init(unsigned long *pages_avail) | 772 | /* Find a free area for the bootmem map, avoiding the kernel image |
773 | * and the initial ramdisk. | ||
774 | */ | ||
775 | static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, | ||
776 | unsigned long end_pfn) | ||
777 | { | ||
778 | unsigned long avoid_start, avoid_end, bootmap_size; | ||
779 | int i; | ||
780 | |||
781 | bootmap_size = ((end_pfn - start_pfn) + 7) / 8; | ||
782 | bootmap_size = ALIGN(bootmap_size, sizeof(long)); | ||
783 | |||
784 | avoid_start = avoid_end = 0; | ||
785 | #ifdef CONFIG_BLK_DEV_INITRD | ||
786 | avoid_start = initrd_start; | ||
787 | avoid_end = PAGE_ALIGN(initrd_end); | ||
788 | #endif | ||
789 | |||
790 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
791 | prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", | ||
792 | kern_base, PAGE_ALIGN(kern_base + kern_size), | ||
793 | avoid_start, avoid_end); | ||
794 | #endif | ||
795 | for (i = 0; i < pavail_ents; i++) { | ||
796 | unsigned long start, end; | ||
797 | |||
798 | start = pavail[i].phys_addr; | ||
799 | end = start + pavail[i].reg_size; | ||
800 | |||
801 | while (start < end) { | ||
802 | if (start >= kern_base && | ||
803 | start < PAGE_ALIGN(kern_base + kern_size)) { | ||
804 | start = PAGE_ALIGN(kern_base + kern_size); | ||
805 | continue; | ||
806 | } | ||
807 | if (start >= avoid_start && start < avoid_end) { | ||
808 | start = avoid_end; | ||
809 | continue; | ||
810 | } | ||
811 | |||
812 | if ((end - start) < bootmap_size) | ||
813 | break; | ||
814 | |||
815 | if (start < kern_base && | ||
816 | (start + bootmap_size) > kern_base) { | ||
817 | start = PAGE_ALIGN(kern_base + kern_size); | ||
818 | continue; | ||
819 | } | ||
820 | |||
821 | if (start < avoid_start && | ||
822 | (start + bootmap_size) > avoid_start) { | ||
823 | start = avoid_end; | ||
824 | continue; | ||
825 | } | ||
826 | |||
827 | /* OK, it doesn't overlap anything, use it. */ | ||
828 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
829 | prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", | ||
830 | start >> PAGE_SHIFT, start); | ||
831 | #endif | ||
832 | return start >> PAGE_SHIFT; | ||
833 | } | ||
834 | } | ||
835 | |||
836 | prom_printf("Cannot find free area for bootmap, aborting.\n"); | ||
837 | prom_halt(); | ||
838 | } | ||
839 | |||
840 | static unsigned long __init bootmem_init(unsigned long *pages_avail, | ||
841 | unsigned long phys_base) | ||
1200 | { | 842 | { |
1201 | unsigned long bootmap_size, start_pfn, end_pfn; | 843 | unsigned long bootmap_size, end_pfn; |
1202 | unsigned long end_of_phys_memory = 0UL; | 844 | unsigned long end_of_phys_memory = 0UL; |
1203 | unsigned long bootmap_pfn, bytes_avail, size; | 845 | unsigned long bootmap_pfn, bytes_avail, size; |
1204 | int i; | 846 | int i; |
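choose_bootmap_pfn() above walks each memory bank and restarts the candidate just past any region it must avoid. The same forward-restart loop in a runnable form; the avoid list and bank bounds are demo assumptions, and the real code additionally works in page units and PAGE_ALIGNs the restarts:

#include <stdio.h>

struct range { unsigned long start, end; };

/* Scan candidate placements inside one memory bank, jumping past any
 * region that overlaps the kernel image or the initrd.
 */
static unsigned long place(unsigned long start, unsigned long end,
                           unsigned long size,
                           const struct range *avoid, int n)
{
        while (start + size <= end) {
                int moved = 0;
                for (int i = 0; i < n; i++) {
                        if (start < avoid[i].end &&
                            start + size > avoid[i].start) {
                                start = avoid[i].end;  /* jump past it */
                                moved = 1;
                                break;
                        }
                }
                if (!moved)
                        return start;   /* overlaps nothing, use it */
        }
        return ~0UL;                    /* no room in this bank */
}

int main(void)
{
        struct range avoid[] = { { 0x400000, 0x800000 },   /* kernel */
                                 { 0x900000, 0xa00000 } }; /* initrd */
        printf("%#lx\n", place(0x400000, 0x2000000, 0x200000, avoid, 2));
        return 0;
}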
@@ -1236,14 +878,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1236 | 878 | ||
1237 | *pages_avail = bytes_avail >> PAGE_SHIFT; | 879 | *pages_avail = bytes_avail >> PAGE_SHIFT; |
1238 | 880 | ||
1239 | /* Start with page aligned address of last symbol in kernel | ||
1240 | * image. The kernel is hard mapped below PAGE_OFFSET in a | ||
1241 | * 4MB locked TLB translation. | ||
1242 | */ | ||
1243 | start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; | ||
1244 | |||
1245 | bootmap_pfn = start_pfn; | ||
1246 | |||
1247 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; | 881 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; |
1248 | 882 | ||
1249 | #ifdef CONFIG_BLK_DEV_INITRD | 883 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -1260,23 +894,22 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1260 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | 894 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", |
1261 | initrd_end, end_of_phys_memory); | 895 | initrd_end, end_of_phys_memory); |
1262 | initrd_start = 0; | 896 | initrd_start = 0; |
1263 | } | 897 | initrd_end = 0; |
1264 | if (initrd_start) { | ||
1265 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | ||
1266 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | ||
1267 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | ||
1268 | } | 898 | } |
1269 | } | 899 | } |
1270 | #endif | 900 | #endif |
1271 | /* Initialize the boot-time allocator. */ | 901 | /* Initialize the boot-time allocator. */ |
1272 | max_pfn = max_low_pfn = end_pfn; | 902 | max_pfn = max_low_pfn = end_pfn; |
1273 | min_low_pfn = pfn_base; | 903 | min_low_pfn = (phys_base >> PAGE_SHIFT); |
904 | |||
905 | bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn); | ||
1274 | 906 | ||
1275 | #ifdef CONFIG_DEBUG_BOOTMEM | 907 | #ifdef CONFIG_DEBUG_BOOTMEM |
1276 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", | 908 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", |
1277 | min_low_pfn, bootmap_pfn, max_low_pfn); | 909 | min_low_pfn, bootmap_pfn, max_low_pfn); |
1278 | #endif | 910 | #endif |
1279 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | 911 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, |
912 | min_low_pfn, end_pfn); | ||
1280 | 913 | ||
1281 | /* Now register the available physical memory with the | 914 | /* Now register the available physical memory with the |
1282 | * allocator. | 915 | * allocator. |
@@ -1324,9 +957,26 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
1324 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | 957 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); |
1325 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | 958 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; |
1326 | 959 | ||
960 | for (i = 0; i < pavail_ents; i++) { | ||
961 | unsigned long start_pfn, end_pfn; | ||
962 | |||
963 | start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; | ||
964 | end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); | ||
965 | #ifdef CONFIG_DEBUG_BOOTMEM | ||
966 | prom_printf("memory_present(0, %lx, %lx)\n", | ||
967 | start_pfn, end_pfn); | ||
968 | #endif | ||
969 | memory_present(0, start_pfn, end_pfn); | ||
970 | } | ||
971 | |||
972 | sparse_init(); | ||
973 | |||
1327 | return end_pfn; | 974 | return end_pfn; |
1328 | } | 975 | } |
1329 | 976 | ||
977 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
978 | static int pall_ents __initdata; | ||
979 | |||
1330 | #ifdef CONFIG_DEBUG_PAGEALLOC | 980 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1331 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | 981 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) |
1332 | { | 982 | { |
@@ -1382,14 +1032,44 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, | |||
1382 | return alloc_bytes; | 1032 | return alloc_bytes; |
1383 | } | 1033 | } |
1384 | 1034 | ||
1385 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
1386 | static int pall_ents __initdata; | ||
1387 | |||
1388 | extern unsigned int kvmap_linear_patch[1]; | 1035 | extern unsigned int kvmap_linear_patch[1]; |
1036 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | ||
1037 | |||
1038 | static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) | ||
1039 | { | ||
1040 | const unsigned long shift_256MB = 28; | ||
1041 | const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); | ||
1042 | const unsigned long size_256MB = (1UL << shift_256MB); | ||
1043 | |||
1044 | while (start < end) { | ||
1045 | long remains; | ||
1046 | |||
1047 | remains = end - start; | ||
1048 | if (remains < size_256MB) | ||
1049 | break; | ||
1050 | |||
1051 | if (start & mask_256MB) { | ||
1052 | start = (start + size_256MB) & ~mask_256MB; | ||
1053 | continue; | ||
1054 | } | ||
1055 | |||
1056 | while (remains >= size_256MB) { | ||
1057 | unsigned long index = start >> shift_256MB; | ||
1058 | |||
1059 | __set_bit(index, kpte_linear_bitmap); | ||
1060 | |||
1061 | start += size_256MB; | ||
1062 | remains -= size_256MB; | ||
1063 | } | ||
1064 | } | ||
1065 | } | ||
1389 | 1066 | ||
1390 | static void __init kernel_physical_mapping_init(void) | 1067 | static void __init kernel_physical_mapping_init(void) |
1391 | { | 1068 | { |
1392 | unsigned long i, mem_alloced = 0UL; | 1069 | unsigned long i; |
1070 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1071 | unsigned long mem_alloced = 0UL; | ||
1072 | #endif | ||
1393 | 1073 | ||
1394 | read_obp_memory("reg", &pall[0], &pall_ents); | 1074 | read_obp_memory("reg", &pall[0], &pall_ents); |
1395 | 1075 | ||
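mark_kpte_bitmap() above skips any unaligned head of a bank and then sets one bit per fully covered, fully aligned 256MB chunk, so only those chunks get the large linear mapping. The same walk, runnable; the bank bounds in main() are demo assumptions:

#include <stdio.h>

#define SHIFT_256MB 28UL
#define SIZE_256MB  (1UL << SHIFT_256MB)
#define MASK_256MB  (SIZE_256MB - 1)

static unsigned long kpte_bitmap[64];

static void set_bit_(unsigned long idx)
{
        kpte_bitmap[idx / 64] |= 1UL << (idx % 64);
}

/* Skip an unaligned head, then mark every aligned 256MB chunk that the
 * bank fully covers.
 */
static void mark(unsigned long start, unsigned long end)
{
        while (start < end) {
                long remains = end - start;
                if (remains < (long)SIZE_256MB)
                        break;
                if (start & MASK_256MB) {
                        start = (start + SIZE_256MB) & ~MASK_256MB;
                        continue;
                }
                while (remains >= (long)SIZE_256MB) {
                        set_bit_(start >> SHIFT_256MB);
                        start += SIZE_256MB;
                        remains -= SIZE_256MB;
                }
        }
}

int main(void)
{
        mark(0x08000000UL, 0x38000000UL);  /* 128MB..896MB bank */
        printf("%#lx\n", kpte_bitmap[0]);  /* bits 1 and 2 set -> 0x6 */
        return 0;
}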
@@ -1398,10 +1078,16 @@ static void __init kernel_physical_mapping_init(void) | |||
1398 | 1078 | ||
1399 | phys_start = pall[i].phys_addr; | 1079 | phys_start = pall[i].phys_addr; |
1400 | phys_end = phys_start + pall[i].reg_size; | 1080 | phys_end = phys_start + pall[i].reg_size; |
1081 | |||
1082 | mark_kpte_bitmap(phys_start, phys_end); | ||
1083 | |||
1084 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1401 | mem_alloced += kernel_map_range(phys_start, phys_end, | 1085 | mem_alloced += kernel_map_range(phys_start, phys_end, |
1402 | PAGE_KERNEL); | 1086 | PAGE_KERNEL); |
1087 | #endif | ||
1403 | } | 1088 | } |
1404 | 1089 | ||
1090 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1405 | printk("Allocated %ld bytes for kernel page tables.\n", | 1091 | printk("Allocated %ld bytes for kernel page tables.\n", |
1406 | mem_alloced); | 1092 | mem_alloced); |
1407 | 1093 | ||
@@ -1409,8 +1095,10 @@ static void __init kernel_physical_mapping_init(void) | |||
1409 | flushi(&kvmap_linear_patch[0]); | 1095 | flushi(&kvmap_linear_patch[0]); |
1410 | 1096 | ||
1411 | __flush_tlb_all(); | 1097 | __flush_tlb_all(); |
1098 | #endif | ||
1412 | } | 1099 | } |
1413 | 1100 | ||
1101 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1414 | void kernel_map_pages(struct page *page, int numpages, int enable) | 1102 | void kernel_map_pages(struct page *page, int numpages, int enable) |
1415 | { | 1103 | { |
1416 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | 1104 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; |
@@ -1419,6 +1107,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
1419 | kernel_map_range(phys_start, phys_end, | 1107 | kernel_map_range(phys_start, phys_end, |
1420 | (enable ? PAGE_KERNEL : __pgprot(0))); | 1108 | (enable ? PAGE_KERNEL : __pgprot(0))); |
1421 | 1109 | ||
1110 | flush_tsb_kernel_range(PAGE_OFFSET + phys_start, | ||
1111 | PAGE_OFFSET + phys_end); | ||
1112 | |||
1422 | /* we should perform an IPI and flush all tlbs, | 1113 | /* we should perform an IPI and flush all tlbs, |
1423 | * but that can deadlock->flush only current cpu. | 1114 | * but that can deadlock->flush only current cpu. |
1424 | */ | 1115 | */ |
@@ -1439,18 +1130,150 @@ unsigned long __init find_ecache_flush_span(unsigned long size) | |||
1439 | return ~0UL; | 1130 | return ~0UL; |
1440 | } | 1131 | } |
1441 | 1132 | ||
1133 | static void __init tsb_phys_patch(void) | ||
1134 | { | ||
1135 | struct tsb_ldquad_phys_patch_entry *pquad; | ||
1136 | struct tsb_phys_patch_entry *p; | ||
1137 | |||
1138 | pquad = &__tsb_ldquad_phys_patch; | ||
1139 | while (pquad < &__tsb_ldquad_phys_patch_end) { | ||
1140 | unsigned long addr = pquad->addr; | ||
1141 | |||
1142 | if (tlb_type == hypervisor) | ||
1143 | *(unsigned int *) addr = pquad->sun4v_insn; | ||
1144 | else | ||
1145 | *(unsigned int *) addr = pquad->sun4u_insn; | ||
1146 | wmb(); | ||
1147 | __asm__ __volatile__("flush %0" | ||
1148 | : /* no outputs */ | ||
1149 | : "r" (addr)); | ||
1150 | |||
1151 | pquad++; | ||
1152 | } | ||
1153 | |||
1154 | p = &__tsb_phys_patch; | ||
1155 | while (p < &__tsb_phys_patch_end) { | ||
1156 | unsigned long addr = p->addr; | ||
1157 | |||
1158 | *(unsigned int *) addr = p->insn; | ||
1159 | wmb(); | ||
1160 | __asm__ __volatile__("flush %0" | ||
1161 | : /* no outputs */ | ||
1162 | : "r" (addr)); | ||
1163 | |||
1164 | p++; | ||
1165 | } | ||
1166 | } | ||
1167 | |||
1168 | /* Don't mark as __init; we give this to the Hypervisor. */ | ||
1169 | static struct hv_tsb_descr ktsb_descr[2]; | ||
1170 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
1171 | |||
1172 | static void __init sun4v_ktsb_init(void) | ||
1173 | { | ||
1174 | unsigned long ktsb_pa; | ||
1175 | |||
1176 | /* First KTSB for PAGE_SIZE mappings. */ | ||
1177 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); | ||
1178 | |||
1179 | switch (PAGE_SIZE) { | ||
1180 | case 8 * 1024: | ||
1181 | default: | ||
1182 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; | ||
1183 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; | ||
1184 | break; | ||
1185 | |||
1186 | case 64 * 1024: | ||
1187 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; | ||
1188 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; | ||
1189 | break; | ||
1190 | |||
1191 | case 512 * 1024: | ||
1192 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; | ||
1193 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; | ||
1194 | break; | ||
1195 | |||
1196 | case 4 * 1024 * 1024: | ||
1197 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1198 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; | ||
1199 | break; | ||
1200 | }; | ||
1201 | |||
1202 | ktsb_descr[0].assoc = 1; | ||
1203 | ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; | ||
1204 | ktsb_descr[0].ctx_idx = 0; | ||
1205 | ktsb_descr[0].tsb_base = ktsb_pa; | ||
1206 | ktsb_descr[0].resv = 0; | ||
1207 | |||
1208 | /* Second KTSB for 4MB/256MB mappings. */ | ||
1209 | ktsb_pa = (kern_base + | ||
1210 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); | ||
1211 | |||
1212 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1213 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | ||
1214 | HV_PGSZ_MASK_256MB); | ||
1215 | ktsb_descr[1].assoc = 1; | ||
1216 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | ||
1217 | ktsb_descr[1].ctx_idx = 0; | ||
1218 | ktsb_descr[1].tsb_base = ktsb_pa; | ||
1219 | ktsb_descr[1].resv = 0; | ||
1220 | } | ||
1221 | |||
1222 | void __cpuinit sun4v_ktsb_register(void) | ||
1223 | { | ||
1224 | register unsigned long func asm("%o5"); | ||
1225 | register unsigned long arg0 asm("%o0"); | ||
1226 | register unsigned long arg1 asm("%o1"); | ||
1227 | unsigned long pa; | ||
1228 | |||
1229 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | ||
1230 | |||
1231 | func = HV_FAST_MMU_TSB_CTX0; | ||
1232 | arg0 = 2; | ||
1233 | arg1 = pa; | ||
1234 | __asm__ __volatile__("ta %6" | ||
1235 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | ||
1236 | : "0" (func), "1" (arg0), "2" (arg1), | ||
1237 | "i" (HV_FAST_TRAP)); | ||
1238 | } | ||
1239 | |||
1442 | /* paging_init() sets up the page tables */ | 1240 | /* paging_init() sets up the page tables */ |
1443 | 1241 | ||
1444 | extern void cheetah_ecache_flush_init(void); | 1242 | extern void cheetah_ecache_flush_init(void); |
1243 | extern void sun4v_patch_tlb_handlers(void); | ||
1445 | 1244 | ||
1446 | static unsigned long last_valid_pfn; | 1245 | static unsigned long last_valid_pfn; |
1447 | pgd_t swapper_pg_dir[2048]; | 1246 | pgd_t swapper_pg_dir[2048]; |
1448 | 1247 | ||
1248 | static void sun4u_pgprot_init(void); | ||
1249 | static void sun4v_pgprot_init(void); | ||
1250 | |||
1449 | void __init paging_init(void) | 1251 | void __init paging_init(void) |
1450 | { | 1252 | { |
1451 | unsigned long end_pfn, pages_avail, shift; | 1253 | unsigned long end_pfn, pages_avail, shift, phys_base; |
1452 | unsigned long real_end, i; | 1254 | unsigned long real_end, i; |
1453 | 1255 | ||
1256 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1257 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1258 | |||
1259 | /* Invalidate both kernel TSBs. */ | ||
1260 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); | ||
1261 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); | ||
1262 | |||
1263 | if (tlb_type == hypervisor) | ||
1264 | sun4v_pgprot_init(); | ||
1265 | else | ||
1266 | sun4u_pgprot_init(); | ||
1267 | |||
1268 | if (tlb_type == cheetah_plus || | ||
1269 | tlb_type == hypervisor) | ||
1270 | tsb_phys_patch(); | ||
1271 | |||
1272 | if (tlb_type == hypervisor) { | ||
1273 | sun4v_patch_tlb_handlers(); | ||
1274 | sun4v_ktsb_init(); | ||
1275 | } | ||
1276 | |||
1454 | /* Find available physical memory... */ | 1277 | /* Find available physical memory... */ |
1455 | read_obp_memory("available", &pavail[0], &pavail_ents); | 1278 | read_obp_memory("available", &pavail[0], &pavail_ents); |
1456 | 1279 | ||
@@ -1458,11 +1281,6 @@ void __init paging_init(void) | |||
1458 | for (i = 0; i < pavail_ents; i++) | 1281 | for (i = 0; i < pavail_ents; i++) |
1459 | phys_base = min(phys_base, pavail[i].phys_addr); | 1282 | phys_base = min(phys_base, pavail[i].phys_addr); |
1460 | 1283 | ||
1461 | pfn_base = phys_base >> PAGE_SHIFT; | ||
1462 | |||
1463 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1464 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1465 | |||
1466 | set_bit(0, mmu_context_bmap); | 1284 | set_bit(0, mmu_context_bmap); |
1467 | 1285 | ||
1468 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | 1286 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
@@ -1486,47 +1304,38 @@ void __init paging_init(void) | |||
1486 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | 1304 | pud_set(pud_offset(&swapper_pg_dir[0], 0), |
1487 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); | 1305 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1488 | 1306 | ||
1489 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); | ||
1490 | |||
1491 | inherit_prom_mappings(); | 1307 | inherit_prom_mappings(); |
1492 | 1308 | ||
1493 | /* Ok, we can use our TLB miss and window trap handlers safely. | 1309 | /* Ok, we can use our TLB miss and window trap handlers safely. */ |
1494 | * We need to do a quick peek here to see if we are on StarFire | 1310 | setup_tba(); |
1495 | * or not, so setup_tba can setup the IRQ globals correctly (it | ||
1496 | * needs to get the hard smp processor id correctly). | ||
1497 | */ | ||
1498 | { | ||
1499 | extern void setup_tba(int); | ||
1500 | setup_tba(this_is_starfire); | ||
1501 | } | ||
1502 | |||
1503 | inherit_locked_prom_mappings(1); | ||
1504 | 1311 | ||
1505 | __flush_tlb_all(); | 1312 | __flush_tlb_all(); |
1506 | 1313 | ||
1314 | if (tlb_type == hypervisor) | ||
1315 | sun4v_ktsb_register(); | ||
1316 | |||
1507 | /* Setup bootmem... */ | 1317 | /* Setup bootmem... */ |
1508 | pages_avail = 0; | 1318 | pages_avail = 0; |
1509 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | 1319 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); |
1320 | |||
1321 | max_mapnr = last_valid_pfn; | ||
1510 | 1322 | ||
1511 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1512 | kernel_physical_mapping_init(); | 1323 | kernel_physical_mapping_init(); |
1513 | #endif | ||
1514 | 1324 | ||
1515 | { | 1325 | { |
1516 | unsigned long zones_size[MAX_NR_ZONES]; | 1326 | unsigned long zones_size[MAX_NR_ZONES]; |
1517 | unsigned long zholes_size[MAX_NR_ZONES]; | 1327 | unsigned long zholes_size[MAX_NR_ZONES]; |
1518 | unsigned long npages; | ||
1519 | int znum; | 1328 | int znum; |
1520 | 1329 | ||
1521 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | 1330 | for (znum = 0; znum < MAX_NR_ZONES; znum++) |
1522 | zones_size[znum] = zholes_size[znum] = 0; | 1331 | zones_size[znum] = zholes_size[znum] = 0; |
1523 | 1332 | ||
1524 | npages = end_pfn - pfn_base; | 1333 | zones_size[ZONE_DMA] = end_pfn; |
1525 | zones_size[ZONE_DMA] = npages; | 1334 | zholes_size[ZONE_DMA] = end_pfn - pages_avail; |
1526 | zholes_size[ZONE_DMA] = npages - pages_avail; | ||
1527 | 1335 | ||
1528 | free_area_init_node(0, &contig_page_data, zones_size, | 1336 | free_area_init_node(0, &contig_page_data, zones_size, |
1529 | phys_base >> PAGE_SHIFT, zholes_size); | 1337 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, |
1338 | zholes_size); | ||
1530 | } | 1339 | } |
1531 | 1340 | ||
1532 | device_scan(); | 1341 | device_scan(); |
@@ -1596,7 +1405,6 @@ void __init mem_init(void) | |||
1596 | 1405 | ||
1597 | taint_real_pages(); | 1406 | taint_real_pages(); |
1598 | 1407 | ||
1599 | max_mapnr = last_valid_pfn - pfn_base; | ||
1600 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | 1408 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); |
1601 | 1409 | ||
1602 | #ifdef CONFIG_DEBUG_BOOTMEM | 1410 | #ifdef CONFIG_DEBUG_BOOTMEM |
@@ -1676,3 +1484,342 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
1676 | } | 1484 | } |
1677 | } | 1485 | } |
1678 | #endif | 1486 | #endif |
1487 | |||
1488 | #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) | ||
1489 | #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) | ||
1490 | #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) | ||
1491 | #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) | ||
1492 | #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) | ||
1493 | #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) | ||
1494 | |||
1495 | pgprot_t PAGE_KERNEL __read_mostly; | ||
1496 | EXPORT_SYMBOL(PAGE_KERNEL); | ||
1497 | |||
1498 | pgprot_t PAGE_KERNEL_LOCKED __read_mostly; | ||
1499 | pgprot_t PAGE_COPY __read_mostly; | ||
1500 | |||
1501 | pgprot_t PAGE_SHARED __read_mostly; | ||
1502 | EXPORT_SYMBOL(PAGE_SHARED); | ||
1503 | |||
1504 | pgprot_t PAGE_EXEC __read_mostly; | ||
1505 | unsigned long pg_iobits __read_mostly; | ||
1506 | |||
1507 | unsigned long _PAGE_IE __read_mostly; | ||
1508 | |||
1509 | unsigned long _PAGE_E __read_mostly; | ||
1510 | EXPORT_SYMBOL(_PAGE_E); | ||
1511 | |||
1512 | unsigned long _PAGE_CACHE __read_mostly; | ||
1513 | EXPORT_SYMBOL(_PAGE_CACHE); | ||
1514 | |||
1515 | static void prot_init_common(unsigned long page_none, | ||
1516 | unsigned long page_shared, | ||
1517 | unsigned long page_copy, | ||
1518 | unsigned long page_readonly, | ||
1519 | unsigned long page_exec_bit) | ||
1520 | { | ||
1521 | PAGE_COPY = __pgprot(page_copy); | ||
1522 | PAGE_SHARED = __pgprot(page_shared); | ||
1523 | |||
1524 | protection_map[0x0] = __pgprot(page_none); | ||
1525 | protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); | ||
1526 | protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); | ||
1527 | protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); | ||
1528 | protection_map[0x4] = __pgprot(page_readonly); | ||
1529 | protection_map[0x5] = __pgprot(page_readonly); | ||
1530 | protection_map[0x6] = __pgprot(page_copy); | ||
1531 | protection_map[0x7] = __pgprot(page_copy); | ||
1532 | protection_map[0x8] = __pgprot(page_none); | ||
1533 | protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); | ||
1534 | protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); | ||
1535 | protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); | ||
1536 | protection_map[0xc] = __pgprot(page_readonly); | ||
1537 | protection_map[0xd] = __pgprot(page_readonly); | ||
1538 | protection_map[0xe] = __pgprot(page_shared); | ||
1539 | protection_map[0xf] = __pgprot(page_shared); | ||
1540 | } | ||
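The sixteen protection_map entries collapse to a simple rule once the index is read with the usual Linux bit layout (bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED; an assumption here, since the patch never spells it out). A sketch that reproduces one entry from its index:

	/* Writable private mappings get page_copy so the first store
	 * faults and copy-on-write kicks in.
	 */
	unsigned long prot_entry(unsigned int idx,
				 unsigned long page_none,
				 unsigned long page_shared,
				 unsigned long page_copy,
				 unsigned long page_readonly,
				 unsigned long page_exec_bit)
	{
		unsigned long p;

		if (idx & 0x2)			/* VM_WRITE */
			p = (idx & 0x8) ? page_shared : page_copy;
		else if (idx & 0x5)		/* VM_READ or VM_EXEC */
			p = page_readonly;
		else
			p = page_none;

		if (!(idx & 0x4))		/* no VM_EXEC: strip exec bit */
			p &= ~page_exec_bit;
		return p;
	}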
1541 | |||
1542 | static void __init sun4u_pgprot_init(void) | ||
1543 | { | ||
1544 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
1545 | unsigned long page_exec_bit; | ||
1546 | |||
1547 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
1548 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
1549 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
1550 | _PAGE_EXEC_4U); | ||
1551 | PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
1552 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
1553 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
1554 | _PAGE_EXEC_4U | _PAGE_L_4U); | ||
1555 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); | ||
1556 | |||
1557 | _PAGE_IE = _PAGE_IE_4U; | ||
1558 | _PAGE_E = _PAGE_E_4U; | ||
1559 | _PAGE_CACHE = _PAGE_CACHE_4U; | ||
1560 | |||
1561 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | | ||
1562 | __ACCESS_BITS_4U | _PAGE_E_4U); | ||
1563 | |||
1564 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ | ||
1565 | 0xfffff80000000000; | ||
1566 | kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | | ||
1567 | _PAGE_P_4U | _PAGE_W_4U); | ||
1568 | |||
1569 | /* XXX Should use 256MB on Panther. XXX */ | ||
1570 | kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; | ||
1571 | |||
1572 | _PAGE_SZBITS = _PAGE_SZBITS_4U; | ||
1573 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | | ||
1574 | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | | ||
1575 | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); | ||
1576 | |||
1577 | |||
1578 | page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; | ||
1579 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1580 | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); | ||
1581 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1582 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
1583 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
1584 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
1585 | |||
1586 | page_exec_bit = _PAGE_EXEC_4U; | ||
1587 | |||
1588 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
1589 | page_exec_bit); | ||
1590 | } | ||
1591 | |||
1592 | static void __init sun4v_pgprot_init(void) | ||
1593 | { | ||
1594 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
1595 | unsigned long page_exec_bit; | ||
1596 | |||
1597 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | | ||
1598 | _PAGE_CACHE_4V | _PAGE_P_4V | | ||
1599 | __ACCESS_BITS_4V | __DIRTY_BITS_4V | | ||
1600 | _PAGE_EXEC_4V); | ||
1601 | PAGE_KERNEL_LOCKED = PAGE_KERNEL; | ||
1602 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); | ||
1603 | |||
1604 | _PAGE_IE = _PAGE_IE_4V; | ||
1605 | _PAGE_E = _PAGE_E_4V; | ||
1606 | _PAGE_CACHE = _PAGE_CACHE_4V; | ||
1607 | |||
1608 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ | ||
1609 | 0xfffff80000000000; | ||
1610 | kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
1611 | _PAGE_P_4V | _PAGE_W_4V); | ||
1612 | |||
1613 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ | ||
1614 | 0xfffff80000000000; | ||
1615 | kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
1616 | _PAGE_P_4V | _PAGE_W_4V); | ||
1617 | |||
1618 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | | ||
1619 | __ACCESS_BITS_4V | _PAGE_E_4V); | ||
1620 | |||
1621 | _PAGE_SZBITS = _PAGE_SZBITS_4V; | ||
1622 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | | ||
1623 | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | | ||
1624 | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | | ||
1625 | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); | ||
1626 | |||
1627 | page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; | ||
1628 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1629 | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); | ||
1630 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1631 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
1632 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
1633 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
1634 | |||
1635 | page_exec_bit = _PAGE_EXEC_4V; | ||
1636 | |||
1637 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
1638 | page_exec_bit); | ||
1639 | } | ||
1640 | |||
1641 | unsigned long pte_sz_bits(unsigned long sz) | ||
1642 | { | ||
1643 | if (tlb_type == hypervisor) { | ||
1644 | switch (sz) { | ||
1645 | case 8 * 1024: | ||
1646 | default: | ||
1647 | return _PAGE_SZ8K_4V; | ||
1648 | case 64 * 1024: | ||
1649 | return _PAGE_SZ64K_4V; | ||
1650 | case 512 * 1024: | ||
1651 | return _PAGE_SZ512K_4V; | ||
1652 | case 4 * 1024 * 1024: | ||
1653 | return _PAGE_SZ4MB_4V; | ||
1654 | }; | ||
1655 | } else { | ||
1656 | switch (sz) { | ||
1657 | case 8 * 1024: | ||
1658 | default: | ||
1659 | return _PAGE_SZ8K_4U; | ||
1660 | case 64 * 1024: | ||
1661 | return _PAGE_SZ64K_4U; | ||
1662 | case 512 * 1024: | ||
1663 | return _PAGE_SZ512K_4U; | ||
1664 | case 4 * 1024 * 1024: | ||
1665 | return _PAGE_SZ4MB_4U; | ||
1666 | }; | ||
1667 | } | ||
1668 | } | ||
1669 | |||
1670 | pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) | ||
1671 | { | ||
1672 | pte_t pte; | ||
1673 | |||
1674 | pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); | ||
1675 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
1676 | pte_val(pte) |= pte_sz_bits(page_size); | ||
1677 | |||
1678 | return pte; | ||
1679 | } | ||
1680 | |||
1681 | static unsigned long kern_large_tte(unsigned long paddr) | ||
1682 | { | ||
1683 | unsigned long val; | ||
1684 | |||
1685 | val = (_PAGE_VALID | _PAGE_SZ4MB_4U | | ||
1686 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | | ||
1687 | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); | ||
1688 | if (tlb_type == hypervisor) | ||
1689 | val = (_PAGE_VALID | _PAGE_SZ4MB_4V | | ||
1690 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | | ||
1691 | _PAGE_EXEC_4V | _PAGE_W_4V); | ||
1692 | |||
1693 | return val | paddr; | ||
1694 | } | ||
1695 | |||
1696 | /* | ||
1697 | * Translate a PROM mapping we captured at boot time into a physical address. | ||
1698 | * The second parameter is only set from prom_callback() invocations. | ||
1699 | */ | ||
1700 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | ||
1701 | { | ||
1702 | unsigned long mask; | ||
1703 | int i; | ||
1704 | |||
1705 | mask = _PAGE_PADDR_4U; | ||
1706 | if (tlb_type == hypervisor) | ||
1707 | mask = _PAGE_PADDR_4V; | ||
1708 | |||
1709 | for (i = 0; i < prom_trans_ents; i++) { | ||
1710 | struct linux_prom_translation *p = &prom_trans[i]; | ||
1711 | |||
1712 | if (promva >= p->virt && | ||
1713 | promva < (p->virt + p->size)) { | ||
1714 | unsigned long base = p->data & mask; | ||
1715 | |||
1716 | if (error) | ||
1717 | *error = 0; | ||
1718 | return base + (promva & (8192 - 1)); | ||
1719 | } | ||
1720 | } | ||
1721 | if (error) | ||
1722 | *error = 1; | ||
1723 | return 0UL; | ||
1724 | } | ||
1725 | |||
1726 | /* XXX We should kill off this ugly thing at some point. XXX */ | ||
1727 | unsigned long sun4u_get_pte(unsigned long addr) | ||
1728 | { | ||
1729 | pgd_t *pgdp; | ||
1730 | pud_t *pudp; | ||
1731 | pmd_t *pmdp; | ||
1732 | pte_t *ptep; | ||
1733 | unsigned long mask = _PAGE_PADDR_4U; | ||
1734 | |||
1735 | if (tlb_type == hypervisor) | ||
1736 | mask = _PAGE_PADDR_4V; | ||
1737 | |||
1738 | if (addr >= PAGE_OFFSET) | ||
1739 | return addr & mask; | ||
1740 | |||
1741 | if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) | ||
1742 | return prom_virt_to_phys(addr, NULL); | ||
1743 | |||
1744 | pgdp = pgd_offset_k(addr); | ||
1745 | pudp = pud_offset(pgdp, addr); | ||
1746 | pmdp = pmd_offset(pudp, addr); | ||
1747 | ptep = pte_offset_kernel(pmdp, addr); | ||
1748 | |||
1749 | return pte_val(*ptep) & mask; | ||
1750 | } | ||
1751 | |||
1752 | /* If not locked, zap it. */ | ||
1753 | void __flush_tlb_all(void) | ||
1754 | { | ||
1755 | unsigned long pstate; | ||
1756 | int i; | ||
1757 | |||
1758 | __asm__ __volatile__("flushw\n\t" | ||
1759 | "rdpr %%pstate, %0\n\t" | ||
1760 | "wrpr %0, %1, %%pstate" | ||
1761 | : "=r" (pstate) | ||
1762 | : "i" (PSTATE_IE)); | ||
1763 | if (tlb_type == spitfire) { | ||
1764 | for (i = 0; i < 64; i++) { | ||
1765 | /* Spitfire Errata #32 workaround */ | ||
1766 | /* NOTE: Always runs on spitfire, so no | ||
1767 | * cheetah+ page size encodings. | ||
1768 | */ | ||
1769 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
1770 | "flush %%g6" | ||
1771 | : /* No outputs */ | ||
1772 | : "r" (0), | ||
1773 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
1774 | |||
1775 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { | ||
1776 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
1777 | "membar #Sync" | ||
1778 | : /* no outputs */ | ||
1779 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
1780 | spitfire_put_dtlb_data(i, 0x0UL); | ||
1781 | } | ||
1782 | |||
1783 | /* Spitfire Errata #32 workaround */ | ||
1784 | /* NOTE: Always runs on spitfire, so no | ||
1785 | * cheetah+ page size encodings. | ||
1786 | */ | ||
1787 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
1788 | "flush %%g6" | ||
1789 | : /* No outputs */ | ||
1790 | : "r" (0), | ||
1791 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
1792 | |||
1793 | if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { | ||
1794 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
1795 | "membar #Sync" | ||
1796 | : /* no outputs */ | ||
1797 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
1798 | spitfire_put_itlb_data(i, 0x0UL); | ||
1799 | } | ||
1800 | } | ||
1801 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
1802 | cheetah_flush_dtlb_all(); | ||
1803 | cheetah_flush_itlb_all(); | ||
1804 | } | ||
1805 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
1806 | : : "r" (pstate)); | ||
1807 | } | ||
1808 | |||
1809 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
1810 | |||
1811 | void online_page(struct page *page) | ||
1812 | { | ||
1813 | ClearPageReserved(page); | ||
1814 | set_page_count(page, 0); | ||
1815 | free_cold_page(page); | ||
1816 | totalram_pages++; | ||
1817 | num_physpages++; | ||
1818 | } | ||
1819 | |||
1820 | int remove_memory(u64 start, u64 size) | ||
1821 | { | ||
1822 | return -EINVAL; | ||
1823 | } | ||
1824 | |||
1825 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 8b104be4662b..a079cf42505e 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c | |||
@@ -25,6 +25,8 @@ void flush_tlb_pending(void) | |||
25 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); | 25 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); |
26 | 26 | ||
27 | if (mp->tlb_nr) { | 27 | if (mp->tlb_nr) { |
28 | flush_tsb_user(mp); | ||
29 | |||
28 | if (CTX_VALID(mp->mm->context)) { | 30 | if (CTX_VALID(mp->mm->context)) { |
29 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
30 | smp_flush_tlb_pending(mp->mm, mp->tlb_nr, | 32 | smp_flush_tlb_pending(mp->mm, mp->tlb_nr, |
@@ -47,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t | |||
47 | if (pte_exec(orig)) | 49 | if (pte_exec(orig)) |
48 | vaddr |= 0x1UL; | 50 | vaddr |= 0x1UL; |
49 | 51 | ||
50 | if (pte_dirty(orig)) { | 52 | if (tlb_type != hypervisor && |
53 | pte_dirty(orig)) { | ||
51 | unsigned long paddr, pfn = pte_pfn(orig); | 54 | unsigned long paddr, pfn = pte_pfn(orig); |
52 | struct address_space *mapping; | 55 | struct address_space *mapping; |
53 | struct page *page; | 56 | struct page *page; |
@@ -89,62 +92,3 @@ no_cache_flush: | |||
89 | if (nr >= TLB_BATCH_NR) | 92 | if (nr >= TLB_BATCH_NR) |
90 | flush_tlb_pending(); | 93 | flush_tlb_pending(); |
91 | } | 94 | } |
92 | |||
93 | void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) | ||
94 | { | ||
95 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); | ||
96 | unsigned long nr = mp->tlb_nr; | ||
97 | long s = start, e = end, vpte_base; | ||
98 | |||
99 | if (mp->fullmm) | ||
100 | return; | ||
101 | |||
102 | /* If start is greater than end, that is a real problem. */ | ||
103 | BUG_ON(start > end); | ||
104 | |||
105 | /* However, straddling the VA space hole is quite normal. */ | ||
106 | s &= PMD_MASK; | ||
107 | e = (e + PMD_SIZE - 1) & PMD_MASK; | ||
108 | |||
109 | vpte_base = (tlb_type == spitfire ? | ||
110 | VPTE_BASE_SPITFIRE : | ||
111 | VPTE_BASE_CHEETAH); | ||
112 | |||
113 | if (unlikely(nr != 0 && mm != mp->mm)) { | ||
114 | flush_tlb_pending(); | ||
115 | nr = 0; | ||
116 | } | ||
117 | |||
118 | if (nr == 0) | ||
119 | mp->mm = mm; | ||
120 | |||
121 | start = vpte_base + (s >> (PAGE_SHIFT - 3)); | ||
122 | end = vpte_base + (e >> (PAGE_SHIFT - 3)); | ||
123 | |||
124 | /* If the request straddles the VA space hole, we | ||
125 | * need to swap start and end. The reason this | ||
126 | * occurs is that "vpte_base" is the center of | ||
127 | * the linear page table mapping area. Thus, | ||
128 | * high addresses with the sign bit set map to | ||
129 | * addresses below vpte_base and non-sign bit | ||
130 | * addresses map to addresses above vpte_base. | ||
131 | */ | ||
132 | if (end < start) { | ||
133 | unsigned long tmp = start; | ||
134 | |||
135 | start = end; | ||
136 | end = tmp; | ||
137 | } | ||
138 | |||
139 | while (start < end) { | ||
140 | mp->vaddrs[nr] = start; | ||
141 | mp->tlb_nr = ++nr; | ||
142 | if (nr >= TLB_BATCH_NR) { | ||
143 | flush_tlb_pending(); | ||
144 | nr = 0; | ||
145 | } | ||
146 | start += PAGE_SIZE; | ||
147 | } | ||
148 | if (nr) | ||
149 | flush_tlb_pending(); | ||
150 | } | ||
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c new file mode 100644 index 000000000000..b2064e2a44d6 --- /dev/null +++ b/arch/sparc64/mm/tsb.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* arch/sparc64/mm/tsb.c | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <asm/system.h> | ||
8 | #include <asm/page.h> | ||
9 | #include <asm/tlbflush.h> | ||
10 | #include <asm/tlb.h> | ||
11 | #include <asm/mmu_context.h> | ||
12 | #include <asm/pgtable.h> | ||
13 | #include <asm/tsb.h> | ||
14 | #include <asm/oplib.h> | ||
15 | |||
16 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
17 | |||
18 | static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries) | ||
19 | { | ||
20 | vaddr >>= PAGE_SHIFT; | ||
21 | return vaddr & (nentries - 1); | ||
22 | } | ||
23 | |||
24 | static inline int tag_compare(unsigned long tag, unsigned long vaddr) | ||
25 | { | ||
26 | return (tag == (vaddr >> 22)); | ||
27 | } | ||
28 | |||
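The two helpers slice the same virtual address differently: the index takes page-granularity bits masked to the table size, while the tag keeps bits 22 and up, so one tag format works for every table size. A standalone worked example (the address and the 512-entry size are made up; the real kernel TSB uses KERNEL_TSB_NENTRIES):

	#include <stdio.h>

	int main(void)
	{
		unsigned long vaddr = 0xfffff80000004000UL;	/* made up */
		unsigned long nentries = 512;			/* hypothetical */

		/* tsb_hash(), assuming 8K pages (PAGE_SHIFT == 13) */
		unsigned long idx = (vaddr >> 13) & (nentries - 1);
		/* the tag that tag_compare() checks against */
		unsigned long tag = vaddr >> 22;

		printf("entry %lu, tag %#lx\n", idx, tag);
		return 0;
	}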
29 | /* TSB flushes need only occur on the processor initiating the address | ||
30 | * space modification, not on each cpu the address space has run on. | ||
31 | * Only the TLB flush needs that treatment. | ||
32 | */ | ||
33 | |||
34 | void flush_tsb_kernel_range(unsigned long start, unsigned long end) | ||
35 | { | ||
36 | unsigned long v; | ||
37 | |||
38 | for (v = start; v < end; v += PAGE_SIZE) { | ||
39 | unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); | ||
40 | struct tsb *ent = &swapper_tsb[hash]; | ||
41 | |||
42 | if (tag_compare(ent->tag, v)) { | ||
43 | ent->tag = (1UL << TSB_TAG_INVALID_BIT); | ||
44 | membar_storeload_storestore(); | ||
45 | } | ||
46 | } | ||
47 | } | ||
48 | |||
49 | void flush_tsb_user(struct mmu_gather *mp) | ||
50 | { | ||
51 | struct mm_struct *mm = mp->mm; | ||
52 | unsigned long nentries, base, flags; | ||
53 | struct tsb *tsb; | ||
54 | int i; | ||
55 | |||
56 | spin_lock_irqsave(&mm->context.lock, flags); | ||
57 | |||
58 | tsb = mm->context.tsb; | ||
59 | nentries = mm->context.tsb_nentries; | ||
60 | |||
61 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
62 | base = __pa(tsb); | ||
63 | else | ||
64 | base = (unsigned long) tsb; | ||
65 | |||
66 | for (i = 0; i < mp->tlb_nr; i++) { | ||
67 | unsigned long v = mp->vaddrs[i]; | ||
68 | unsigned long tag, ent, hash; | ||
69 | |||
70 | v &= ~0x1UL; | ||
71 | |||
72 | hash = tsb_hash(v, nentries); | ||
73 | ent = base + (hash * sizeof(struct tsb)); | ||
74 | tag = (v >> 22UL); | ||
75 | |||
76 | tsb_flush(ent, tag); | ||
77 | } | ||
78 | |||
79 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
80 | } | ||
81 | |||
82 | static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) | ||
83 | { | ||
84 | unsigned long tsb_reg, base, tsb_paddr; | ||
85 | unsigned long page_sz, tte; | ||
86 | |||
87 | mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); | ||
88 | |||
89 | base = TSBMAP_BASE; | ||
90 | tte = pgprot_val(PAGE_KERNEL_LOCKED); | ||
91 | tsb_paddr = __pa(mm->context.tsb); | ||
92 | BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); | ||
93 | |||
94 | /* Use the smallest page size that can map the whole TSB | ||
95 | * in one TLB entry. | ||
96 | */ | ||
97 | switch (tsb_bytes) { | ||
98 | case 8192 << 0: | ||
99 | tsb_reg = 0x0UL; | ||
100 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
101 | base += (tsb_paddr & 8192); | ||
102 | #endif | ||
103 | page_sz = 8192; | ||
104 | break; | ||
105 | |||
106 | case 8192 << 1: | ||
107 | tsb_reg = 0x1UL; | ||
108 | page_sz = 64 * 1024; | ||
109 | break; | ||
110 | |||
111 | case 8192 << 2: | ||
112 | tsb_reg = 0x2UL; | ||
113 | page_sz = 64 * 1024; | ||
114 | break; | ||
115 | |||
116 | case 8192 << 3: | ||
117 | tsb_reg = 0x3UL; | ||
118 | page_sz = 64 * 1024; | ||
119 | break; | ||
120 | |||
121 | case 8192 << 4: | ||
122 | tsb_reg = 0x4UL; | ||
123 | page_sz = 512 * 1024; | ||
124 | break; | ||
125 | |||
126 | case 8192 << 5: | ||
127 | tsb_reg = 0x5UL; | ||
128 | page_sz = 512 * 1024; | ||
129 | break; | ||
130 | |||
131 | case 8192 << 6: | ||
132 | tsb_reg = 0x6UL; | ||
133 | page_sz = 512 * 1024; | ||
134 | break; | ||
135 | |||
136 | case 8192 << 7: | ||
137 | tsb_reg = 0x7UL; | ||
138 | page_sz = 4 * 1024 * 1024; | ||
139 | break; | ||
140 | |||
141 | default: | ||
142 | BUG(); | ||
143 | }; | ||
144 | tte |= pte_sz_bits(page_sz); | ||
145 | |||
146 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
147 | /* Physical mapping, no locked TLB entry for TSB. */ | ||
148 | tsb_reg |= tsb_paddr; | ||
149 | |||
150 | mm->context.tsb_reg_val = tsb_reg; | ||
151 | mm->context.tsb_map_vaddr = 0; | ||
152 | mm->context.tsb_map_pte = 0; | ||
153 | } else { | ||
154 | tsb_reg |= base; | ||
155 | tsb_reg |= (tsb_paddr & (page_sz - 1UL)); | ||
156 | tte |= (tsb_paddr & ~(page_sz - 1UL)); | ||
157 | |||
158 | mm->context.tsb_reg_val = tsb_reg; | ||
159 | mm->context.tsb_map_vaddr = base; | ||
160 | mm->context.tsb_map_pte = tte; | ||
161 | } | ||
162 | |||
163 | /* Setup the Hypervisor TSB descriptor. */ | ||
164 | if (tlb_type == hypervisor) { | ||
165 | struct hv_tsb_descr *hp = &mm->context.tsb_descr; | ||
166 | |||
167 | switch (PAGE_SIZE) { | ||
168 | case 8192: | ||
169 | default: | ||
170 | hp->pgsz_idx = HV_PGSZ_IDX_8K; | ||
171 | break; | ||
172 | |||
173 | case 64 * 1024: | ||
174 | hp->pgsz_idx = HV_PGSZ_IDX_64K; | ||
175 | break; | ||
176 | |||
177 | case 512 * 1024: | ||
178 | hp->pgsz_idx = HV_PGSZ_IDX_512K; | ||
179 | break; | ||
180 | |||
181 | case 4 * 1024 * 1024: | ||
182 | hp->pgsz_idx = HV_PGSZ_IDX_4MB; | ||
183 | break; | ||
184 | }; | ||
185 | hp->assoc = 1; | ||
186 | hp->num_ttes = tsb_bytes / 16; | ||
187 | hp->ctx_idx = 0; | ||
188 | switch (PAGE_SIZE) { | ||
189 | case 8192: | ||
190 | default: | ||
191 | hp->pgsz_mask = HV_PGSZ_MASK_8K; | ||
192 | break; | ||
193 | |||
194 | case 64 * 1024: | ||
195 | hp->pgsz_mask = HV_PGSZ_MASK_64K; | ||
196 | break; | ||
197 | |||
198 | case 512 * 1024: | ||
199 | hp->pgsz_mask = HV_PGSZ_MASK_512K; | ||
200 | break; | ||
201 | |||
202 | case 4 * 1024 * 1024: | ||
203 | hp->pgsz_mask = HV_PGSZ_MASK_4MB; | ||
204 | break; | ||
205 | }; | ||
206 | hp->tsb_base = tsb_paddr; | ||
207 | hp->resv = 0; | ||
208 | } | ||
209 | } | ||
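The switch above encodes log2(tsb_bytes / 8192) in the low three bits of tsb_reg (the same bits destroy_context() later uses to find the kmem cache) and steps the mapping page size through 8K, 64K, 512K and 4MB so a single TLB entry covers the whole table. A standalone sketch of that rule:

	#include <stdio.h>

	int main(void)
	{
		unsigned long tsb_bytes;

		for (tsb_bytes = 8192; tsb_bytes <= (8192UL << 7); tsb_bytes <<= 1) {
			unsigned long idx = 0, s = tsb_bytes;
			unsigned long page_sz = 8192;

			while (s > 8192) {		/* log2(bytes / 8192) */
				s >>= 1;
				idx++;
			}
			while (page_sz < tsb_bytes)	/* 8K -> 64K -> 512K -> 4MB */
				page_sz *= 8;

			printf("%7lu bytes: tsb_reg low bits 0x%lx, map page %lu\n",
			       tsb_bytes, idx, page_sz);
		}
		return 0;
	}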
210 | |||
211 | static kmem_cache_t *tsb_caches[8] __read_mostly; | ||
212 | |||
213 | static const char *tsb_cache_names[8] = { | ||
214 | "tsb_8KB", | ||
215 | "tsb_16KB", | ||
216 | "tsb_32KB", | ||
217 | "tsb_64KB", | ||
218 | "tsb_128KB", | ||
219 | "tsb_256KB", | ||
220 | "tsb_512KB", | ||
221 | "tsb_1MB", | ||
222 | }; | ||
223 | |||
224 | void __init tsb_cache_init(void) | ||
225 | { | ||
226 | unsigned long i; | ||
227 | |||
228 | for (i = 0; i < 8; i++) { | ||
229 | unsigned long size = 8192 << i; | ||
230 | const char *name = tsb_cache_names[i]; | ||
231 | |||
232 | tsb_caches[i] = kmem_cache_create(name, | ||
233 | size, size, | ||
234 | SLAB_HWCACHE_ALIGN | | ||
235 | SLAB_MUST_HWCACHE_ALIGN, | ||
236 | NULL, NULL); | ||
237 | if (!tsb_caches[i]) { | ||
238 | prom_printf("Could not create %s cache\n", name); | ||
239 | prom_halt(); | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, | ||
245 | * do_sparc64_fault() invokes this routine to try to grow the TSB. | ||
246 | * | ||
247 | * When we reach the maximum TSB size supported, we stick ~0UL into | ||
248 | * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache() | ||
249 | * will not trigger any longer. | ||
250 | * | ||
251 | * The TSB can be anywhere from 8K to 1MB in size, in increasing powers | ||
252 | * of two. The TSB must be aligned to its size, so e.g. a 512K TSB | ||
253 | * must be 512K aligned. It also must be physically contiguous, so we | ||
254 | * cannot use vmalloc(). | ||
255 | * | ||
256 | * The idea here is to grow the TSB when the RSS of the process approaches | ||
257 | * the number of entries that the current TSB can hold at once. Currently, | ||
258 | * we trigger when the RSS hits 3/4 of the TSB capacity. | ||
259 | */ | ||
260 | void tsb_grow(struct mm_struct *mm, unsigned long rss) | ||
261 | { | ||
262 | unsigned long max_tsb_size = 1 * 1024 * 1024; | ||
263 | unsigned long new_size, old_size, flags; | ||
264 | struct tsb *old_tsb, *new_tsb; | ||
265 | unsigned long new_cache_index, old_cache_index; | ||
266 | unsigned long new_rss_limit; | ||
267 | gfp_t gfp_flags; | ||
268 | |||
269 | if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) | ||
270 | max_tsb_size = (PAGE_SIZE << MAX_ORDER); | ||
271 | |||
272 | new_cache_index = 0; | ||
273 | for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { | ||
274 | unsigned long n_entries = new_size / sizeof(struct tsb); | ||
275 | |||
276 | n_entries = (n_entries * 3) / 4; | ||
277 | if (n_entries > rss) | ||
278 | break; | ||
279 | |||
280 | new_cache_index++; | ||
281 | } | ||
282 | |||
283 | if (new_size == max_tsb_size) | ||
284 | new_rss_limit = ~0UL; | ||
285 | else | ||
286 | new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4; | ||
287 | |||
288 | retry_tsb_alloc: | ||
289 | gfp_flags = GFP_KERNEL; | ||
290 | if (new_size > (PAGE_SIZE * 2)) | ||
291 | gfp_flags = __GFP_NOWARN | __GFP_NORETRY; | ||
292 | |||
293 | new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags); | ||
294 | if (unlikely(!new_tsb)) { | ||
295 | /* Not being able to fork due to a high-order TSB | ||
296 | * allocation failure is very bad behavior. Just back | ||
297 | * down to a 0-order allocation and force no TSB | ||
298 | * growing for this address space. | ||
299 | */ | ||
300 | if (mm->context.tsb == NULL && new_cache_index > 0) { | ||
301 | new_cache_index = 0; | ||
302 | new_size = 8192; | ||
303 | new_rss_limit = ~0UL; | ||
304 | goto retry_tsb_alloc; | ||
305 | } | ||
306 | |||
307 | /* If we failed on a TSB grow, we are under serious | ||
308 | * memory pressure so don't try to grow any more. | ||
309 | */ | ||
310 | if (mm->context.tsb != NULL) | ||
311 | mm->context.tsb_rss_limit = ~0UL; | ||
312 | return; | ||
313 | } | ||
314 | |||
315 | /* Mark all tags as invalid. */ | ||
316 | tsb_init(new_tsb, new_size); | ||
317 | |||
318 | /* Ok, we are about to commit the changes. If we are | ||
319 | * growing an existing TSB, the locking is very tricky, | ||
320 | * so WATCH OUT! | ||
321 | * | ||
322 | * We have to hold mm->context.lock while committing to the | ||
323 | * new TSB, this synchronizes us with processors in | ||
324 | * flush_tsb_user() and switch_mm() for this address space. | ||
325 | * | ||
326 | * But even with that lock held, processors run asynchronously | ||
327 | * accessing the old TSB via TLB miss handling. This is OK | ||
328 | * because those actions are just propagating state from the | ||
329 | * Linux page tables into the TSB; page table mappings are not | ||
330 | * being changed. If a real fault occurs, the processor will | ||
331 | * synchronize with us when it hits flush_tsb_user(), this is | ||
332 | * also true for the case where vmscan is modifying the page | ||
333 | * tables. The only thing we need to be careful with is to | ||
334 | * skip any locked TSB entries during copy_tsb(). | ||
335 | * | ||
336 | * When we finish committing to the new TSB, we have to drop | ||
337 | * the lock and ask all other cpus running this address space | ||
338 | * to run tsb_context_switch() to see the new TSB table. | ||
339 | */ | ||
340 | spin_lock_irqsave(&mm->context.lock, flags); | ||
341 | |||
342 | old_tsb = mm->context.tsb; | ||
343 | old_cache_index = (mm->context.tsb_reg_val & 0x7UL); | ||
344 | old_size = mm->context.tsb_nentries * sizeof(struct tsb); | ||
345 | |||
346 | |||
347 | /* Handle multiple threads trying to grow the TSB at the same time. | ||
348 | * One will get in here first, and bump the size and the RSS limit. | ||
349 | * The others will get in here next and hit this check. | ||
350 | */ | ||
351 | if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { | ||
352 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
353 | |||
354 | kmem_cache_free(tsb_caches[new_cache_index], new_tsb); | ||
355 | return; | ||
356 | } | ||
357 | |||
358 | mm->context.tsb_rss_limit = new_rss_limit; | ||
359 | |||
360 | if (old_tsb) { | ||
361 | extern void copy_tsb(unsigned long old_tsb_base, | ||
362 | unsigned long old_tsb_size, | ||
363 | unsigned long new_tsb_base, | ||
364 | unsigned long new_tsb_size); | ||
365 | unsigned long old_tsb_base = (unsigned long) old_tsb; | ||
366 | unsigned long new_tsb_base = (unsigned long) new_tsb; | ||
367 | |||
368 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
369 | old_tsb_base = __pa(old_tsb_base); | ||
370 | new_tsb_base = __pa(new_tsb_base); | ||
371 | } | ||
372 | copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); | ||
373 | } | ||
374 | |||
375 | mm->context.tsb = new_tsb; | ||
376 | setup_tsb_params(mm, new_size); | ||
377 | |||
378 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
379 | |||
380 | /* If old_tsb is NULL, we're being invoked for the first time | ||
381 | * from init_new_context(). | ||
382 | */ | ||
383 | if (old_tsb) { | ||
384 | /* Reload it on the local cpu. */ | ||
385 | tsb_context_switch(mm); | ||
386 | |||
387 | /* Now force other processors to do the same. */ | ||
388 | smp_tsb_sync(mm); | ||
389 | |||
390 | /* Now it is safe to free the old tsb. */ | ||
391 | kmem_cache_free(tsb_caches[old_cache_index], old_tsb); | ||
392 | } | ||
393 | } | ||
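The sizing loop stops at the first table whose three-quarters-full point exceeds the RSS; with 16-byte entries (tag plus TTE, matching the tsb_bytes / 16 used for num_ttes earlier) the thresholds fall out as in this standalone sketch (the RSS value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long rss = 20000;	/* hypothetical resident pages */
		unsigned long size;

		for (size = 8192; size < (1UL << 20); size <<= 1) {
			unsigned long entries = size / 16;

			if ((entries * 3) / 4 > rss)
				break;
		}
		printf("rss %lu -> %lu-byte TSB (%lu entries)\n",
		       rss, size, size / 16);
		return 0;
	}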
394 | |||
395 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
396 | { | ||
397 | spin_lock_init(&mm->context.lock); | ||
398 | |||
399 | mm->context.sparc64_ctx_val = 0UL; | ||
400 | |||
401 | /* copy_mm() copies over the parent's mm_struct before calling | ||
402 | * us, so we need to zero out the TSB pointer or else tsb_grow() | ||
403 | * will be confused and think there is an older TSB to free up. | ||
404 | */ | ||
405 | mm->context.tsb = NULL; | ||
406 | |||
407 | /* If this is a fork, inherit the parent's TSB size. We would | ||
408 | * grow it to that size on the first page fault anyway. | ||
409 | */ | ||
410 | tsb_grow(mm, get_mm_rss(mm)); | ||
411 | |||
412 | if (unlikely(!mm->context.tsb)) | ||
413 | return -ENOMEM; | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | void destroy_context(struct mm_struct *mm) | ||
419 | { | ||
420 | unsigned long flags, cache_index; | ||
421 | |||
422 | cache_index = (mm->context.tsb_reg_val & 0x7UL); | ||
423 | kmem_cache_free(tsb_caches[cache_index], mm->context.tsb); | ||
424 | |||
425 | /* We can remove these later, but for now it's useful | ||
426 | * to catch any bogus post-destroy_context() references | ||
427 | * to the TSB. | ||
428 | */ | ||
429 | mm->context.tsb = NULL; | ||
430 | mm->context.tsb_reg_val = 0UL; | ||
431 | |||
432 | spin_lock_irqsave(&ctx_alloc_lock, flags); | ||
433 | |||
434 | if (CTX_VALID(mm->context)) { | ||
435 | unsigned long nr = CTX_NRBITS(mm->context); | ||
436 | mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); | ||
437 | } | ||
438 | |||
439 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); | ||
440 | } | ||
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index e4c9151fa116..f8479fad4047 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/head.h> | 15 | #include <asm/head.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/hypervisor.h> | ||
18 | 19 | ||
19 | /* Basically, most of the Spitfire vs. Cheetah madness | 20 | /* Basically, most of the Spitfire vs. Cheetah madness |
20 | * has to do with the fact that Cheetah does not support | 21 | * has to do with the fact that Cheetah does not support |
@@ -29,16 +30,18 @@ | |||
29 | .text | 30 | .text |
30 | .align 32 | 31 | .align 32 |
31 | .globl __flush_tlb_mm | 32 | .globl __flush_tlb_mm |
32 | __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | 33 | __flush_tlb_mm: /* 18 insns */ |
34 | /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | ||
33 | ldxa [%o1] ASI_DMMU, %g2 | 35 | ldxa [%o1] ASI_DMMU, %g2 |
34 | cmp %g2, %o0 | 36 | cmp %g2, %o0 |
35 | bne,pn %icc, __spitfire_flush_tlb_mm_slow | 37 | bne,pn %icc, __spitfire_flush_tlb_mm_slow |
36 | mov 0x50, %g3 | 38 | mov 0x50, %g3 |
37 | stxa %g0, [%g3] ASI_DMMU_DEMAP | 39 | stxa %g0, [%g3] ASI_DMMU_DEMAP |
38 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 40 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
41 | sethi %hi(KERNBASE), %g3 | ||
42 | flush %g3 | ||
39 | retl | 43 | retl |
40 | flush %g6 | 44 | nop |
41 | nop | ||
42 | nop | 45 | nop |
43 | nop | 46 | nop |
44 | nop | 47 | nop |
@@ -51,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | |||
51 | 54 | ||
52 | .align 32 | 55 | .align 32 |
53 | .globl __flush_tlb_pending | 56 | .globl __flush_tlb_pending |
54 | __flush_tlb_pending: | 57 | __flush_tlb_pending: /* 26 insns */ |
55 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 58 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
56 | rdpr %pstate, %g7 | 59 | rdpr %pstate, %g7 |
57 | sllx %o1, 3, %o1 | 60 | sllx %o1, 3, %o1 |
@@ -72,7 +75,8 @@ __flush_tlb_pending: | |||
72 | brnz,pt %o1, 1b | 75 | brnz,pt %o1, 1b |
73 | nop | 76 | nop |
74 | stxa %g2, [%o4] ASI_DMMU | 77 | stxa %g2, [%o4] ASI_DMMU |
75 | flush %g6 | 78 | sethi %hi(KERNBASE), %o4 |
79 | flush %o4 | ||
76 | retl | 80 | retl |
77 | wrpr %g7, 0x0, %pstate | 81 | wrpr %g7, 0x0, %pstate |
78 | nop | 82 | nop |
@@ -82,7 +86,8 @@ __flush_tlb_pending: | |||
82 | 86 | ||
83 | .align 32 | 87 | .align 32 |
84 | .globl __flush_tlb_kernel_range | 88 | .globl __flush_tlb_kernel_range |
85 | __flush_tlb_kernel_range: /* %o0=start, %o1=end */ | 89 | __flush_tlb_kernel_range: /* 16 insns */ |
90 | /* %o0=start, %o1=end */ | ||
86 | cmp %o0, %o1 | 91 | cmp %o0, %o1 |
87 | be,pn %xcc, 2f | 92 | be,pn %xcc, 2f |
88 | sethi %hi(PAGE_SIZE), %o4 | 93 | sethi %hi(PAGE_SIZE), %o4 |
@@ -94,8 +99,11 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ | |||
94 | membar #Sync | 99 | membar #Sync |
95 | brnz,pt %o3, 1b | 100 | brnz,pt %o3, 1b |
96 | sub %o3, %o4, %o3 | 101 | sub %o3, %o4, %o3 |
97 | 2: retl | 102 | 2: sethi %hi(KERNBASE), %o3 |
98 | flush %g6 | 103 | flush %o3 |
104 | retl | ||
105 | nop | ||
106 | nop | ||
99 | 107 | ||
100 | __spitfire_flush_tlb_mm_slow: | 108 | __spitfire_flush_tlb_mm_slow: |
101 | rdpr %pstate, %g1 | 109 | rdpr %pstate, %g1 |
@@ -105,7 +113,8 @@ __spitfire_flush_tlb_mm_slow: | |||
105 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 113 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
106 | flush %g6 | 114 | flush %g6 |
107 | stxa %g2, [%o1] ASI_DMMU | 115 | stxa %g2, [%o1] ASI_DMMU |
108 | flush %g6 | 116 | sethi %hi(KERNBASE), %o1 |
117 | flush %o1 | ||
109 | retl | 118 | retl |
110 | wrpr %g1, 0, %pstate | 119 | wrpr %g1, 0, %pstate |
111 | 120 | ||
@@ -181,7 +190,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ | |||
181 | .previous | 190 | .previous |
182 | 191 | ||
183 | /* Cheetah specific versions, patched at boot time. */ | 192 | /* Cheetah specific versions, patched at boot time. */ |
184 | __cheetah_flush_tlb_mm: /* 18 insns */ | 193 | __cheetah_flush_tlb_mm: /* 19 insns */ |
185 | rdpr %pstate, %g7 | 194 | rdpr %pstate, %g7 |
186 | andn %g7, PSTATE_IE, %g2 | 195 | andn %g7, PSTATE_IE, %g2 |
187 | wrpr %g2, 0x0, %pstate | 196 | wrpr %g2, 0x0, %pstate |
@@ -196,12 +205,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */ | |||
196 | stxa %g0, [%g3] ASI_DMMU_DEMAP | 205 | stxa %g0, [%g3] ASI_DMMU_DEMAP |
197 | stxa %g0, [%g3] ASI_IMMU_DEMAP | 206 | stxa %g0, [%g3] ASI_IMMU_DEMAP |
198 | stxa %g2, [%o2] ASI_DMMU | 207 | stxa %g2, [%o2] ASI_DMMU |
199 | flush %g6 | 208 | sethi %hi(KERNBASE), %o2 |
209 | flush %o2 | ||
200 | wrpr %g0, 0, %tl | 210 | wrpr %g0, 0, %tl |
201 | retl | 211 | retl |
202 | wrpr %g7, 0x0, %pstate | 212 | wrpr %g7, 0x0, %pstate |
203 | 213 | ||
204 | __cheetah_flush_tlb_pending: /* 26 insns */ | 214 | __cheetah_flush_tlb_pending: /* 27 insns */ |
205 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 215 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
206 | rdpr %pstate, %g7 | 216 | rdpr %pstate, %g7 |
207 | sllx %o1, 3, %o1 | 217 | sllx %o1, 3, %o1 |
@@ -225,7 +235,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */ | |||
225 | brnz,pt %o1, 1b | 235 | brnz,pt %o1, 1b |
226 | nop | 236 | nop |
227 | stxa %g2, [%o4] ASI_DMMU | 237 | stxa %g2, [%o4] ASI_DMMU |
228 | flush %g6 | 238 | sethi %hi(KERNBASE), %o4 |
239 | flush %o4 | ||
229 | wrpr %g0, 0, %tl | 240 | wrpr %g0, 0, %tl |
230 | retl | 241 | retl |
231 | wrpr %g7, 0x0, %pstate | 242 | wrpr %g7, 0x0, %pstate |
@@ -245,7 +256,76 @@ __cheetah_flush_dcache_page: /* 11 insns */ | |||
245 | nop | 256 | nop |
246 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 257 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
247 | 258 | ||
248 | cheetah_patch_one: | 259 | /* Hypervisor specific versions, patched at boot time. */ |
260 | __hypervisor_tlb_tl0_error: | ||
261 | save %sp, -192, %sp | ||
262 | mov %i0, %o0 | ||
263 | call hypervisor_tlbop_error | ||
264 | mov %i1, %o1 | ||
265 | ret | ||
266 | restore | ||
267 | |||
268 | __hypervisor_flush_tlb_mm: /* 10 insns */ | ||
269 | mov %o0, %o2 /* ARG2: mmu context */ | ||
270 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ | ||
271 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | ||
272 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
273 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
274 | ta HV_FAST_TRAP | ||
275 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
276 | mov HV_FAST_MMU_DEMAP_CTX, %o1 | ||
277 | retl | ||
278 | nop | ||
279 | |||
280 | __hypervisor_flush_tlb_pending: /* 16 insns */ | ||
281 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | ||
282 | sllx %o1, 3, %g1 | ||
283 | mov %o2, %g2 | ||
284 | mov %o0, %g3 | ||
285 | 1: sub %g1, (1 << 3), %g1 | ||
286 | ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ | ||
287 | mov %g3, %o1 /* ARG1: mmu context */ | ||
288 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
289 | srlx %o0, PAGE_SHIFT, %o0 | ||
290 | sllx %o0, PAGE_SHIFT, %o0 | ||
291 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
292 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
293 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
294 | brnz,pt %g1, 1b | ||
295 | nop | ||
296 | retl | ||
297 | nop | ||
298 | |||
299 | __hypervisor_flush_tlb_kernel_range: /* 16 insns */ | ||
300 | /* %o0=start, %o1=end */ | ||
301 | cmp %o0, %o1 | ||
302 | be,pn %xcc, 2f | ||
303 | sethi %hi(PAGE_SIZE), %g3 | ||
304 | mov %o0, %g1 | ||
305 | sub %o1, %g1, %g2 | ||
306 | sub %g2, %g3, %g2 | ||
307 | 1: add %g1, %g2, %o0 /* ARG0: virtual address */ | ||
308 | mov 0, %o1 /* ARG1: mmu context */ | ||
309 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
310 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
311 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
312 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
313 | brnz,pt %g2, 1b | ||
314 | sub %g2, %g3, %g2 | ||
315 | 2: retl | ||
316 | nop | ||
317 | |||
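The loop above walks offsets downward from end - PAGE_SIZE to zero, issuing one HV_MMU_UNMAP_ADDR_TRAP per page. In C pseudocode, with hv_mmu_unmap_addr() as a made-up stand-in for the trap:

	/* Sketch only; mirrors __hypervisor_flush_tlb_kernel_range. */
	void flush_kernel_range(unsigned long start, unsigned long end)
	{
		long off = (long)(end - start) - 8192;	/* last page first */

		while (off >= 0) {
			/* ta HV_MMU_UNMAP_ADDR_TRAP, ctx 0, HV_MMU_ALL */
			hv_mmu_unmap_addr(start + off, 0, HV_MMU_ALL);
			off -= 8192;
		}
	}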
318 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
319 | /* XXX Niagara and friends have an 8K cache, so no aliasing is | ||
320 | * XXX possible, but nothing explicit in the Hypervisor API | ||
321 | * XXX guarantees this. | ||
322 | */ | ||
323 | __hypervisor_flush_dcache_page: /* 2 insns */ | ||
324 | retl | ||
325 | nop | ||
326 | #endif | ||
327 | |||
328 | tlb_patch_one: | ||
249 | 1: lduw [%o1], %g1 | 329 | 1: lduw [%o1], %g1 |
250 | stw %g1, [%o0] | 330 | stw %g1, [%o0] |
251 | flush %o0 | 331 | flush %o0 |
@@ -264,22 +344,22 @@ cheetah_patch_cachetlbops: | |||
264 | or %o0, %lo(__flush_tlb_mm), %o0 | 344 | or %o0, %lo(__flush_tlb_mm), %o0 |
265 | sethi %hi(__cheetah_flush_tlb_mm), %o1 | 345 | sethi %hi(__cheetah_flush_tlb_mm), %o1 |
266 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 | 346 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 |
267 | call cheetah_patch_one | 347 | call tlb_patch_one |
268 | mov 18, %o2 | 348 | mov 19, %o2 |
269 | 349 | ||
270 | sethi %hi(__flush_tlb_pending), %o0 | 350 | sethi %hi(__flush_tlb_pending), %o0 |
271 | or %o0, %lo(__flush_tlb_pending), %o0 | 351 | or %o0, %lo(__flush_tlb_pending), %o0 |
272 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | 352 | sethi %hi(__cheetah_flush_tlb_pending), %o1 |
273 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 | 353 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 |
274 | call cheetah_patch_one | 354 | call tlb_patch_one |
275 | mov 26, %o2 | 355 | mov 27, %o2 |
276 | 356 | ||
277 | #ifdef DCACHE_ALIASING_POSSIBLE | 357 | #ifdef DCACHE_ALIASING_POSSIBLE |
278 | sethi %hi(__flush_dcache_page), %o0 | 358 | sethi %hi(__flush_dcache_page), %o0 |
279 | or %o0, %lo(__flush_dcache_page), %o0 | 359 | or %o0, %lo(__flush_dcache_page), %o0 |
280 | sethi %hi(__cheetah_flush_dcache_page), %o1 | 360 | sethi %hi(__cheetah_flush_dcache_page), %o1 |
281 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 | 361 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 |
282 | call cheetah_patch_one | 362 | call tlb_patch_one |
283 | mov 11, %o2 | 363 | mov 11, %o2 |
284 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 364 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
285 | 365 | ||
@@ -295,16 +375,14 @@ cheetah_patch_cachetlbops: | |||
295 | * %g1 address arg 1 (tlb page and range flushes) | 375 | * %g1 address arg 1 (tlb page and range flushes) |
296 | * %g7 address arg 2 (tlb range flush only) | 376 | * %g7 address arg 2 (tlb range flush only) |
297 | * | 377 | * |
298 | * %g6 ivector table, don't touch | 378 | * %g6 scratch 1 |
299 | * %g2 scratch 1 | 379 | * %g2 scratch 2 |
300 | * %g3 scratch 2 | 380 | * %g3 scratch 3 |
301 | * %g4 scratch 3 | 381 | * %g4 scratch 4 |
302 | * | ||
303 | * TODO: Make xcall TLB range flushes use the tricks above... -DaveM | ||
304 | */ | 382 | */ |
305 | .align 32 | 383 | .align 32 |
306 | .globl xcall_flush_tlb_mm | 384 | .globl xcall_flush_tlb_mm |
307 | xcall_flush_tlb_mm: | 385 | xcall_flush_tlb_mm: /* 21 insns */ |
308 | mov PRIMARY_CONTEXT, %g2 | 386 | mov PRIMARY_CONTEXT, %g2 |
309 | ldxa [%g2] ASI_DMMU, %g3 | 387 | ldxa [%g2] ASI_DMMU, %g3 |
310 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 | 388 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 |
@@ -316,9 +394,19 @@ xcall_flush_tlb_mm: | |||
316 | stxa %g0, [%g4] ASI_IMMU_DEMAP | 394 | stxa %g0, [%g4] ASI_IMMU_DEMAP |
317 | stxa %g3, [%g2] ASI_DMMU | 395 | stxa %g3, [%g2] ASI_DMMU |
318 | retry | 396 | retry |
397 | nop | ||
398 | nop | ||
399 | nop | ||
400 | nop | ||
401 | nop | ||
402 | nop | ||
403 | nop | ||
404 | nop | ||
405 | nop | ||
406 | nop | ||
319 | 407 | ||
320 | .globl xcall_flush_tlb_pending | 408 | .globl xcall_flush_tlb_pending |
321 | xcall_flush_tlb_pending: | 409 | xcall_flush_tlb_pending: /* 21 insns */ |
322 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ | 410 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ |
323 | sllx %g1, 3, %g1 | 411 | sllx %g1, 3, %g1 |
324 | mov PRIMARY_CONTEXT, %g4 | 412 | mov PRIMARY_CONTEXT, %g4 |
@@ -341,9 +429,10 @@ xcall_flush_tlb_pending: | |||
341 | nop | 429 | nop |
342 | stxa %g2, [%g4] ASI_DMMU | 430 | stxa %g2, [%g4] ASI_DMMU |
343 | retry | 431 | retry |
432 | nop | ||
344 | 433 | ||
345 | .globl xcall_flush_tlb_kernel_range | 434 | .globl xcall_flush_tlb_kernel_range |
346 | xcall_flush_tlb_kernel_range: | 435 | xcall_flush_tlb_kernel_range: /* 25 insns */ |
347 | sethi %hi(PAGE_SIZE - 1), %g2 | 436 | sethi %hi(PAGE_SIZE - 1), %g2 |
348 | or %g2, %lo(PAGE_SIZE - 1), %g2 | 437 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
349 | andn %g1, %g2, %g1 | 438 | andn %g1, %g2, %g1 |
@@ -360,14 +449,30 @@ xcall_flush_tlb_kernel_range: | |||
360 | retry | 449 | retry |
361 | nop | 450 | nop |
362 | nop | 451 | nop |
452 | nop | ||
453 | nop | ||
454 | nop | ||
455 | nop | ||
456 | nop | ||
457 | nop | ||
458 | nop | ||
459 | nop | ||
460 | nop | ||
363 | 461 | ||
364 | /* This runs in a very controlled environment, so we do | 462 | /* This runs in a very controlled environment, so we do |
365 | * not need to worry about BH races etc. | 463 | * not need to worry about BH races etc. |
366 | */ | 464 | */ |
367 | .globl xcall_sync_tick | 465 | .globl xcall_sync_tick |
368 | xcall_sync_tick: | 466 | xcall_sync_tick: |
369 | rdpr %pstate, %g2 | 467 | |
468 | 661: rdpr %pstate, %g2 | ||
370 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | 469 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate |
470 | .section .sun4v_2insn_patch, "ax" | ||
471 | .word 661b | ||
472 | nop | ||
473 | nop | ||
474 | .previous | ||
475 | |||
371 | rdpr %pil, %g2 | 476 | rdpr %pil, %g2 |
372 | wrpr %g0, 15, %pil | 477 | wrpr %g0, 15, %pil |
373 | sethi %hi(109f), %g7 | 478 | sethi %hi(109f), %g7 |
@@ -390,8 +495,15 @@ xcall_sync_tick: | |||
390 | */ | 495 | */ |
391 | .globl xcall_report_regs | 496 | .globl xcall_report_regs |
392 | xcall_report_regs: | 497 | xcall_report_regs: |
393 | rdpr %pstate, %g2 | 498 | |
499 | 661: rdpr %pstate, %g2 | ||
394 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | 500 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate |
501 | .section .sun4v_2insn_patch, "ax" | ||
502 | .word 661b | ||
503 | nop | ||
504 | nop | ||
505 | .previous | ||
506 | |||
395 | rdpr %pil, %g2 | 507 | rdpr %pil, %g2 |
396 | wrpr %g0, 15, %pil | 508 | wrpr %g0, 15, %pil |
397 | sethi %hi(109f), %g7 | 509 | sethi %hi(109f), %g7 |
@@ -453,62 +565,96 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address | |||
453 | nop | 565 | nop |
454 | nop | 566 | nop |
455 | 567 | ||
456 | .data | 568 | /* %g5: error |
457 | 569 | * %g6: tlb op | |
458 | errata32_hwbug: | 570 | */ |
459 | .xword 0 | 571 | __hypervisor_tlb_xcall_error: |
460 | 572 | mov %g5, %g4 | |
461 | .text | 573 | mov %g6, %g5 |
462 | 574 | ba,pt %xcc, etrap | |
463 | /* These two are not performance critical... */ | 575 | rd %pc, %g7 |
464 | .globl xcall_flush_tlb_all_spitfire | 576 | mov %l4, %o0 |
465 | xcall_flush_tlb_all_spitfire: | 577 | call hypervisor_tlbop_error_xcall |
466 | /* Spitfire Errata #32 workaround. */ | 578 | mov %l5, %o1 |
467 | sethi %hi(errata32_hwbug), %g4 | 579 | ba,a,pt %xcc, rtrap_clr_l6 |
468 | stx %g0, [%g4 + %lo(errata32_hwbug)] | 580 | |
469 | 581 | .globl __hypervisor_xcall_flush_tlb_mm | |
470 | clr %g2 | 582 | __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ |
471 | clr %g3 | 583 | /* %g5=ctx, %g1,%g2,%g3,%g4,%g7=scratch, %g6=unusable */ |
472 | 1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 | 584 | mov %o0, %g2 |
473 | and %g4, _PAGE_L, %g5 | 585 | mov %o1, %g3 |
474 | brnz,pn %g5, 2f | 586 | mov %o2, %g4 |
475 | mov TLB_TAG_ACCESS, %g7 | 587 | mov %o3, %g1 |
476 | 588 | mov %o5, %g7 | |
477 | stxa %g0, [%g7] ASI_DMMU | 589 | clr %o0 /* ARG0: CPU lists unimplemented */ |
478 | membar #Sync | 590 | clr %o1 /* ARG1: CPU lists unimplemented */ |
479 | stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS | 591 | mov %g5, %o2 /* ARG2: mmu context */ |
592 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
593 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
594 | ta HV_FAST_TRAP | ||
595 | mov HV_FAST_MMU_DEMAP_CTX, %g6 | ||
596 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
597 | mov %o0, %g5 | ||
598 | mov %g2, %o0 | ||
599 | mov %g3, %o1 | ||
600 | mov %g4, %o2 | ||
601 | mov %g1, %o3 | ||
602 | mov %g7, %o5 | ||
480 | membar #Sync | 603 | membar #Sync |
604 | retry | ||
481 | 605 | ||
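The hypervisor variant above replaces the direct MMU-register demap with a sun4v fast trap: the function number rides in %o5, arguments in %o0-%o3, and a status code comes back in %o0 (zero on success). That is why the routine shuffles the caller's %o registers into globals around the trap, and why a nonzero status is funneled into __hypervisor_tlb_xcall_error with the error in %g5 and the attempted op in %g6. A hedged C restatement of the trap sequence, assuming only the HV_* constants visible in this diff; the wrapper itself is illustrative, not the kernel's API:

	static unsigned long hv_mmu_demap_ctx(unsigned long ctx)
	{
		register unsigned long o0 asm("o0") = 0;   /* ARG0: CPU list (unimplemented) */
		register unsigned long o1 asm("o1") = 0;   /* ARG1: CPU list (unimplemented) */
		register unsigned long o2 asm("o2") = ctx; /* ARG2: mmu context */
		register unsigned long o3 asm("o3") = HV_MMU_ALL; /* ARG3: flags */
		register unsigned long o5 asm("o5") = HV_FAST_MMU_DEMAP_CTX;

		__asm__ __volatile__("ta %5"
				     : "+r" (o0), "+r" (o1), "+r" (o2),
				       "+r" (o3), "+r" (o5)
				     : "i" (HV_FAST_TRAP)
				     : "memory");
		return o0;	/* nonzero means hypervisor error */
	}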
482 | /* Spitfire Errata #32 workaround. */ | 606 | .globl __hypervisor_xcall_flush_tlb_pending |
483 | sethi %hi(errata32_hwbug), %g4 | 607 | __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ |
484 | stx %g0, [%g4 + %lo(errata32_hwbug)] | 608 | /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,%g6=scratch */ |
485 | 609 | sllx %g1, 3, %g1 | |
486 | 2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 | 610 | mov %o0, %g2 |
487 | and %g4, _PAGE_L, %g5 | 611 | mov %o1, %g3 |
488 | brnz,pn %g5, 2f | 612 | mov %o2, %g4 |
489 | mov TLB_TAG_ACCESS, %g7 | 613 | 1: sub %g1, (1 << 3), %g1 |
490 | 614 | ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ | |
491 | stxa %g0, [%g7] ASI_IMMU | 615 | mov %g5, %o1 /* ARG1: mmu context */ |
492 | membar #Sync | 616 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
493 | stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS | 617 | srlx %o0, PAGE_SHIFT, %o0 |
618 | sllx %o0, PAGE_SHIFT, %o0 | ||
619 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
620 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
621 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error | ||
622 | mov %o0, %g5 | ||
623 | brnz,pt %g1, 1b | ||
624 | nop | ||
625 | mov %g2, %o0 | ||
626 | mov %g3, %o1 | ||
627 | mov %g4, %o2 | ||
494 | membar #Sync | 628 | membar #Sync |
495 | |||
496 | /* Spitfire Errata #32 workaround. */ | ||
497 | sethi %hi(errata32_hwbug), %g4 | ||
498 | stx %g0, [%g4 + %lo(errata32_hwbug)] | ||
499 | |||
500 | 2: add %g2, 1, %g2 | ||
501 | cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT | ||
502 | ble,pt %icc, 1b | ||
503 | sll %g2, 3, %g3 | ||
504 | flush %g6 | ||
505 | retry | 629 | retry |
506 | 630 | ||
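The pending-flush loop takes a different path: each virtual address is torn down with a dedicated unmap trap rather than a fast trap, with the page-aligned address in %o0 (the srlx/sllx pair masks the low bits), the context in %o1, and HV_MMU_ALL in %o2. Sketched in C under the same caveats as the wrapper above:

	static unsigned long hv_mmu_unmap_addr(unsigned long vaddr,
					       unsigned long ctx)
	{
		register unsigned long o0 asm("o0") = vaddr & ~(PAGE_SIZE - 1UL);
		register unsigned long o1 asm("o1") = ctx;
		register unsigned long o2 asm("o2") = HV_MMU_ALL;

		__asm__ __volatile__("ta %3"
				     : "+r" (o0), "+r" (o1), "+r" (o2)
				     : "i" (HV_MMU_UNMAP_ADDR_TRAP)
				     : "memory");
		return o0;	/* nonzero means hypervisor error */
	}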
507 | .globl xcall_flush_tlb_all_cheetah | 631 | .globl __hypervisor_xcall_flush_tlb_kernel_range |
508 | xcall_flush_tlb_all_cheetah: | 632 | __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ |
509 | mov 0x80, %g2 | 633 | /* %g1=start, %g7=end, %g2,%g3,%g4,%g5,%g6=scratch */ |
510 | stxa %g0, [%g2] ASI_DMMU_DEMAP | 634 | sethi %hi(PAGE_SIZE - 1), %g2 |
511 | stxa %g0, [%g2] ASI_IMMU_DEMAP | 635 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
636 | andn %g1, %g2, %g1 | ||
637 | andn %g7, %g2, %g7 | ||
638 | sub %g7, %g1, %g3 | ||
639 | add %g2, 1, %g2 | ||
640 | sub %g3, %g2, %g3 | ||
641 | mov %o0, %g2 | ||
642 | mov %o1, %g4 | ||
643 | mov %o2, %g7 | ||
644 | 1: add %g1, %g3, %o0 /* ARG0: virtual address */ | ||
645 | mov 0, %o1 /* ARG1: mmu context */ | ||
646 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
647 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
648 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
649 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
650 | mov %o0, %g5 | ||
651 | sethi %hi(PAGE_SIZE), %o2 | ||
652 | brnz,pt %g3, 1b | ||
653 | sub %g3, %o2, %g3 | ||
654 | mov %g2, %o0 | ||
655 | mov %g4, %o1 | ||
656 | mov %g7, %o2 | ||
657 | membar #Sync | ||
512 | retry | 658 | retry |
513 | 659 | ||
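The kernel-range variant page-aligns both endpoints and then walks the span from the last page down to the first, stopping once the offset held in %g3 reaches zero. Restated in C for a non-empty range, reusing the illustrative hv_mmu_unmap_addr() sketch above (context 0 being the kernel context):

	static void hv_xcall_flush_tlb_kernel_range(unsigned long start,
						    unsigned long end)
	{
		unsigned long mask = PAGE_SIZE - 1;
		long off;

		start &= ~mask;
		end &= ~mask;
		for (off = end - start - PAGE_SIZE; off >= 0; off -= PAGE_SIZE)
			hv_mmu_unmap_addr(start + off, 0);
	}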
514 | /* These just get rescheduled to PIL vectors. */ | 660 | /* These just get rescheduled to PIL vectors. */ |
@@ -527,4 +673,70 @@ xcall_capture: | |||
527 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint | 673 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint |
528 | retry | 674 | retry |
529 | 675 | ||
676 | .globl xcall_new_mmu_context_version | ||
677 | xcall_new_mmu_context_version: | ||
678 | wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint | ||
679 | retry | ||
680 | |||
530 | #endif /* CONFIG_SMP */ | 681 | #endif /* CONFIG_SMP */ |
682 | |||
683 | |||
684 | .globl hypervisor_patch_cachetlbops | ||
685 | hypervisor_patch_cachetlbops: | ||
686 | save %sp, -128, %sp | ||
687 | |||
688 | sethi %hi(__flush_tlb_mm), %o0 | ||
689 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
690 | sethi %hi(__hypervisor_flush_tlb_mm), %o1 | ||
691 | or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 | ||
692 | call tlb_patch_one | ||
693 | mov 10, %o2 | ||
694 | |||
695 | sethi %hi(__flush_tlb_pending), %o0 | ||
696 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
697 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 | ||
698 | or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 | ||
699 | call tlb_patch_one | ||
700 | mov 16, %o2 | ||
701 | |||
702 | sethi %hi(__flush_tlb_kernel_range), %o0 | ||
703 | or %o0, %lo(__flush_tlb_kernel_range), %o0 | ||
704 | sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 | ||
705 | or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 | ||
706 | call tlb_patch_one | ||
707 | mov 16, %o2 | ||
708 | |||
709 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
710 | sethi %hi(__flush_dcache_page), %o0 | ||
711 | or %o0, %lo(__flush_dcache_page), %o0 | ||
712 | sethi %hi(__hypervisor_flush_dcache_page), %o1 | ||
713 | or %o1, %lo(__hypervisor_flush_dcache_page), %o1 | ||
714 | call tlb_patch_one | ||
715 | mov 2, %o2 | ||
716 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
717 | |||
718 | #ifdef CONFIG_SMP | ||
719 | sethi %hi(xcall_flush_tlb_mm), %o0 | ||
720 | or %o0, %lo(xcall_flush_tlb_mm), %o0 | ||
721 | sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
722 | or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
723 | call tlb_patch_one | ||
724 | mov 21, %o2 | ||
725 | |||
726 | sethi %hi(xcall_flush_tlb_pending), %o0 | ||
727 | or %o0, %lo(xcall_flush_tlb_pending), %o0 | ||
728 | sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
729 | or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
730 | call tlb_patch_one | ||
731 | mov 21, %o2 | ||
732 | |||
733 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | ||
734 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | ||
735 | sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
736 | or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
737 | call tlb_patch_one | ||
738 | mov 25, %o2 | ||
739 | #endif /* CONFIG_SMP */ | ||
740 | |||
741 | ret | ||
742 | restore | ||
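Note how the instruction counts handed to tlb_patch_one (10, 16 and 16 for the local flushes, 2 for the dcache flush, 21, 21 and 25 for the cross-calls) match the slot sizes annotated on the cross-call routines above, and presumably on the local routines earlier in the file, so the patching contract can be checked by inspection. A hedged sketch of the expected call site, since the patch has to run once, early in sun4v boot, before any patched routine is used ('tlb_type' is assumed to be the usual sparc64 chip-type selector):

	if (tlb_type == hypervisor)
		hypervisor_patch_cachetlbops();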
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S index 29d0ae74aed8..5f27ad779c0c 100644 --- a/arch/sparc64/prom/cif.S +++ b/arch/sparc64/prom/cif.S | |||
@@ -1,10 +1,12 @@ | |||
1 | /* cif.S: PROM entry/exit assembler trampolines. | 1 | /* cif.S: PROM entry/exit assembler trampolines. |
2 | * | 2 | * |
3 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 3 | * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
4 | * Copyright (C) 2005 David S. Miller <davem@davemloft.net> | 4 | * Copyright (C) 2005, 2006 David S. Miller <davem@davemloft.net> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <asm/pstate.h> | 7 | #include <asm/pstate.h> |
8 | #include <asm/cpudata.h> | ||
9 | #include <asm/thread_info.h> | ||
8 | 10 | ||
9 | .text | 11 | .text |
10 | .globl prom_cif_interface | 12 | .globl prom_cif_interface |
@@ -12,78 +14,16 @@ prom_cif_interface: | |||
12 | sethi %hi(p1275buf), %o0 | 14 | sethi %hi(p1275buf), %o0 |
13 | or %o0, %lo(p1275buf), %o0 | 15 | or %o0, %lo(p1275buf), %o0 |
14 | ldx [%o0 + 0x010], %o1 ! prom_cif_stack | 16 | ldx [%o0 + 0x010], %o1 ! prom_cif_stack |
15 | save %o1, -0x190, %sp | 17 | save %o1, -192, %sp |
16 | ldx [%i0 + 0x008], %l2 ! prom_cif_handler | 18 | ldx [%i0 + 0x008], %l2 ! prom_cif_handler |
17 | rdpr %pstate, %l4 | 19 | mov %g4, %l0 |
18 | wrpr %g0, 0x15, %pstate ! save alternate globals | 20 | mov %g5, %l1 |
19 | stx %g1, [%sp + 2047 + 0x0b0] | 21 | mov %g6, %l3 |
20 | stx %g2, [%sp + 2047 + 0x0b8] | ||
21 | stx %g3, [%sp + 2047 + 0x0c0] | ||
22 | stx %g4, [%sp + 2047 + 0x0c8] | ||
23 | stx %g5, [%sp + 2047 + 0x0d0] | ||
24 | stx %g6, [%sp + 2047 + 0x0d8] | ||
25 | stx %g7, [%sp + 2047 + 0x0e0] | ||
26 | wrpr %g0, 0x814, %pstate ! save interrupt globals | ||
27 | stx %g1, [%sp + 2047 + 0x0e8] | ||
28 | stx %g2, [%sp + 2047 + 0x0f0] | ||
29 | stx %g3, [%sp + 2047 + 0x0f8] | ||
30 | stx %g4, [%sp + 2047 + 0x100] | ||
31 | stx %g5, [%sp + 2047 + 0x108] | ||
32 | stx %g6, [%sp + 2047 + 0x110] | ||
33 | stx %g7, [%sp + 2047 + 0x118] | ||
34 | wrpr %g0, 0x14, %pstate ! save normal globals | ||
35 | stx %g1, [%sp + 2047 + 0x120] | ||
36 | stx %g2, [%sp + 2047 + 0x128] | ||
37 | stx %g3, [%sp + 2047 + 0x130] | ||
38 | stx %g4, [%sp + 2047 + 0x138] | ||
39 | stx %g5, [%sp + 2047 + 0x140] | ||
40 | stx %g6, [%sp + 2047 + 0x148] | ||
41 | stx %g7, [%sp + 2047 + 0x150] | ||
42 | wrpr %g0, 0x414, %pstate ! save mmu globals | ||
43 | stx %g1, [%sp + 2047 + 0x158] | ||
44 | stx %g2, [%sp + 2047 + 0x160] | ||
45 | stx %g3, [%sp + 2047 + 0x168] | ||
46 | stx %g4, [%sp + 2047 + 0x170] | ||
47 | stx %g5, [%sp + 2047 + 0x178] | ||
48 | stx %g6, [%sp + 2047 + 0x180] | ||
49 | stx %g7, [%sp + 2047 + 0x188] | ||
50 | mov %g1, %l0 ! also save to locals, so we can handle | ||
51 | mov %g2, %l1 ! tlb faults later on, when accessing | ||
52 | mov %g3, %l3 ! the stack. | ||
53 | mov %g7, %l5 | ||
54 | wrpr %l4, PSTATE_IE, %pstate ! turn off interrupts | ||
55 | call %l2 | 22 | call %l2 |
56 | add %i0, 0x018, %o0 ! prom_args | 23 | add %i0, 0x018, %o0 ! prom_args |
57 | wrpr %g0, 0x414, %pstate ! restore mmu globals | 24 | mov %l0, %g4 |
58 | mov %l0, %g1 | 25 | mov %l1, %g5 |
59 | mov %l1, %g2 | 26 | mov %l3, %g6 |
60 | mov %l3, %g3 | ||
61 | mov %l5, %g7 | ||
62 | wrpr %g0, 0x14, %pstate ! restore normal globals | ||
63 | ldx [%sp + 2047 + 0x120], %g1 | ||
64 | ldx [%sp + 2047 + 0x128], %g2 | ||
65 | ldx [%sp + 2047 + 0x130], %g3 | ||
66 | ldx [%sp + 2047 + 0x138], %g4 | ||
67 | ldx [%sp + 2047 + 0x140], %g5 | ||
68 | ldx [%sp + 2047 + 0x148], %g6 | ||
69 | ldx [%sp + 2047 + 0x150], %g7 | ||
70 | wrpr %g0, 0x814, %pstate ! restore interrupt globals | ||
71 | ldx [%sp + 2047 + 0x0e8], %g1 | ||
72 | ldx [%sp + 2047 + 0x0f0], %g2 | ||
73 | ldx [%sp + 2047 + 0x0f8], %g3 | ||
74 | ldx [%sp + 2047 + 0x100], %g4 | ||
75 | ldx [%sp + 2047 + 0x108], %g5 | ||
76 | ldx [%sp + 2047 + 0x110], %g6 | ||
77 | ldx [%sp + 2047 + 0x118], %g7 | ||
78 | wrpr %g0, 0x15, %pstate ! restore alternate globals | ||
79 | ldx [%sp + 2047 + 0x0b0], %g1 | ||
80 | ldx [%sp + 2047 + 0x0b8], %g2 | ||
81 | ldx [%sp + 2047 + 0x0c0], %g3 | ||
82 | ldx [%sp + 2047 + 0x0c8], %g4 | ||
83 | ldx [%sp + 2047 + 0x0d0], %g5 | ||
84 | ldx [%sp + 2047 + 0x0d8], %g6 | ||
85 | ldx [%sp + 2047 + 0x0e0], %g7 | ||
86 | wrpr %l4, 0, %pstate ! restore original pstate | ||
87 | ret | 27 | ret |
88 | restore | 28 | restore |
89 | 29 | ||
@@ -91,135 +31,18 @@ prom_cif_interface: | |||
91 | prom_cif_callback: | 31 | prom_cif_callback: |
92 | sethi %hi(p1275buf), %o1 | 32 | sethi %hi(p1275buf), %o1 |
93 | or %o1, %lo(p1275buf), %o1 | 33 | or %o1, %lo(p1275buf), %o1 |
94 | save %sp, -0x270, %sp | 34 | save %sp, -192, %sp |
95 | rdpr %pstate, %l4 | 35 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
96 | wrpr %g0, 0x15, %pstate ! save PROM alternate globals | 36 | LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %o0) |
97 | stx %g1, [%sp + 2047 + 0x0b0] | 37 | ldx [%g6 + TI_TASK], %g4 |
98 | stx %g2, [%sp + 2047 + 0x0b8] | ||
99 | stx %g3, [%sp + 2047 + 0x0c0] | ||
100 | stx %g4, [%sp + 2047 + 0x0c8] | ||
101 | stx %g5, [%sp + 2047 + 0x0d0] | ||
102 | stx %g6, [%sp + 2047 + 0x0d8] | ||
103 | stx %g7, [%sp + 2047 + 0x0e0] | ||
104 | ! restore Linux alternate globals | ||
105 | ldx [%sp + 2047 + 0x190], %g1 | ||
106 | ldx [%sp + 2047 + 0x198], %g2 | ||
107 | ldx [%sp + 2047 + 0x1a0], %g3 | ||
108 | ldx [%sp + 2047 + 0x1a8], %g4 | ||
109 | ldx [%sp + 2047 + 0x1b0], %g5 | ||
110 | ldx [%sp + 2047 + 0x1b8], %g6 | ||
111 | ldx [%sp + 2047 + 0x1c0], %g7 | ||
112 | wrpr %g0, 0x814, %pstate ! save PROM interrupt globals | ||
113 | stx %g1, [%sp + 2047 + 0x0e8] | ||
114 | stx %g2, [%sp + 2047 + 0x0f0] | ||
115 | stx %g3, [%sp + 2047 + 0x0f8] | ||
116 | stx %g4, [%sp + 2047 + 0x100] | ||
117 | stx %g5, [%sp + 2047 + 0x108] | ||
118 | stx %g6, [%sp + 2047 + 0x110] | ||
119 | stx %g7, [%sp + 2047 + 0x118] | ||
120 | ! restore Linux interrupt globals | ||
121 | ldx [%sp + 2047 + 0x1c8], %g1 | ||
122 | ldx [%sp + 2047 + 0x1d0], %g2 | ||
123 | ldx [%sp + 2047 + 0x1d8], %g3 | ||
124 | ldx [%sp + 2047 + 0x1e0], %g4 | ||
125 | ldx [%sp + 2047 + 0x1e8], %g5 | ||
126 | ldx [%sp + 2047 + 0x1f0], %g6 | ||
127 | ldx [%sp + 2047 + 0x1f8], %g7 | ||
128 | wrpr %g0, 0x14, %pstate ! save PROM normal globals | ||
129 | stx %g1, [%sp + 2047 + 0x120] | ||
130 | stx %g2, [%sp + 2047 + 0x128] | ||
131 | stx %g3, [%sp + 2047 + 0x130] | ||
132 | stx %g4, [%sp + 2047 + 0x138] | ||
133 | stx %g5, [%sp + 2047 + 0x140] | ||
134 | stx %g6, [%sp + 2047 + 0x148] | ||
135 | stx %g7, [%sp + 2047 + 0x150] | ||
136 | ! restore Linux normal globals | ||
137 | ldx [%sp + 2047 + 0x200], %g1 | ||
138 | ldx [%sp + 2047 + 0x208], %g2 | ||
139 | ldx [%sp + 2047 + 0x210], %g3 | ||
140 | ldx [%sp + 2047 + 0x218], %g4 | ||
141 | ldx [%sp + 2047 + 0x220], %g5 | ||
142 | ldx [%sp + 2047 + 0x228], %g6 | ||
143 | ldx [%sp + 2047 + 0x230], %g7 | ||
144 | wrpr %g0, 0x414, %pstate ! save PROM mmu globals | ||
145 | stx %g1, [%sp + 2047 + 0x158] | ||
146 | stx %g2, [%sp + 2047 + 0x160] | ||
147 | stx %g3, [%sp + 2047 + 0x168] | ||
148 | stx %g4, [%sp + 2047 + 0x170] | ||
149 | stx %g5, [%sp + 2047 + 0x178] | ||
150 | stx %g6, [%sp + 2047 + 0x180] | ||
151 | stx %g7, [%sp + 2047 + 0x188] | ||
152 | ! restore Linux mmu globals | ||
153 | ldx [%sp + 2047 + 0x238], %o0 | ||
154 | ldx [%sp + 2047 + 0x240], %o1 | ||
155 | ldx [%sp + 2047 + 0x248], %l2 | ||
156 | ldx [%sp + 2047 + 0x250], %l3 | ||
157 | ldx [%sp + 2047 + 0x258], %l5 | ||
158 | ldx [%sp + 2047 + 0x260], %l6 | ||
159 | ldx [%sp + 2047 + 0x268], %l7 | ||
160 | ! switch to Linux tba | ||
161 | sethi %hi(sparc64_ttable_tl0), %l1 | ||
162 | rdpr %tba, %l0 ! save PROM tba | ||
163 | mov %o0, %g1 | ||
164 | mov %o1, %g2 | ||
165 | mov %l2, %g3 | ||
166 | mov %l3, %g4 | ||
167 | mov %l5, %g5 | ||
168 | mov %l6, %g6 | ||
169 | mov %l7, %g7 | ||
170 | wrpr %l1, %tba ! install Linux tba | ||
171 | wrpr %l4, 0, %pstate ! restore PSTATE | ||
172 | call prom_world | 38 | call prom_world |
173 | mov %g0, %o0 | 39 | mov 0, %o0 |
174 | ldx [%i1 + 0x000], %l2 | 40 | ldx [%i1 + 0x000], %l2 |
175 | call %l2 | 41 | call %l2 |
176 | mov %i0, %o0 | 42 | mov %i0, %o0 |
177 | mov %o0, %l1 | 43 | mov %o0, %l1 |
178 | call prom_world | 44 | call prom_world |
179 | or %g0, 1, %o0 | 45 | mov 1, %o0 |
180 | wrpr %g0, 0x14, %pstate ! interrupts off | ||
181 | ! restore PROM mmu globals | ||
182 | ldx [%sp + 2047 + 0x158], %o0 | ||
183 | ldx [%sp + 2047 + 0x160], %o1 | ||
184 | ldx [%sp + 2047 + 0x168], %l2 | ||
185 | ldx [%sp + 2047 + 0x170], %l3 | ||
186 | ldx [%sp + 2047 + 0x178], %l5 | ||
187 | ldx [%sp + 2047 + 0x180], %l6 | ||
188 | ldx [%sp + 2047 + 0x188], %l7 | ||
189 | wrpr %g0, 0x414, %pstate ! restore PROM mmu globals | ||
190 | mov %o0, %g1 | ||
191 | mov %o1, %g2 | ||
192 | mov %l2, %g3 | ||
193 | mov %l3, %g4 | ||
194 | mov %l5, %g5 | ||
195 | mov %l6, %g6 | ||
196 | mov %l7, %g7 | ||
197 | wrpr %l0, %tba ! restore PROM tba | ||
198 | wrpr %g0, 0x14, %pstate ! restore PROM normal globals | ||
199 | ldx [%sp + 2047 + 0x120], %g1 | ||
200 | ldx [%sp + 2047 + 0x128], %g2 | ||
201 | ldx [%sp + 2047 + 0x130], %g3 | ||
202 | ldx [%sp + 2047 + 0x138], %g4 | ||
203 | ldx [%sp + 2047 + 0x140], %g5 | ||
204 | ldx [%sp + 2047 + 0x148], %g6 | ||
205 | ldx [%sp + 2047 + 0x150], %g7 | ||
206 | wrpr %g0, 0x814, %pstate ! restore PROM interrupt globals | ||
207 | ldx [%sp + 2047 + 0x0e8], %g1 | ||
208 | ldx [%sp + 2047 + 0x0f0], %g2 | ||
209 | ldx [%sp + 2047 + 0x0f8], %g3 | ||
210 | ldx [%sp + 2047 + 0x100], %g4 | ||
211 | ldx [%sp + 2047 + 0x108], %g5 | ||
212 | ldx [%sp + 2047 + 0x110], %g6 | ||
213 | ldx [%sp + 2047 + 0x118], %g7 | ||
214 | wrpr %g0, 0x15, %pstate ! restore PROM alternate globals | ||
215 | ldx [%sp + 2047 + 0x0b0], %g1 | ||
216 | ldx [%sp + 2047 + 0x0b8], %g2 | ||
217 | ldx [%sp + 2047 + 0x0c0], %g3 | ||
218 | ldx [%sp + 2047 + 0x0c8], %g4 | ||
219 | ldx [%sp + 2047 + 0x0d0], %g5 | ||
220 | ldx [%sp + 2047 + 0x0d8], %g6 | ||
221 | ldx [%sp + 2047 + 0x0e0], %g7 | ||
222 | wrpr %l4, 0, %pstate | ||
223 | ret | 46 | ret |
224 | restore %l1, 0, %o0 | 47 | restore %l1, 0, %o0 |
225 | 48 | ||
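The slimmed-down trampolines work because the kernel now keeps live state in only three globals across the CIF call (%g6 thread_info, %g5 per-cpu base, %g4 task, reloaded on the callback path via TRAP_LOAD_THREAD_REG and LOAD_PER_CPU_BASE), so the old save/restore of every alternate, interrupt, normal and mmu global set can go. The magic offsets the trampoline dereferences (0x008, 0x010, 0x018) index into p1275buf; a layout consistent with those offsets, sketched here as an assumption since the authoritative definition lives in p1275.c:

	struct p1275_buf_layout {
		long prom_callback;			/* 0x000, assumed       */
		void (*prom_cif_handler)(long *);	/* 0x008                */
		unsigned long prom_cif_stack;		/* 0x010                */
		unsigned long prom_args[23];		/* 0x018, count assumed */
		char prom_buffer[3000];			/* scratch, assumed     */
	};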
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index ac6d035dd150..7c25c54cefdc 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c | |||
@@ -102,6 +102,9 @@ prom_query_input_device(void) | |||
102 | if (!strncmp (propb, "rsc", 3)) | 102 | if (!strncmp (propb, "rsc", 3)) |
103 | return PROMDEV_IRSC; | 103 | return PROMDEV_IRSC; |
104 | 104 | ||
105 | if (!strncmp (propb, "virtual-console", 15)) | ||
106 | return PROMDEV_IVCONS; | ||
107 | |||
105 | if (strncmp (propb, "tty", 3) || !propb[3]) | 108 | if (strncmp (propb, "tty", 3) || !propb[3]) |
106 | return PROMDEV_I_UNK; | 109 | return PROMDEV_I_UNK; |
107 | 110 | ||
@@ -143,6 +146,9 @@ prom_query_output_device(void) | |||
143 | if (!strncmp (propb, "rsc", 3)) | 146 | if (!strncmp (propb, "rsc", 3)) |
144 | return PROMDEV_ORSC; | 147 | return PROMDEV_ORSC; |
145 | 148 | ||
149 | if (!strncmp (propb, "virtual-console", 15)) | ||
150 | return PROMDEV_OVCONS; | ||
151 | |||
146 | if (strncmp (propb, "tty", 3) || !propb[3]) | 152 | if (strncmp (propb, "tty", 3) || !propb[3]) |
147 | return PROMDEV_O_UNK; | 153 | return PROMDEV_O_UNK; |
148 | 154 | ||
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index f3cc2d8578b2..1c0db842a6f4 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c | |||
@@ -14,11 +14,10 @@ | |||
14 | #include <asm/openprom.h> | 14 | #include <asm/openprom.h> |
15 | #include <asm/oplib.h> | 15 | #include <asm/oplib.h> |
16 | 16 | ||
17 | enum prom_major_version prom_vers; | 17 | /* OBP version string. */ |
18 | unsigned int prom_rev, prom_prev; | 18 | char prom_version[80]; |
19 | 19 | ||
20 | /* The root node of the prom device tree. */ | 20 | /* The root node of the prom device tree. */ |
21 | int prom_root_node; | ||
22 | int prom_stdin, prom_stdout; | 21 | int prom_stdin, prom_stdout; |
23 | int prom_chosen_node; | 22 | int prom_chosen_node; |
24 | 23 | ||
@@ -31,68 +30,25 @@ extern void prom_cif_init(void *, void *); | |||
31 | 30 | ||
32 | void __init prom_init(void *cif_handler, void *cif_stack) | 31 | void __init prom_init(void *cif_handler, void *cif_stack) |
33 | { | 32 | { |
34 | char buffer[80], *p; | ||
35 | int ints[3]; | ||
36 | int node; | 33 | int node; |
37 | int i = 0; | ||
38 | int bufadjust; | ||
39 | |||
40 | prom_vers = PROM_P1275; | ||
41 | 34 | ||
42 | prom_cif_init(cif_handler, cif_stack); | 35 | prom_cif_init(cif_handler, cif_stack); |
43 | 36 | ||
44 | prom_root_node = prom_getsibling(0); | ||
45 | if((prom_root_node == 0) || (prom_root_node == -1)) | ||
46 | prom_halt(); | ||
47 | |||
48 | prom_chosen_node = prom_finddevice(prom_chosen_path); | 37 | prom_chosen_node = prom_finddevice(prom_chosen_path); |
49 | if (!prom_chosen_node || prom_chosen_node == -1) | 38 | if (!prom_chosen_node || prom_chosen_node == -1) |
50 | prom_halt(); | 39 | prom_halt(); |
51 | 40 | ||
52 | prom_stdin = prom_getint (prom_chosen_node, "stdin"); | 41 | prom_stdin = prom_getint(prom_chosen_node, "stdin"); |
53 | prom_stdout = prom_getint (prom_chosen_node, "stdout"); | 42 | prom_stdout = prom_getint(prom_chosen_node, "stdout"); |
54 | 43 | ||
55 | node = prom_finddevice("/openprom"); | 44 | node = prom_finddevice("/openprom"); |
56 | if (!node || node == -1) | 45 | if (!node || node == -1) |
57 | prom_halt(); | 46 | prom_halt(); |
58 | 47 | ||
59 | prom_getstring (node, "version", buffer, sizeof (buffer)); | 48 | prom_getstring(node, "version", prom_version, sizeof(prom_version)); |
60 | |||
61 | prom_printf ("\n"); | ||
62 | |||
63 | if (strncmp (buffer, "OBP ", 4)) | ||
64 | goto strange_version; | ||
65 | |||
66 | /* | ||
67 | * Version field is expected to be 'OBP xx.yy.zz date...' | ||
68 | * However, Sun can't stick to this format very well, so | ||
69 | * we need to check for 'OBP xx.yy.zz date...' and adjust | ||
70 | * accordingly. -spot | ||
71 | */ | ||
72 | |||
73 | if (strncmp (buffer, "OBP ", 5)) | ||
74 | bufadjust = 4; | ||
75 | else | ||
76 | bufadjust = 5; | ||
77 | |||
78 | p = buffer + bufadjust; | ||
79 | while (p && isdigit(*p) && i < 3) { | ||
80 | ints[i++] = simple_strtoul(p, NULL, 0); | ||
81 | if ((p = strchr(p, '.')) != NULL) | ||
82 | p++; | ||
83 | } | ||
84 | if (i != 3) | ||
85 | goto strange_version; | ||
86 | |||
87 | prom_rev = ints[1]; | ||
88 | prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2]; | ||
89 | |||
90 | printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); | ||
91 | 49 | ||
92 | /* Initialization successful. */ | 50 | prom_printf("\n"); |
93 | return; | ||
94 | 51 | ||
95 | strange_version: | 52 | printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version); |
96 | prom_printf ("Strange OBP version `%s'.\n", buffer); | 53 | printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); |
97 | prom_halt (); | ||
98 | } | 54 | } |
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 87f5cfce23bb..577bde8b6647 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c | |||
@@ -112,28 +112,20 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes) | |||
112 | return 0xff; | 112 | return 0xff; |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Get the major prom version number. */ | 115 | /* Install Linux trap table so PROM uses that instead of its own. */ |
116 | int prom_version(void) | 116 | void prom_set_trap_table(unsigned long tba) |
117 | { | ||
118 | return PROM_P1275; | ||
119 | } | ||
120 | |||
121 | /* Get the prom plugin-revision. */ | ||
122 | int prom_getrev(void) | ||
123 | { | ||
124 | return prom_rev; | ||
125 | } | ||
126 | |||
127 | /* Get the prom firmware print revision. */ | ||
128 | int prom_getprev(void) | ||
129 | { | 117 | { |
130 | return prom_prev; | 118 | p1275_cmd("SUNW,set-trap-table", |
119 | (P1275_ARG(0, P1275_ARG_IN_64B) | | ||
120 | P1275_INOUT(1, 0)), tba); | ||
131 | } | 121 | } |
132 | 122 | ||
133 | /* Install Linux trap table so PROM uses that instead of its own. */ | 123 | void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa) |
134 | void prom_set_trap_table(unsigned long tba) | ||
135 | { | 124 | { |
136 | p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); | 125 | p1275_cmd("SUNW,set-trap-table", |
126 | (P1275_ARG(0, P1275_ARG_IN_64B) | | ||
127 | P1275_ARG(1, P1275_ARG_IN_64B) | | ||
128 | P1275_INOUT(2, 0)), tba, mmfsa); | ||
137 | } | 129 | } |
138 | 130 | ||
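On sun4v the set-trap-table service takes a second 64-bit argument, the MMU fault status area, because the hypervisor reports faults through that memory block rather than through privileged registers; the P1275_ARG_IN_64B markers presumably keep both values from being truncated to 32 bits by the default CIF argument marshalling. An illustrative call site ('tba' and 'mmfsa' echo the parameter names above, and 'tlb_type' is assumed to distinguish sun4v):

	if (tlb_type == hypervisor)
		prom_set_trap_table_sun4v(tba, mmfsa);
	else
		prom_set_trap_table(tba);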
139 | int prom_get_mmu_ihandle(void) | 131 | int prom_get_mmu_ihandle(void) |
@@ -303,9 +295,21 @@ int prom_wakeupsystem(void) | |||
303 | } | 295 | } |
304 | 296 | ||
305 | #ifdef CONFIG_SMP | 297 | #ifdef CONFIG_SMP |
306 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0) | 298 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) |
299 | { | ||
300 | p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg); | ||
301 | } | ||
302 | |||
303 | void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) | ||
304 | { | ||
305 | p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0), | ||
306 | cpuid, pc, arg); | ||
307 | } | ||
308 | |||
309 | void prom_stopcpu_cpuid(int cpuid) | ||
307 | { | 310 | { |
308 | p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0); | 311 | p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0), |
312 | cpuid); | ||
309 | } | 313 | } |
310 | 314 | ||
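The two new entry points identify processors by cpuid rather than by device-tree node, matching how sun4v firmware names CPUs, and the stop service gives the kernel a clean way to park a CPU again. A hedged bring-up sketch ('entry' and 'cookie' are placeholder names for the start address and its argument):

	if (tlb_type == hypervisor)
		prom_startcpu_cpuid(cpu, entry, cookie);
	else
		prom_startcpu(cpu_node, entry, cookie);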
311 | void prom_stopself(void) | 315 | void prom_stopself(void) |
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c index a5a7c5712028..2b32c489860c 100644 --- a/arch/sparc64/prom/p1275.c +++ b/arch/sparc64/prom/p1275.c | |||
@@ -30,16 +30,6 @@ extern void prom_world(int); | |||
30 | extern void prom_cif_interface(void); | 30 | extern void prom_cif_interface(void); |
31 | extern void prom_cif_callback(void); | 31 | extern void prom_cif_callback(void); |
32 | 32 | ||
33 | static inline unsigned long spitfire_get_primary_context(void) | ||
34 | { | ||
35 | unsigned long ctx; | ||
36 | |||
37 | __asm__ __volatile__("ldxa [%1] %2, %0" | ||
38 | : "=r" (ctx) | ||
39 | : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
40 | return ctx; | ||
41 | } | ||
42 | |||
43 | /* | 33 | /* |
44 | * This provides SMP safety on the p1275buf. prom_callback() drops this lock | 34 | * This provides SMP safety on the p1275buf. prom_callback() drops this lock |
45 | * to allow recursive acquisition. | 35 | * to allow recursive acquisition. |
@@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...) | |||
55 | long attrs, x; | 45 | long attrs, x; |
56 | 46 | ||
57 | p = p1275buf.prom_buffer; | 47 | p = p1275buf.prom_buffer; |
58 | BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0); | ||
59 | 48 | ||
60 | spin_lock_irqsave(&prom_entry_lock, flags); | 49 | spin_lock_irqsave(&prom_entry_lock, flags); |
61 | 50 | ||
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index b1ff9e87dcc6..49075abd7cbc 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c | |||
@@ -51,7 +51,7 @@ prom_getparent(int node) | |||
51 | __inline__ int | 51 | __inline__ int |
52 | __prom_getsibling(int node) | 52 | __prom_getsibling(int node) |
53 | { | 53 | { |
54 | return p1275_cmd ("peer", P1275_INOUT(1, 1), node); | 54 | return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); |
55 | } | 55 | } |
56 | 56 | ||
57 | __inline__ int | 57 | __inline__ int |
@@ -59,9 +59,12 @@ prom_getsibling(int node) | |||
59 | { | 59 | { |
60 | int sibnode; | 60 | int sibnode; |
61 | 61 | ||
62 | if(node == -1) return 0; | 62 | if (node == -1) |
63 | return 0; | ||
63 | sibnode = __prom_getsibling(node); | 64 | sibnode = __prom_getsibling(node); |
64 | if(sibnode == -1) return 0; | 65 | if (sibnode == -1) |
66 | return 0; | ||
67 | |||
65 | return sibnode; | 68 | return sibnode; |
66 | } | 69 | } |
67 | 70 | ||
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 3ab4677395f2..5284996780a7 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c | |||
@@ -90,7 +90,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o | |||
90 | len = PAGE_ALIGN(len); | 90 | len = PAGE_ALIGN(len); |
91 | if(!(flags & MAP_FIXED)) | 91 | if(!(flags & MAP_FIXED)) |
92 | addr = 0; | 92 | addr = 0; |
93 | else if (len > 0xf0000000UL || addr > 0xf0000000UL - len) | 93 | else if (len > STACK_TOP32 || addr > STACK_TOP32 - len) |
94 | goto out_putf; | 94 | goto out_putf; |
95 | ret_type = flags & _MAP_NEW; | 95 | ret_type = flags & _MAP_NEW; |
96 | flags &= ~_MAP_NEW; | 96 | flags &= ~_MAP_NEW; |
@@ -102,7 +102,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o | |||
102 | (unsigned long) prot, (unsigned long) flags, off); | 102 | (unsigned long) prot, (unsigned long) flags, off); |
103 | up_write(¤t->mm->mmap_sem); | 103 | up_write(¤t->mm->mmap_sem); |
104 | if(!ret_type) | 104 | if(!ret_type) |
105 | retval = ((retval < 0xf0000000) ? 0 : retval); | 105 | retval = ((retval < STACK_TOP32) ? 0 : retval); |
106 | 106 | ||
107 | out_putf: | 107 | out_putf: |
108 | if (file) | 108 | if (file) |