Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/pci.c            |   2
-rw-r--r--  arch/sparc64/kernel/sun4v_tlb_miss.S |  39
-rw-r--r--  arch/sparc64/kernel/traps.c          |  21
-rw-r--r--  arch/sparc64/kernel/tsb.S            | 210
4 files changed, 203 insertions, 69 deletions
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 95ffa9418620..dfccff29e182 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -656,6 +656,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	__pci_mmap_set_flags(dev, vma, mmap_state);
 	__pci_mmap_set_pgprot(dev, vma, mmap_state);
 
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ret = io_remap_pfn_range(vma, vma->vm_start,
 				 vma->vm_pgoff,
 				 vma->vm_end - vma->vm_start,
@@ -663,7 +664,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	if (ret)
 		return ret;
 
-	vma->vm_flags |= VM_IO;
 	return 0;
 }
 
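The change above follows the usual pattern for mapping device space into a user process: make the protection uncacheable with pgprot_noncached() and let io_remap_pfn_range() establish the mapping, which is why the explicit VM_IO assignment can be dropped. A minimal sketch of that pattern in a hypothetical driver mmap handler (mydev_mmap is invented for illustration; it assumes vm_pgoff already holds the target page frame):

#include <linux/fs.h>
#include <linux/mm.h>

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Device registers must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_pgoff is assumed to hold the physical page frame to map. */
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}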
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb7116e..b731881224e8 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
  *
  * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
  * tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
  * tsb_ptr = tsb_base + (tsb_index * 16);
  */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1; \
 	mov	512, TMP2; \
 	andn	TSB_PTR, 0x7, TSB_PTR; \
 	sllx	TMP2, TMP1, TMP2; \
-	srlx	VADDR, PAGE_SHIFT, TMP1; \
+	srlx	VADDR, HASH_SHIFT, TMP1; \
 	sub	TMP2, 1, TMP2; \
 	and	TMP1, TMP2, TMP1; \
 	sllx	TMP1, 4, TMP1; \
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
 
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
 
 	/* fallthrough */
 
-	/* Create TSB pointer into %g1.  This is something like:
-	 *
-	 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
-	 *	tsb_base = tsb_reg & ~0x7UL;
-	 *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
-	 *	tsb_ptr = tsb_base + (tsb_index * 16);
-	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
-	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
-	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
-	 * so we can preload it into %g7.
-	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
 
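The comment at the top of the first hunk already gives the pointer computation in C-like pseudocode; the only change is that the hash shift becomes an explicit macro parameter so the same code can index either the PAGE_SIZE TSB or the HPAGE_SIZE TSB. As a plain C rendering of that arithmetic (illustrative only, not kernel code):

#include <stdint.h>

static uint64_t compute_tsb_ptr(uint64_t tsb_reg, uint64_t vaddr,
				unsigned int hash_shift)
{
	uint64_t index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	uint64_t tsb_base   = tsb_reg & ~0x7UL;
	uint64_t tsb_index  = (vaddr >> hash_shift) & index_mask;

	return tsb_base + (tsb_index * 16);	/* 16 bytes per TSB entry */
}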
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0ca96a..df612e4f75f9 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
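The *_are_bolixed_dave() externs are never defined anywhere; a call to one only survives if a compile-time offsetof() comparison detects a mismatch between the assembler constants and the C structure layout, in which case the kernel fails to link. A small sketch of the same idiom, with hypothetical names and assuming the usual optimized kernel build:

#include <stddef.h>

/* Intentionally left undefined: referencing it causes a link failure. */
extern void example_offsets_are_bolixed(void);

struct example {
	unsigned long a;
	unsigned long b;
};

#define EXAMPLE_B	0x08	/* offset the assembly code assumes */

static inline void check_example_offsets(void)
{
	/* Both sides are compile-time constants, so the compiler drops
	 * the call when the offsets agree; otherwise the unresolved
	 * symbol shows up at link time and flags the stale constant.
	 */
	if (EXAMPLE_B != offsetof(struct example, b))
		example_offsets_are_bolixed();
}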
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea44f69..a0c8ba58920b 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
+
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
 	ldxa	[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 -- TSB entry address
+	 * %g1 -- PAGE_SIZE TSB entry address
 	 * %g3 -- FAULT_CODE_{D,I}TLB
 	 * %g4 -- missing virtual address
 	 * %g6 -- TAG TARGET (vaddr >> 22)
 	 */
tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word	661b
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov	%g6, %g2
+	and	%g5, 0x7, %g6
+	mov	512, %g7
+	andn	%g5, 0x7, %g5
+	sllx	%g7, %g6, %g7
+	srlx	%g4, HPAGE_SHIFT, %g6
+	sub	%g7, 1, %g7
+	and	%g6, %g7, %g6
+	sllx	%g6, 4, %g6
+	add	%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp	%g6, %g2
+	be,a,pt	%xcc, tsb_tlb_reload
+	 mov	%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx	%g4, 22, %g6
+80:	stx	%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 -- TSB entry address
 	 * %g3 -- FAULT_CODE_{D,I}TLB
-	 * %g5 -- physical address of PTE in Linux page tables
+	 * %g4 -- missing virtual address
 	 * %g6 -- TAG TARGET (vaddr >> 22)
+	 * %g7 -- page table physical address
+	 *
+	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB both lack a matching entry.
 	 */
-tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 	/* Load and check PTE.  */
 	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
-	mov	1, %g7
-	sllx	%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi	%uhi(_PAGE_SZALL_4U), %g7
+	sllx	%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word	661b
+	mov	_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and	%g5, %g7, %g2
+
+661:	sethi	%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx	%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word	661b
+	mov	_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp	%g2, %g7
+	bne,pt	%xcc, 60f
+	 nop
+
+	/* It is a huge page, use huge page TSB entry address we
+	 * calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp	%g2, -1
+	movne	%xcc, %g2, %g1
+60:
+#endif
 
+	/* At this point we have:
+	 * %g1 -- TSB entry address
+	 * %g3 -- FAULT_CODE_{D,I}TLB
+	 * %g5 -- valid PTE
+	 * %g6 -- TAG TARGET (vaddr >> 22)
+	 */
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
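Modeled in C, the huge page TSB probe added above boils down to hashing the faulting address with HPAGE_SHIFT into a table of 16-byte tag/PTE pairs and comparing the stored tag against vaddr >> 22. A sketch under those assumptions (the struct and function names are invented for illustration, not kernel code):

#include <stdint.h>

struct tsb_entry {
	uint64_t tag;
	uint64_t pte;
};

static struct tsb_entry *huge_tsb_probe(uint64_t huge_tsb_reg, uint64_t vaddr,
					 unsigned int hpage_shift)
{
	uint64_t index_mask = (512UL << (huge_tsb_reg & 0x7UL)) - 1UL;
	struct tsb_entry *base =
		(struct tsb_entry *)(uintptr_t)(huge_tsb_reg & ~0x7UL);
	struct tsb_entry *ent = &base[(vaddr >> hpage_shift) & index_mask];

	if (ent->tag == (vaddr >> 22))
		return ent;	/* hit: reload the TLB from ent->pte */

	return NULL;		/* miss: fall back to the page table walk */
}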
@@ -240,10 +335,9 @@ tsb_flush:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1: TSB register value
-	 * %o2: TSB virtual address
-	 * %o3: TSB mapping locked PTE
-	 * %o4: Hypervisor TSB descriptor physical address
+	 * %o1: TSB base config pointer
+	 * %o2: TSB huge config pointer, or NULL if none
+	 * %o3: Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
__tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch. */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
-
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
+
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
+	 nop
 
 	/* SUN4U TSB switch. */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
 	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
+	stxa	%o0, [%o5] ASI_IMMU
 	membar	#Sync
 
-2:	brz	%o2, 9f
-	 nop
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
 
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
+	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
+
9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
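With the argument change documented in the comment hunk above, the C-level caller now hands __tsb_context_switch the page table physical address plus per-TSB configuration blocks rather than raw register and PTE values. A hedged sketch of the prototype implied by the register comments (argument names here are assumptions, not taken from the headers):

struct tsb_config;	/* layout checked against TSB_CONFIG_* in traps.c */

/* %o0: page table physical address
 * %o1: base (PAGE_SIZE) TSB config pointer
 * %o2: huge page TSB config pointer, or NULL if none
 * %o3: hypervisor TSB descriptor physical address
 */
extern void __tsb_context_switch(unsigned long pgd_paddr,
				 struct tsb_config *tsb_base,
				 struct tsb_config *tsb_huge,
				 unsigned long tsb_descr_paddr);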