author	David S. Miller <davem@sunset.davemloft.net>	2006-02-01 18:55:21 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:32 -0500
commit	517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree	58eff40eb4c517c4fd49fd347d38273ee1e1ee4b
parent	b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.  The trick is that every TSB load/store is registered into a special instruction patch section.  The default instructions use virtual addresses, and the patched instructions use physical-address loads and stores.

We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
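For readers unfamiliar with this style of boot-time instruction patching, here is a minimal user-space sketch of the idea (the names and the int-array stand-in for machine instructions are illustrative only; the real patcher is tsb_phys_patch() in arch/sparc64/mm/init.c below, which additionally must flush the I-cache after each rewrite):

	#include <stdio.h>

	/* Each patchable site records an (address, replacement) pair in a
	 * table; a one-time pass rewrites every site in place.  In the
	 * kernel the table lives in the .tsb_phys_patch section and the
	 * entries are emitted by the TSB_* accessor macros themselves.
	 */
	struct patch_entry {
		unsigned int *addr;	/* site to rewrite */
		unsigned int insn;	/* replacement "instruction" */
	};

	static unsigned int text[3] = { 0x11, 0x22, 0x33 };	/* stand-in code */

	static struct patch_entry patch_table[] = {
		{ &text[1], 0xaa },	/* virtual-ASI access -> physical-ASI access */
	};

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(patch_table) / sizeof(patch_table[0]); i++)
			*patch_table[i].addr = patch_table[i].insn;

		printf("%02x %02x %02x\n", text[0], text[1], text[2]);	/* 11 aa 33 */
		return 0;
	}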
-rw-r--r--	arch/sparc64/kernel/dtlb_miss.S		 2
-rw-r--r--	arch/sparc64/kernel/itlb_miss.S		 2
-rw-r--r--	arch/sparc64/kernel/ktlb.S		20
-rw-r--r--	arch/sparc64/kernel/tsb.S		35
-rw-r--r--	arch/sparc64/kernel/vmlinux.lds.S	 4
-rw-r--r--	arch/sparc64/mm/init.c			32
-rw-r--r--	arch/sparc64/mm/tsb.c			95
-rw-r--r--	include/asm-sparc64/mmu.h		 3
-rw-r--r--	include/asm-sparc64/tsb.h		94
9 files changed, 234 insertions(+), 53 deletions(-)
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index d0f1565cb564..2ef6f6e6e72b 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	nop					! Push branch to next I$ line
 	cmp	%g4, %g6			! Compare TAG
 
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 6b6c8fee04bd..97facce27aad 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -4,7 +4,7 @@
 	srlx	%g6, 48, %g5			! Get context
 	brz,pn	%g5, kvmap_itlb			! Context 0 processing
 	 nop					! Delay slot (fill me)
-	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB entry
+	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
 	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
 
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2b5e71b68882..9b415ab6db6b 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -44,14 +44,14 @@ kvmap_itlb_tsb_miss:
 kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
@@ -69,9 +69,9 @@ kvmap_itlb_longpath:
 kvmap_itlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt	%xcc, kvmap_itlb_load
 	 nop
@@ -79,9 +79,9 @@ kvmap_itlb_obp:
 kvmap_dtlb_obp:
 	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	ba,pt	%xcc, kvmap_dtlb_load
 	 nop
@@ -114,14 +114,14 @@ kvmap_linear_patch:
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	KTSB_LOCK_TAG(%g1, %g2, %g4)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 stx		%g0, [%g1]
+	 KTSB_STORE(%g1, %g0)
 
-	TSB_WRITE(%g1, %g5, %g6)
+	KTSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f5e535..ff6a79beb98d 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@ tsb_reload:
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, tsb_do_fault
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -64,7 +64,7 @@ tsb_reload:
 	and		%g2, %g4, %g2
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -131,13 +131,13 @@ winfix_trampoline:
 
 	/* Insert an entry into the TSB.
 	 *
-	 * %o0: TSB entry pointer
+	 * %o0: TSB entry pointer (virt or phys address)
 	 * %o1: tag
 	 * %o2: pte
 	 */
 	.align	32
-	.globl	tsb_insert
-tsb_insert:
+	.globl	__tsb_insert
+__tsb_insert:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 	TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@ tsb_insert:
 	retl
 	 nop
 
+	/* Flush the given TSB entry if it has the matching
+	 * tag.
+	 *
+	 * %o0: TSB entry pointer (virt or phys address)
+	 * %o1: tag
+	 */
+	.align	32
+	.globl	tsb_flush
+tsb_flush:
+	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
+1:	TSB_LOAD_TAG(%o0, %g1)
+	srlx	%g1, 32, %o3
+	andcc	%o3, %g2, %g0
+	bne,pn	%icc, 1b
+	 membar	#LoadLoad
+	cmp	%g1, %o1
+	bne,pt	%xcc, 2f
+	 clr	%o3
+	TSB_CAS_TAG(%o0, %g1, %o3)
+	cmp	%g1, %o3
+	bne,pn	%xcc, 1b
+	 nop
+2:	retl
+	 TSB_MEMBAR
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
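In C-like terms, the new tsb_flush routine above implements the following loop (load_tag() and cas_tag() are hypothetical stand-ins for the patched TSB_LOAD_TAG and TSB_CAS_TAG accessors, which is what lets the same routine take either a virtual or a physical entry pointer):

	/* Sketch only: spin while the entry is locked, bail if the tag
	 * does not match, otherwise CAS the tag to zero; a failed CAS
	 * means the entry changed underneath us, so start over.
	 */
	void tsb_flush_sketch(unsigned long ent, unsigned long tag)
	{
		for (;;) {
			unsigned long cur = load_tag(ent);	/* TSB_LOAD_TAG */

			if ((cur >> 32) & TSB_TAG_LOCK_HIGH)
				continue;			/* locked: spin */
			if (cur != tag)
				break;				/* not our entry */
			if (cas_tag(ent, cur, 0UL) == cur)	/* TSB_CAS_TAG */
				break;				/* invalidated it */
		}
		/* TSB_MEMBAR runs in the retl delay slot on both exit paths. */
	}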
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 467d13a0d5c1..71b943f1c9b1 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -70,6 +70,10 @@ SECTIONS
 	.con_initcall.init : { *(.con_initcall.init) }
 	__con_initcall_end = .;
 	SECURITY_INIT
+	. = ALIGN(4);
+	__tsb_phys_patch = .;
+	.tsb_phys_patch : { *(.tsb_phys_patch) }
+	__tsb_phys_patch_end = .;
 	. = ALIGN(8192);
 	__initramfs_start = .;
 	.init.ramfs : { *(.init.ramfs) }
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2c21d85de78f..4893f3e2c336 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -39,6 +39,7 @@
 #include <asm/tlb.h>
 #include <asm/spitfire.h>
 #include <asm/sections.h>
+#include <asm/tsb.h>
 
 extern void device_scan(void);
 
@@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     : "g1", "g7");
 }
 
+static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
+{
+	unsigned long tsb_addr = (unsigned long) ent;
+
+	if (tlb_type == cheetah_plus)
+		tsb_addr = __pa(tsb_addr);
+
+	__tsb_insert(tsb_addr, tag, pte);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
@@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 	return ~0UL;
 }
 
+static void __init tsb_phys_patch(void)
+{
+	struct tsb_phys_patch_entry *p;
+
+	p = &__tsb_phys_patch;
+	while (p < &__tsb_phys_patch_end) {
+		unsigned long addr = p->addr;
+
+		*(unsigned int *) addr = p->insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		p++;
+	}
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1052,6 +1081,9 @@ void __init paging_init(void)
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end, i;
 
+	if (tlb_type == cheetah_plus)
+		tsb_phys_patch();
+
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
 
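A hedged usage sketch of the new tsb_insert() wrapper (the caller below is hypothetical and the hash selection is elided; the tag format matches the one built in flush_tsb_user() later in this patch). Callers keep passing an ordinary kernel-virtual entry pointer, and only the wrapper decides whether __tsb_insert sees a virtual or a physical address:

	/* Hypothetical caller: insert the TSB entry for (mm, address, pte),
	 * where 'hash' is the hashed TSB slot index for 'address'.
	 */
	static void example_tsb_insert(struct mm_struct *mm, unsigned long address,
				       pte_t pte, unsigned long hash)
	{
		struct tsb *ent = &mm->context.tsb[hash];
		unsigned long tag;

		tag = (address >> 22UL) | (CTX_HWBITS(mm->context) << 48UL);
		tsb_insert(ent, tag, pte_val(pte));	/* __pa() applied on cheetah+ only */
	}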
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 1c4e5c2dfc53..787533f01049 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,12 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
 {
-	if (context == ~0UL)
-		return 1;
-
-	return (entry->tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == ((vaddr >> 22) | (context << 48)));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -41,7 +38,7 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent, v, 0)) {
+		if (tag_compare(ent->tag, v, 0)) {
 			ent->tag = 0UL;
 			membar_storeload_storestore();
 		}
@@ -52,24 +49,31 @@ void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
-	unsigned long ctx = ~0UL;
 	unsigned long nentries = mm->context.tsb_nentries;
+	unsigned long ctx, base;
 	int i;
 
-	if (CTX_VALID(mm->context))
-		ctx = CTX_HWBITS(mm->context);
+	if (unlikely(!CTX_VALID(mm->context)))
+		return;
+
+	ctx = CTX_HWBITS(mm->context);
+
+	if (tlb_type == cheetah_plus)
+		base = __pa(tsb);
+	else
+		base = (unsigned long) tsb;
 
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
-		struct tsb *ent;
+		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		ent = &tsb[tsb_hash(v, nentries)];
-		if (tag_compare(ent, v, ctx)) {
-			ent->tag = 0UL;
-			membar_storeload_storestore();
-		}
+		hash = tsb_hash(v, nentries);
+		ent = base + (hash * sizeof(struct tsb));
+		tag = (v >> 22UL) | (ctx << 48UL);
+
+		tsb_flush(ent, tag);
 	}
 }
 
@@ -84,6 +88,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
 	       _PAGE_CV | _PAGE_P | _PAGE_W);
 	tsb_paddr = __pa(mm->context.tsb);
+	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
 	 * in one TLB entry.
@@ -144,13 +149,23 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	tsb_reg |= base;
-	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
-	tte |= (tsb_paddr & ~(page_sz - 1UL));
+	if (tlb_type == cheetah_plus) {
+		/* Physical mapping, no locked TLB entry for TSB.  */
+		tsb_reg |= tsb_paddr;
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = 0;
+		mm->context.tsb_map_pte = 0;
+	} else {
+		tsb_reg |= base;
+		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+		tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+		mm->context.tsb_reg_val = tsb_reg;
+		mm->context.tsb_map_vaddr = base;
+		mm->context.tsb_map_pte = tte;
+	}
 
-	mm->context.tsb_reg_val = tsb_reg;
-	mm->context.tsb_map_vaddr = base;
-	mm->context.tsb_map_pte = tte;
 }
 
 /* The page tables are locked against modifications while this
@@ -168,13 +183,21 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 	for (i = 0; i < old_nentries; i++) {
 		register unsigned long tag asm("o4");
 		register unsigned long pte asm("o5");
-		unsigned long v;
-		unsigned int hash;
-
-		__asm__ __volatile__(
-			"ldda [%2] %3, %0"
-			: "=r" (tag), "=r" (pte)
-			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
+		unsigned long v, hash;
+
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (__pa(&old_tsb[i])),
+				  "i" (ASI_QUAD_LDD_PHYS));
+		} else {
+			__asm__ __volatile__(
+				"ldda [%2] %3, %0"
+				: "=r" (tag), "=r" (pte)
+				: "r" (&old_tsb[i]),
+				  "i" (ASI_NUCLEUS_QUAD_LDD));
+		}
 
 		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
 			continue;
@@ -198,8 +221,20 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 		v |= (i & (512UL - 1UL)) << 13UL;
 
 		hash = tsb_hash(v, new_nentries);
-		new_tsb[hash].tag = tag;
-		new_tsb[hash].pte = pte;
+		if (tlb_type == cheetah_plus) {
+			__asm__ __volatile__(
+				"stxa	%0, [%1] %2\n\t"
+				"stxa	%3, [%4] %2"
+				: /* no outputs */
+				: "r" (tag),
+				  "r" (__pa(&new_tsb[hash].tag)),
+				  "i" (ASI_PHYS_USE_EC),
+				  "r" (pte),
+				  "r" (__pa(&new_tsb[hash].pte)));
+		} else {
+			new_tsb[hash].tag = tag;
+			new_tsb[hash].pte = pte;
+		}
 	}
 }
 
205 240
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 18f98edfbcda..55e622711b96 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,7 +97,8 @@ struct tsb {
 	unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
-extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
 	unsigned long sparc64_ctx_val;
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index f384565212fe..44709cde5617 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -44,7 +44,89 @@
 
 #define TSB_MEMBAR	membar	#StoreStore
 
+/* Some cpus support physical address quad loads.  We want to use
+ * those if possible so we don't need to hard-lock the TSB mapping
+ * into the TLB.  We encode some instruction patching in order to
+ * support this.
+ *
+ * The kernel TSB is locked into the TLB by virtue of being in the
+ * kernel image, so we don't play these games for swapper_tsb access.
+ */
+#ifndef __ASSEMBLY__
+struct tsb_phys_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+#endif
+#define TSB_LOAD_QUAD(TSB, REG)	\
+661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
+	.previous
+
+#define TSB_LOAD_TAG_HIGH(TSB, REG) \
+661:	lduwa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_LOAD_TAG(TSB, REG) \
+661:	ldxa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
+661:	casa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_CAS_TAG(TSB, REG1, REG2) \
+661:	casxa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_STORE(ADDR, VAL) \
+661:	stxa		VAL, [ADDR] ASI_N; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
+	.previous
+
 #define TSB_LOCK_TAG(TSB, REG1, REG2) \
+99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
+	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
+	andcc	REG1, REG2, %g0;	\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
+	cmp	REG1, REG2;		\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_MEMBAR
+
+#define TSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB; \
+	TSB_STORE(TSB, TTE); \
+	sub	TSB, 0x8, TSB; \
+	TSB_MEMBAR; \
+	TSB_STORE(TSB, TAG);
+
+#define KTSB_LOAD_QUAD(TSB, REG) \
+	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;
+
+#define KTSB_STORE(ADDR, VAL) \
+	stxa		VAL, [ADDR] ASI_N;
+
+#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
 99:	lduwa	[TSB] ASI_N, REG1;	\
 	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
 	andcc	REG1, REG2, %g0;	\
@@ -56,10 +138,12 @@
 	 nop;				\
 	TSB_MEMBAR
 
-#define TSB_WRITE(TSB, TTE, TAG) \
-	stx		TTE, [TSB + 0x08]; \
-	TSB_MEMBAR; \
-	stx		TAG, [TSB + 0x00];
+#define KTSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB; \
+	stxa	TTE, [TSB] ASI_N; \
+	sub	TSB, 0x8, TSB; \
+	TSB_MEMBAR; \
+	stxa	TAG, [TSB] ASI_N;
 
 	/* Do a kernel page table walk.  Leaves physical PTE pointer in
 	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
@@ -157,7 +241,7 @@
 	and	REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
 	sllx	REG2, 4, REG2; \
 	add	REG1, REG2, REG2; \
-	ldda	[REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
 	cmp	REG3, TAG; \
 	be,a,pt	%xcc, OK_LABEL; \
 	 mov	REG4, REG1;
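To summarize the locking protocol that the patched TSB_LOCK_TAG and TSB_WRITE macros above implement, here is a C-level sketch (load_tag_high() and cas_tag_high() are hypothetical stand-ins for the 32-bit patched accessors; the upper half of the tag word doubles as the lock):

	/* Sketch only: claim the entry by CAS-ing the lock bit into the
	 * high word of the tag, store the pte, then store the full new
	 * tag -- that final tag store both publishes the entry and
	 * releases the lock.
	 */
	void tsb_write_sketch(struct tsb *ent, unsigned long tag, unsigned long tte)
	{
		unsigned int old;

		do {
			while ((old = load_tag_high(ent)) & TSB_TAG_LOCK_HIGH)
				;			/* spin while locked */
		} while (cas_tag_high(ent, old, TSB_TAG_LOCK_HIGH) != old);
		/* TSB_MEMBAR after a successful lock acquisition */

		ent->pte = tte;				/* TSB_WRITE: pte first */
		membar_storestore();			/* TSB_MEMBAR */
		ent->tag = tag;				/* tag last: publish + unlock */
	}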