diff options
author | David S. Miller <davem@davemloft.net> | 2006-02-22 01:31:11 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:13:56 -0500 |
commit | d7744a09504d5ae84edc8289a02254e1f2102410 (patch) | |
tree | be0f245ee0725f2f066bf87d17d254ce1e7279bf | |
parent | 9cc3a1ac9a819cadff05ca37bb7f208013a22035 (diff) |
[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.
It can map all of the linear kernel mappings with zero TSB hash
conflicts for systems with 16GB or less RAM. In such cases, on
SUN4V, once we load up this TSB the first time with all the
mappings, we never take a linear kernel mapping TLB miss ever
again; the hypervisor handles them all.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | arch/sparc64/kernel/ktlb.S | 15 | ||||
-rw-r--r-- | arch/sparc64/mm/init.c | 24 | ||||
-rw-r--r-- | include/asm-sparc64/tsb.h | 15 |
3 files changed, 48 insertions, 6 deletions
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index ae1dac17bc8d..efcf38b6e284 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S | |||
@@ -121,6 +121,12 @@ kvmap_dtlb_obp: | |||
121 | nop | 121 | nop |
122 | 122 | ||
123 | .align 32 | 123 | .align 32 |
124 | kvmap_dtlb_tsb4m_load: | ||
125 | KTSB_LOCK_TAG(%g1, %g2, %g7) | ||
126 | KTSB_WRITE(%g1, %g5, %g6) | ||
127 | ba,pt %xcc, kvmap_dtlb_load | ||
128 | nop | ||
129 | |||
124 | kvmap_dtlb: | 130 | kvmap_dtlb: |
125 | /* %g6: TAG TARGET */ | 131 | /* %g6: TAG TARGET */ |
126 | mov TLB_TAG_ACCESS, %g4 | 132 | mov TLB_TAG_ACCESS, %g4 |
@@ -133,6 +139,13 @@ kvmap_dtlb_4v: | |||
133 | brgez,pn %g4, kvmap_dtlb_nonlinear | 139 | brgez,pn %g4, kvmap_dtlb_nonlinear |
134 | nop | 140 | nop |
135 | 141 | ||
142 | /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ | ||
143 | KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) | ||
144 | |||
145 | /* TSB entry address left in %g1, lookup linear PTE. | ||
146 | * Must preserve %g1 and %g6 (TAG). | ||
147 | */ | ||
148 | kvmap_dtlb_tsb4m_miss: | ||
136 | sethi %hi(kpte_linear_bitmap), %g2 | 149 | sethi %hi(kpte_linear_bitmap), %g2 |
137 | or %g2, %lo(kpte_linear_bitmap), %g2 | 150 | or %g2, %lo(kpte_linear_bitmap), %g2 |
138 | 151 | ||
@@ -163,7 +176,7 @@ kvmap_dtlb_4v: | |||
163 | 176 | ||
164 | .globl kvmap_linear_patch | 177 | .globl kvmap_linear_patch |
165 | kvmap_linear_patch: | 178 | kvmap_linear_patch: |
166 | ba,pt %xcc, kvmap_dtlb_load | 179 | ba,pt %xcc, kvmap_dtlb_tsb4m_load |
167 | xor %g2, %g4, %g5 | 180 | xor %g2, %g4, %g5 |
168 | 181 | ||
169 | kvmap_dtlb_vmalloc_addr: | 182 | kvmap_dtlb_vmalloc_addr: |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b5869f00d2d1..2a123135b042 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly; | |||
58 | */ | 58 | */ |
59 | unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | 59 | unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; |
60 | 60 | ||
61 | /* A special kernel TSB for 4MB and 256MB linear mappings. */ | ||
62 | struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; | ||
63 | |||
61 | #define MAX_BANKS 32 | 64 | #define MAX_BANKS 32 |
62 | 65 | ||
63 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | 66 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; |
@@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void) | |||
1086 | { | 1089 | { |
1087 | unsigned long ktsb_pa; | 1090 | unsigned long ktsb_pa; |
1088 | 1091 | ||
1092 | /* First KTSB for PAGE_SIZE mappings. */ | ||
1089 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); | 1093 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); |
1090 | 1094 | ||
1091 | switch (PAGE_SIZE) { | 1095 | switch (PAGE_SIZE) { |
@@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void) | |||
1117 | ktsb_descr[0].tsb_base = ktsb_pa; | 1121 | ktsb_descr[0].tsb_base = ktsb_pa; |
1118 | ktsb_descr[0].resv = 0; | 1122 | ktsb_descr[0].resv = 0; |
1119 | 1123 | ||
1120 | /* XXX When we have a kernel large page size TSB, describe | 1124 | /* Second KTSB for 4MB/256MB mappings. */ |
1121 | * XXX it in ktsb_descr[1] here. | 1125 | ktsb_pa = (kern_base + |
1122 | */ | 1126 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); |
1127 | |||
1128 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1129 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | ||
1130 | HV_PGSZ_MASK_256MB); | ||
1131 | ktsb_descr[1].assoc = 1; | ||
1132 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | ||
1133 | ktsb_descr[1].ctx_idx = 0; | ||
1134 | ktsb_descr[1].tsb_base = ktsb_pa; | ||
1135 | ktsb_descr[1].resv = 0; | ||
1123 | } | 1136 | } |
1124 | 1137 | ||
1125 | void __cpuinit sun4v_ktsb_register(void) | 1138 | void __cpuinit sun4v_ktsb_register(void) |
@@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void) | |||
1132 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | 1145 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); |
1133 | 1146 | ||
1134 | func = HV_FAST_MMU_TSB_CTX0; | 1147 | func = HV_FAST_MMU_TSB_CTX0; |
1135 | /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ | 1148 | arg0 = 2; |
1136 | arg0 = 1; | ||
1137 | arg1 = pa; | 1149 | arg1 = pa; |
1138 | __asm__ __volatile__("ta %6" | 1150 | __asm__ __volatile__("ta %6" |
1139 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) | 1151 | : "=&r" (func), "=&r" (arg0), "=&r" (arg1) |
@@ -1160,7 +1172,9 @@ void __init paging_init(void) | |||
1160 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | 1172 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
1161 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | 1173 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; |
1162 | 1174 | ||
1175 | /* Invalidate both kernel TSBs. */ | ||
1163 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); | 1176 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); |
1177 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); | ||
1164 | 1178 | ||
1165 | if (tlb_type == hypervisor) | 1179 | if (tlb_type == hypervisor) |
1166 | sun4v_pgprot_init(); | 1180 | sun4v_pgprot_init(); |
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 6e6768067e38..e82612cd9f33 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h | |||
@@ -243,6 +243,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | |||
243 | #define KERNEL_TSB_SIZE_BYTES (32 * 1024) | 243 | #define KERNEL_TSB_SIZE_BYTES (32 * 1024) |
244 | #define KERNEL_TSB_NENTRIES \ | 244 | #define KERNEL_TSB_NENTRIES \ |
245 | (KERNEL_TSB_SIZE_BYTES / 16) | 245 | (KERNEL_TSB_SIZE_BYTES / 16) |
246 | #define KERNEL_TSB4M_NENTRIES 4096 | ||
246 | 247 | ||
247 | /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL | 248 | /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL |
248 | * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries | 249 | * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries |
@@ -263,4 +264,18 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | |||
263 | be,a,pt %xcc, OK_LABEL; \ | 264 | be,a,pt %xcc, OK_LABEL; \ |
264 | mov REG4, REG1; | 265 | mov REG4, REG1; |
265 | 266 | ||
267 | /* This version uses a trick, the TAG is already (VADDR >> 22) so | ||
268 | * we can make use of that for the index computation. | ||
269 | */ | ||
270 | #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ | ||
271 | sethi %hi(swapper_4m_tsb), REG1; \ | ||
272 | or REG1, %lo(swapper_4m_tsb), REG1; \ | ||
273 | and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ | ||
274 | sllx REG2, 4, REG2; \ | ||
275 | add REG1, REG2, REG2; \ | ||
276 | KTSB_LOAD_QUAD(REG2, REG3); \ | ||
277 | cmp REG3, TAG; \ | ||
278 | be,a,pt %xcc, OK_LABEL; \ | ||
279 | mov REG4, REG1; | ||
280 | |||
266 | #endif /* !(_SPARC64_TSB_H) */ | 281 | #endif /* !(_SPARC64_TSB_H) */ |