aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2006-02-22 01:31:11 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:13:56 -0500
commitd7744a09504d5ae84edc8289a02254e1f2102410 (patch)
treebe0f245ee0725f2f066bf87d17d254ce1e7279bf /arch
parent9cc3a1ac9a819cadff05ca37bb7f208013a22035 (diff)
[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.
It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less RAM. In such cases, on SUN4V, once we load up this TSB the first time with all the mappings, we never take a linear kernel mapping TLB miss ever again; the hypervisor handles them all. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/ktlb.S | 15
-rw-r--r--  arch/sparc64/mm/init.c     | 24
2 files changed, 33 insertions, 6 deletions
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac17bc8..efcf38b6e28 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
121 nop 121 nop
122 122
123 .align 32 123 .align 32
124kvmap_dtlb_tsb4m_load:
125 KTSB_LOCK_TAG(%g1, %g2, %g7)
126 KTSB_WRITE(%g1, %g5, %g6)
127 ba,pt %xcc, kvmap_dtlb_load
128 nop
129
124kvmap_dtlb: 130kvmap_dtlb:
125 /* %g6: TAG TARGET */ 131 /* %g6: TAG TARGET */
126 mov TLB_TAG_ACCESS, %g4 132 mov TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
133 brgez,pn %g4, kvmap_dtlb_nonlinear 139 brgez,pn %g4, kvmap_dtlb_nonlinear
134 nop 140 nop
135 141
142 /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
143 KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
144
145 /* TSB entry address left in %g1, lookup linear PTE.
146 * Must preserve %g1 and %g6 (TAG).
147 */
148kvmap_dtlb_tsb4m_miss:
136 sethi %hi(kpte_linear_bitmap), %g2 149 sethi %hi(kpte_linear_bitmap), %g2
137 or %g2, %lo(kpte_linear_bitmap), %g2 150 or %g2, %lo(kpte_linear_bitmap), %g2
138 151
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:
163 176
164 .globl kvmap_linear_patch 177 .globl kvmap_linear_patch
165kvmap_linear_patch: 178kvmap_linear_patch:
166 ba,pt %xcc, kvmap_dtlb_load 179 ba,pt %xcc, kvmap_dtlb_tsb4m_load
167 xor %g2, %g4, %g5 180 xor %g2, %g4, %g5
168 181
169kvmap_dtlb_vmalloc_addr: 182kvmap_dtlb_vmalloc_addr:
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5869f00d2d..2a123135b04 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
58 */ 58 */
59unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 59unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
60 60
61/* A special kernel TSB for 4MB and 256MB linear mappings. */
62struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
63
61#define MAX_BANKS 32 64#define MAX_BANKS 32
62 65
63static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; 66static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void)
1086{ 1089{
1087 unsigned long ktsb_pa; 1090 unsigned long ktsb_pa;
1088 1091
1092 /* First KTSB for PAGE_SIZE mappings. */
1089 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1093 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1090 1094
1091 switch (PAGE_SIZE) { 1095 switch (PAGE_SIZE) {
@@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void)
1117 ktsb_descr[0].tsb_base = ktsb_pa; 1121 ktsb_descr[0].tsb_base = ktsb_pa;
1118 ktsb_descr[0].resv = 0; 1122 ktsb_descr[0].resv = 0;
1119 1123
1120 /* XXX When we have a kernel large page size TSB, describe 1124 /* Second KTSB for 4MB/256MB mappings. */
1121 * XXX it in ktsb_descr[1] here. 1125 ktsb_pa = (kern_base +
1122 */ 1126 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1127
1128 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1129 ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
1130 HV_PGSZ_MASK_256MB);
1131 ktsb_descr[1].assoc = 1;
1132 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1133 ktsb_descr[1].ctx_idx = 0;
1134 ktsb_descr[1].tsb_base = ktsb_pa;
1135 ktsb_descr[1].resv = 0;
1123} 1136}
1124 1137
1125void __cpuinit sun4v_ktsb_register(void) 1138void __cpuinit sun4v_ktsb_register(void)
@@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void)
1132 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1145 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1133 1146
1134 func = HV_FAST_MMU_TSB_CTX0; 1147 func = HV_FAST_MMU_TSB_CTX0;
1135 /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ 1148 arg0 = 2;
1136 arg0 = 1;
1137 arg1 = pa; 1149 arg1 = pa;
1138 __asm__ __volatile__("ta %6" 1150 __asm__ __volatile__("ta %6"
1139 : "=&r" (func), "=&r" (arg0), "=&r" (arg1) 1151 : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
@@ -1160,7 +1172,9 @@ void __init paging_init(void)
1160 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1172 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1161 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1173 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1162 1174
1175 /* Invalidate both kernel TSBs. */
1163 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1176 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1177 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1164 1178
1165 if (tlb_type == hypervisor) 1179 if (tlb_type == hypervisor)
1166 sun4v_pgprot_init(); 1180 sun4v_pgprot_init();