aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2005-09-26 19:12:18 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-09-26 19:12:18 -0400
commit0dc461069879b45a2d5333bd16990f8080a318fd (patch)
treed1f4b129750655352ac6a2ac1bee80fc95213a4f /arch
parentc5bd50a9533533d7b9ac3469fa679b2368e7e26c (diff)
[SPARC64]: Do not do TLB pre-filling any more.
In order to do it correctly on UltraSPARC-III+ and later we'd need to add some complicated code to set the TAG access extension register before loading the TLB. Since this optimization gives questionable gains, it's best to just remove it for now instead of adding the fix for Ultra-III+. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/sparc64/mm/init.c6
-rw-r--r--arch/sparc64/mm/ultra.S29
2 files changed, 0 insertions, 35 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8d72f8a1268e..9f6ca624892d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -171,8 +171,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
171 : "g1", "g7"); 171 : "g1", "g7");
172} 172}
173 173
174extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
175
176void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 174void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
177{ 175{
178 struct page *page; 176 struct page *page;
@@ -199,10 +197,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
199 197
200 put_cpu(); 198 put_cpu();
201 } 199 }
202
203 if (get_thread_fault_code())
204 __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
205 address, pte, get_thread_fault_code());
206} 200}
207 201
208void flush_dcache_page(struct page *page) 202void flush_dcache_page(struct page *page)
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 5ff5e42fb9d4..058b8126c1a7 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -180,35 +180,6 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
180 180
181 .previous 181 .previous
182 182
183 .align 32
184__prefill_dtlb:
185 rdpr %pstate, %g7
186 wrpr %g7, PSTATE_IE, %pstate
187 mov TLB_TAG_ACCESS, %g1
188 stxa %o5, [%g1] ASI_DMMU
189 stxa %o2, [%g0] ASI_DTLB_DATA_IN
190 flush %g6
191 retl
192 wrpr %g7, %pstate
193__prefill_itlb:
194 rdpr %pstate, %g7
195 wrpr %g7, PSTATE_IE, %pstate
196 mov TLB_TAG_ACCESS, %g1
197 stxa %o5, [%g1] ASI_IMMU
198 stxa %o2, [%g0] ASI_ITLB_DATA_IN
199 flush %g6
200 retl
201 wrpr %g7, %pstate
202
203 .globl __update_mmu_cache
204__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
205 srlx %o1, PAGE_SHIFT, %o1
206 andcc %o3, FAULT_CODE_DTLB, %g0
207 sllx %o1, PAGE_SHIFT, %o5
208 bne,pt %xcc, __prefill_dtlb
209 or %o5, %o0, %o5
210 ba,a,pt %xcc, __prefill_itlb
211
212 /* Cheetah specific versions, patched at boot time. */ 183 /* Cheetah specific versions, patched at boot time. */
213__cheetah_flush_tlb_mm: /* 18 insns */ 184__cheetah_flush_tlb_mm: /* 18 insns */
214 rdpr %pstate, %g7 185 rdpr %pstate, %g7