author    David S. Miller <davem@sunset.davemloft.net>  2005-09-26 19:12:18 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2005-09-26 19:12:18 -0400
commit    0dc461069879b45a2d5333bd16990f8080a318fd (patch)
tree      d1f4b129750655352ac6a2ac1bee80fc95213a4f /arch/sparc64/mm/init.c
parent    c5bd50a9533533d7b9ac3469fa679b2368e7e26c (diff)
[SPARC64]: Do not do TLB pre-filling any more.
In order to do it correctly on UltraSPARC-III+ and later we'd need to add some complicated code to set the TAG access extension register before loading the TLB. Since this optimization gives questionable gains, it's best to just remove it for now instead of adding the fix for Ultra-III+.

Signed-off-by: David S. Miller <davem@davemloft.net>
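For reference, here is a minimal C sketch of the pre-fill path this patch removes, reconstructed from the hunks below. The elided body of update_mmu_cache() and the behaviour of __update_mmu_cache() (defined outside this file) are assumptions drawn from the context lines, not something this diff shows in full.

/* Sketch of the deleted path: update_mmu_cache() used to pre-load the new
 * translation into the TLB via __update_mmu_cache() whenever the trap entry
 * had recorded a fault code for the current thread. */
extern void __update_mmu_cache(unsigned long mmu_context_hw,
			       unsigned long address, pte_t pte, int code);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	/* ... unchanged body elided; see the context lines in the hunks below ... */

	if (get_thread_fault_code())
		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
				   address, pte, get_thread_fault_code());
}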
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c | 6
1 files changed, 0 insertions, 6 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8d72f8a1268e..9f6ca624892d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -171,8 +171,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 	: "g1", "g7");
 }
 
-extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct page *page;
@@ -199,10 +197,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 		put_cpu();
 	}
-
-	if (get_thread_fault_code())
-		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
-				   address, pte, get_thread_fault_code());
 }
 
 void flush_dcache_page(struct page *page)