author		David S. Miller <davem@sunset.davemloft.net>	2006-03-05 21:26:24 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:14:18 -0500
commit		74ae998772041b62e9ad420d602e4f7dbb182cd6 (patch)
tree		8cbeb2ff56856d357647da6ab62562bac2fe87ad /arch/sparc64
parent		3cab0c3e8636d5005041aa52224f796c3a4ef872 (diff)
[SPARC64]: Simplify TSB insert checks.
Don't try to avoid putting non-base page sized entries into the user
TSB.  It actually costs us more to check this than it helps.

Eventually we'll have a multiple TSB scheme for user processes.  Once
a process starts using larger pages, we'll allocate and use such a TSB.

Signed-off-by: David S. Miller <davem@davemloft.net>
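For reference, the slot and tag computation that the patch now runs on
every update_mmu_cache() call is just two shifts and a mask, visible in
the init.c hunk below.  A minimal standalone C sketch of that math; the
helper names are invented for illustration, and the constants mirror
sparc64's 8K base pages (PAGE_SHIFT == 13) and the 22-bit tag shift
used by the TSB compare:

	/* Sketch only: mirrors the indexing math from the init.c hunk. */
	#define SPARC64_PAGE_SHIFT	13UL

	static unsigned long tsb_slot(unsigned long address,
				      unsigned long nentries)
	{
		/* nentries is a power of two, so the AND is a cheap modulo. */
		return (address >> SPARC64_PAGE_SHIFT) & (nentries - 1UL);
	}

	static unsigned long tsb_tag(unsigned long address)
	{
		/* High address bits disambiguate entries that alias
		 * to the same slot.
		 */
		return address >> 22UL;
	}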
Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/kernel/tsb.S	| 14 --------------
-rw-r--r--	arch/sparc64/mm/init.c	| 15 ++++++---------
2 files changed, 6 insertions(+), 23 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 563852bf3594..d738910153f6 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,20 +55,6 @@ tsb_reload:
 	brgez,a,pn	%g5, tsb_do_fault
 	 TSB_STORE(%g1, %g7)
 
-	/* If it is larger than the base page size, don't
-	 * bother putting it into the TSB.
-	 */
-	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
-	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
-	and		%g5, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS), %g7
-	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
-	cmp		%g2, %g7
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	bne,a,pn	%xcc, tsb_tlb_reload
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap. */
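In C terms, the 14 deleted lines implemented roughly the guard below:
mask out the PTE's page-size field and, unless it encodes the base
size, store a never-matching tag instead of the real entry.  A hedged
sketch, with stand-in definitions so it compiles outside the kernel
(the real masks are the __read_mostly variables visible in the next
hunk's context):

	/* Stand-ins so the sketch is self-contained; the real
	 * definitions live in this era's asm-sparc64 headers.
	 */
	struct tsb { unsigned long tag; unsigned long pte; };
	#define TSB_TAG_INVALID_BIT	46	/* assumed, per asm/tsb.h of this period */
	extern unsigned long _PAGE_ALL_SZ_BITS, _PAGE_SZBITS;

	/* Roughly the check the removed assembly ran on each TSB miss. */
	static void tsb_insert_checked(struct tsb *ent, unsigned long tag,
				       unsigned long pte)
	{
		if ((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS) {
			/* Not a base-sized page: poison the slot so the
			 * TSB can never hit on it, and skip the insert.
			 */
			ent->tag = 1UL << TSB_TAG_INVALID_BIT;
			return;
		}
		ent->tag = tag;		/* what TSB_WRITE does, in essence */
		ent->pte = pte;
	}

The commit's point is that this branch plus two loads ran on every TSB
reload, which cost more than simply letting non-base-size PTEs occupy
a slot.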
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 87d5d1af1adb..5930e87dafbc 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly;
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
+	struct tsb *tsb;
+	unsigned long tag;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -308,15 +310,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
-	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
-		struct tsb *tsb;
-		unsigned long tag;
-
-		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL);
-		tsb_insert(tsb, tag, pte_val(pte));
-	}
+	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+			       (mm->context.tsb_nentries - 1UL)];
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, pte_val(pte));
 }
 
 void flush_dcache_page(struct page *page)
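The commit message's future "multiple TSB scheme" would select a TSB by
page size before doing the same slot math.  A purely speculative sketch
of that direction; every name below is invented, nothing here exists in
the tree at this commit:

	/* Hypothetical illustration of the multi-TSB direction the
	 * commit message hints at: one TSB per supported page size,
	 * chosen before the usual slot/tag computation.
	 */
	struct tsb_entry { unsigned long tag; unsigned long pte; };

	struct tsb_config {
		struct tsb_entry *entries;
		unsigned long	  nentries;	/* power of two */
		unsigned long	  page_shift;	/* 13 for 8K, larger for huge pages */
	};

	static struct tsb_entry *pick_slot(struct tsb_config *cfg, int huge,
					   unsigned long address)
	{
		struct tsb_config *c = &cfg[huge ? 1 : 0];

		return &c->entries[(address >> c->page_shift) &
				   (c->nentries - 1UL)];
	}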