author		David S. Miller <davem@sunset.davemloft.net>	2006-02-02 19:16:24 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:34 -0500
commit		f4e841da30b4bcbb8f1cc20a01157a788ff58b21
tree		8f145f6902b694402ce6291a493caf3a2348717e	/arch/sparc64/mm/init.c
parent		7bec08e38a7d0f088994f6eec9b6374652ea71fb
[SPARC64]: Turn off TSB growing for now.
There are several tricky races involved with growing the TSB.  So just use base-size TSBs for user contexts and we can revisit enabling this later.

One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel.  That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch().

Then there is flush_tsb_user() running in parallel with a tsb_grow().  In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush, but that looks really ugly.

Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously, which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear).  It's either copy_tsb() or some bug elsewhere in the TSB assembly.

Signed-off-by: David S. Miller <davem@davemloft.net>
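[Editor's note] For illustration, here is a minimal sketch of the seqlock scheme the message describes: tsb_grow() as the seqlock writer, tsb_context_switch() (and potentially flush_tsb_user()) as retrying readers.  This is not code from the patch; tsb_seqlock, tsb_config_snap, and its fields are hypothetical names, while the seqlock primitives themselves are the standard Linux <linux/seqlock.h> API.

/*
 * Illustrative sketch only, not from this patch.  The names
 * tsb_seqlock, tsb_config_snap, tsb_base and tsb_nentries are
 * hypothetical; the real state would live in mm->context.
 */
#include <linux/seqlock.h>

struct tsb_config_snap {
	unsigned long tsb_base;		/* kernel address of the TSB */
	unsigned long tsb_nentries;	/* number of TSB entries */
};

static DEFINE_SEQLOCK(tsb_seqlock);
static struct tsb_config_snap tsb_cfg;

/* Writer side: tsb_grow() would publish the new TSB config atomically. */
static void tsb_grow_sketch(unsigned long base, unsigned long nentries)
{
	write_seqlock(&tsb_seqlock);
	/* ... allocate the larger TSB, copy_tsb(), then publish: */
	tsb_cfg.tsb_base = base;
	tsb_cfg.tsb_nentries = nentries;
	write_sequnlock(&tsb_seqlock);
}

/*
 * Reader side: tsb_context_switch() captures one consistent snapshot
 * of the TSB config, retrying if a concurrent tsb_grow() raced with
 * it.  flush_tsb_user() could use the same pattern to resample the
 * TSB pointer and reflush.
 */
static struct tsb_config_snap tsb_snapshot_sketch(void)
{
	struct tsb_config_snap snap;
	unsigned int seq;

	do {
		seq = read_seqbegin(&tsb_seqlock);
		snap = tsb_cfg;
	} while (read_seqretry(&tsb_seqlock, seq));

	return snap;	/* program the MMU from this snapshot */
}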
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--	arch/sparc64/mm/init.c	|  5 -----
1 file changed, 0 insertions(+), 5 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4893f3e2c33..1af63307b24 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct page *page;
 	unsigned long pfn;
 	unsigned long pg_flags;
-	unsigned long mm_rss;
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
@@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
-	mm_rss = get_mm_rss(mm);
-	if (mm_rss >= mm->context.tsb_rss_limit)
-		tsb_grow(mm, mm_rss, GFP_ATOMIC);
-
 	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
 		struct tsb *tsb;
 		unsigned long tag;