author     David S. Miller <davem@sunset.davemloft.net>   2006-02-02 19:16:24 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:11:34 -0500
commit     f4e841da30b4bcbb8f1cc20a01157a788ff58b21
tree       8f145f6902b694402ce6291a493caf3a2348717e
parent     7bec08e38a7d0f088994f6eec9b6374652ea71fb
[SPARC64]: Turn off TSB growing for now.
There are several tricky races involved with growing the TSB.  So just use base-size TSBs for user contexts and we can revisit enabling this later.

One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel.  That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch().

Then there is flush_tsb_user() running in parallel with a tsb_grow().  In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush, but that looks really ugly.

Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously, which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear).  It's either copy_tsb() or some bug elsewhere in the TSB assembly.

Signed-off-by: David S. Miller <davem@davemloft.net>
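[Editor's note: the seqlock scheme described above might look roughly like the following minimal sketch.  It is not part of this patch; the tsb_seqlock field in mm->context, new_tsb, and new_nentries are hypothetical names used only for illustration.]

#include <linux/seqlock.h>

/* Writer side: tsb_grow() would publish the new TSB configuration
 * under the (hypothetical) seqlock, so readers never observe a
 * partially updated tsb/tsb_nentries pair.
 */
	write_seqlock(&mm->context.tsb_seqlock);
	mm->context.tsb = new_tsb;
	mm->context.tsb_nentries = new_nentries;
	write_sequnlock(&mm->context.tsb_seqlock);

/* Reader side: tsb_context_switch() would retry its snapshot until
 * no writer raced with it, then program the MMU from the consistent
 * tsb/nentries pair it captured.
 */
	struct tsb *tsb;
	unsigned long nentries;
	unsigned int seq;

	do {
		seq = read_seqbegin(&mm->context.tsb_seqlock);
		tsb = mm->context.tsb;
		nentries = mm->context.tsb_nentries;
	} while (read_seqretry(&mm->context.tsb_seqlock, seq));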
Diffstat (limited to 'arch')
-rw-r--r--   arch/sparc64/mm/init.c    5
-rw-r--r--   arch/sparc64/mm/tsb.c    11
2 files changed, 1 insertion(+), 15 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4893f3e2c336..1af63307b24f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct page *page;
 	unsigned long pfn;
 	unsigned long pg_flags;
-	unsigned long mm_rss;
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
@@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
-	mm_rss = get_mm_rss(mm);
-	if (mm_rss >= mm->context.tsb_rss_limit)
-		tsb_grow(mm, mm_rss, GFP_ATOMIC);
-
 	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
 		struct tsb *tsb;
 		unsigned long tag;
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 787533f01049..2cc8e6528c63 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -310,7 +310,6 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long initial_rss;
 
 	mm->context.sparc64_ctx_val = 0UL;
 
@@ -319,15 +318,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 * will be confused and think there is an older TSB to free up.
 	 */
 	mm->context.tsb = NULL;
-
-	/* If this is fork, inherit the parent's TSB size.  We would
-	 * grow it to that size on the first page fault anyways.
-	 */
-	initial_rss = mm->context.tsb_nentries;
-	if (initial_rss)
-		initial_rss -= 1;
-
-	tsb_grow(mm, initial_rss, GFP_KERNEL);
+	tsb_grow(mm, 0, GFP_KERNEL);
 
 	if (unlikely(!mm->context.tsb))
 		return -ENOMEM;
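[Editor's note: passing an rss hint of 0 to tsb_grow() makes it select the smallest TSB size, so after this patch every new context starts with a base-size TSB and, with the fault-path growth check removed from update_mmu_cache(), keeps it, matching the intent stated in the commit message.]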