author    David S. Miller <davem@davemloft.net>  2006-01-31 21:31:38 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:18 -0500
commit    bd40791e1d289d807b8580abe1f117e9c62894e4 (patch)
tree      2b47e24c8dc0e668dfd7ba0e3879165180c49c65 /arch/sparc64/kernel/smp.c
parent    98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (diff)
[SPARC64]: Dynamically grow TSB in response to RSS growth.
As the RSS grows, grow the TSB in order to reduce the likelihood of hash collisions and thus poor hit rates in the TSB. This definitely needs some serious tuning.

Signed-off-by: David S. Miller <davem@davemloft.net>
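The policy described here is essentially load-factor-driven resizing: once the number of resident pages approaches the capacity of the current TSB, the table is replaced with a larger one so hash chains stay short and hit rates stay high. The real sizing code lives in the mm/ part of this series, not in smp.c; the fragment below is only a stand-alone user-space sketch of the idea, and the helper name, minimum size, bounds and 50% threshold in it are assumptions made for illustration.

#include <stdio.h>

/*
 * Illustrative sketch only: pick a power-of-two number of TSB entries so
 * that rss (resident pages, each potentially wanting an entry) stays at
 * or below a 50% load factor.  Helper name, bounds and threshold are
 * hypothetical, not the sparc64 kernel's actual policy.
 */
static unsigned long pick_tsb_entries(unsigned long rss)
{
        unsigned long entries = 512;            /* assumed minimum size */

        while (entries < rss * 2 && entries < (1UL << 24))
                entries <<= 1;                  /* double until load <= 1/2 */

        return entries;
}

int main(void)
{
        unsigned long rss;

        for (rss = 100; rss <= 1000000; rss *= 10)
                printf("rss %7lu pages -> %8lu TSB entries\n",
                       rss, pick_tsb_entries(rss));
        return 0;
}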
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  28
1 files changed, 24 insertions, 4 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8c245859d212..3c14b549cf91 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -581,11 +581,11 @@ extern unsigned long xcall_call_function;
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+static int smp_call_function_mask(void (*func)(void *info), void *info,
+				  int nonatomic, int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
+	int cpus = cpus_weight(mask) - 1;
 	long timeout;
 
 	if (!cpus)
@@ -603,7 +603,7 @@ int smp_call_function(void (*func)(void *info), void *info,
 
 	call_data = &data;
 
-	smp_cross_call(&xcall_call_function, 0, 0, 0);
+	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
 	/*
 	 * Wait for other cpus to complete function or at
@@ -629,6 +629,13 @@ out_timeout:
 	return 0;
 }
 
+int smp_call_function(void (*func)(void *info), void *info,
+		      int nonatomic, int wait)
+{
+	return smp_call_function_mask(func, info, nonatomic, wait,
+				      cpu_online_map);
+}
+
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	void (*func) (void *info) = call_data->func;
@@ -646,6 +653,19 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 	}
 }
 
+static void tsb_sync(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->active_mm == mm)
+		tsb_context_switch(mm);
+}
+
+void smp_tsb_sync(struct mm_struct *mm)
+{
+	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+}
+
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
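The point of splitting smp_call_function() into a mask-taking helper shows up in smp_tsb_sync(): only the CPUs recorded in mm->cpu_vm_mask have ever run the address space, so only they need to be cross-called when its TSB changes, rather than every online CPU. Below is a tiny stand-alone sketch of that difference; the bit counting merely mimics the kernel's cpus_weight(), and the example masks are made up.

#include <stdio.h>

/* Stand-in for the kernel's cpus_weight(): count set bits in a mask. */
static int mask_weight(unsigned long mask)
{
        int n = 0;

        while (mask) {
                n += mask & 1;
                mask >>= 1;
        }
        return n;
}

int main(void)
{
        unsigned long online_map  = 0xff;  /* hypothetical: CPUs 0-7 online   */
        unsigned long cpu_vm_mask = 0x13;  /* hypothetical: mm ran on 0, 1, 4 */

        /* Before the patch every online CPU but the caller was interrupted... */
        printf("unmasked targets: %d\n", mask_weight(online_map) - 1);
        /* ...afterwards only the CPUs that have run the mm are cross-called.  */
        printf("masked targets:   %d\n", mask_weight(cpu_vm_mask) - 1);
        return 0;
}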