about summary refs log tree commit diff stats
path: root/arch/sparc64
diff options
context:
space:
mode:
author    David S. Miller <davem@davemloft.net>  2005-11-07 17:09:58 -0500
committer David S. Miller <davem@davemloft.net>  2005-11-07 17:09:58 -0500
commit    62dbec78be652c28f63ad5eda3d01c244c916040 (patch)
tree      2f1e77d3f3a645dd4bfc36ef80bb60a9a2161ad8 /arch/sparc64
parent    4c85ce522fc4bf1b8fcd6255fadc11cfb75773df (diff)
[SPARC64] mm: Do not flush TLB mm in tlb_finish_mmu()
It isn't needed any longer, as noted by Hugh Dickins.

We still need the flush routines, due to the one remaining call site in
hugetlb_prefault_arch_hook().  That can be eliminated at some later
point, however.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/smp.c | 48
1 file changed, 17 insertions(+), 31 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index a9089e2140e9..5d90ee9aebf1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -839,43 +839,29 @@ void smp_flush_tlb_all(void)
  * questionable (in theory the big win for threads is the massive sharing of
  * address space state across processors).
  */
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-	/*
-	 * This code is called from two places, dup_mmap and exit_mmap.  In the
-	 * former case, we really need a flush.  In the later case, the callers
-	 * are single threaded exec_mmap (really need a flush), multithreaded
-	 * exec_mmap case (do not need to flush, since the caller gets a new
-	 * context via activate_mm), and all other callers of mmput() whence
-	 * the flush can be optimized since the associated threads are dead and
-	 * the mm is being torn down (__exit_mm and other mmput callers) or the
-	 * owning thread is dissociating itself from the mm.  The
-	 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
-	 * for single thread exec and dup_mmap cases.  An alternate check might
-	 * have been (current->mm != mm).
-	 *	 Kanoj Sarcar
-	 */
-	if (atomic_read(&mm->mm_users) == 0)
-		return;
-
-	{
-		u32 ctx = CTX_HWBITS(mm->context);
-		int cpu = get_cpu();
-
-		if (atomic_read(&mm->mm_users) == 1) {
-			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-			goto local_flush_and_out;
-		}
-
-		smp_cross_call_masked(&xcall_flush_tlb_mm,
-				      ctx, 0, 0,
-				      mm->cpu_vm_mask);
-
-	local_flush_and_out:
-		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
-
-		put_cpu();
-	}
+	u32 ctx = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	if (atomic_read(&mm->mm_users) == 1) {
+		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		goto local_flush_and_out;
+	}
+
+	smp_cross_call_masked(&xcall_flush_tlb_mm,
+			      ctx, 0, 0,
+			      mm->cpu_vm_mask);
+
+local_flush_and_out:
+	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+	put_cpu();
 }
 
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)