diff options
| author | David S. Miller <davem@davemloft.net> | 2005-11-07 17:09:58 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2005-11-07 17:09:58 -0500 |
| commit | 62dbec78be652c28f63ad5eda3d01c244c916040 (patch) | |
| tree | 2f1e77d3f3a645dd4bfc36ef80bb60a9a2161ad8 | |
| parent | 4c85ce522fc4bf1b8fcd6255fadc11cfb75773df (diff) | |
[SPARC64] mm: Do not flush TLB mm in tlb_finish_mmu()
It isn't needed any longer, as noted by Hugh Dickins.
We still need the flush routines, due to the one remaining
call site in hugetlb_prefault_arch_hook(). That can be
eliminated at some later point, however.
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | arch/sparc64/kernel/smp.c | 48 | ||||
| -rw-r--r-- | include/asm-sparc64/tlb.h | 6 |
2 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index a9089e2140e9..5d90ee9aebf1 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
| @@ -839,43 +839,29 @@ void smp_flush_tlb_all(void) | |||
| 839 | * questionable (in theory the big win for threads is the massive sharing of | 839 | * questionable (in theory the big win for threads is the massive sharing of |
| 840 | * address space state across processors). | 840 | * address space state across processors). |
| 841 | */ | 841 | */ |
| 842 | |||
| 843 | /* This currently is only used by the hugetlb arch pre-fault | ||
| 844 | * hook on UltraSPARC-III+ and later when changing the pagesize | ||
| 845 | * bits of the context register for an address space. | ||
| 846 | */ | ||
| 842 | void smp_flush_tlb_mm(struct mm_struct *mm) | 847 | void smp_flush_tlb_mm(struct mm_struct *mm) |
| 843 | { | 848 | { |
| 844 | /* | 849 | u32 ctx = CTX_HWBITS(mm->context); |
| 845 | * This code is called from two places, dup_mmap and exit_mmap. In the | 850 | int cpu = get_cpu(); |
| 846 | * former case, we really need a flush. In the later case, the callers | ||
| 847 | * are single threaded exec_mmap (really need a flush), multithreaded | ||
| 848 | * exec_mmap case (do not need to flush, since the caller gets a new | ||
| 849 | * context via activate_mm), and all other callers of mmput() whence | ||
| 850 | * the flush can be optimized since the associated threads are dead and | ||
| 851 | * the mm is being torn down (__exit_mm and other mmput callers) or the | ||
| 852 | * owning thread is dissociating itself from the mm. The | ||
| 853 | * (atomic_read(&mm->mm_users) == 0) check ensures real work is done | ||
| 854 | * for single thread exec and dup_mmap cases. An alternate check might | ||
| 855 | * have been (current->mm != mm). | ||
| 856 | * Kanoj Sarcar | ||
| 857 | */ | ||
| 858 | if (atomic_read(&mm->mm_users) == 0) | ||
| 859 | return; | ||
| 860 | |||
| 861 | { | ||
| 862 | u32 ctx = CTX_HWBITS(mm->context); | ||
| 863 | int cpu = get_cpu(); | ||
| 864 | 851 | ||
| 865 | if (atomic_read(&mm->mm_users) == 1) { | 852 | if (atomic_read(&mm->mm_users) == 1) { |
| 866 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | 853 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); |
| 867 | goto local_flush_and_out; | 854 | goto local_flush_and_out; |
| 868 | } | 855 | } |
| 869 | 856 | ||
| 870 | smp_cross_call_masked(&xcall_flush_tlb_mm, | 857 | smp_cross_call_masked(&xcall_flush_tlb_mm, |
| 871 | ctx, 0, 0, | 858 | ctx, 0, 0, |
| 872 | mm->cpu_vm_mask); | 859 | mm->cpu_vm_mask); |
| 873 | 860 | ||
| 874 | local_flush_and_out: | 861 | local_flush_and_out: |
| 875 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | 862 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); |
| 876 | 863 | ||
| 877 | put_cpu(); | 864 | put_cpu(); |
| 878 | } | ||
| 879 | } | 865 | } |
| 880 | 866 | ||
| 881 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | 867 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) |
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h index 66138d959df5..1eda17954f39 100644 --- a/include/asm-sparc64/tlb.h +++ b/include/asm-sparc64/tlb.h | |||
| @@ -78,11 +78,9 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un | |||
| 78 | { | 78 | { |
| 79 | tlb_flush_mmu(mp); | 79 | tlb_flush_mmu(mp); |
| 80 | 80 | ||
| 81 | if (mp->fullmm) { | 81 | if (mp->fullmm) |
| 82 | if (CTX_VALID(mp->mm->context)) | ||
| 83 | do_flush_tlb_mm(mp->mm); | ||
| 84 | mp->fullmm = 0; | 82 | mp->fullmm = 0; |
| 85 | } else | 83 | else |
| 86 | flush_tlb_pending(); | 84 | flush_tlb_pending(); |
| 87 | 85 | ||
| 88 | /* keep the page table cache within bounds */ | 86 | /* keep the page table cache within bounds */ |
