diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-05-24 20:11:50 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 11:39:13 -0400 |
commit | 90f08e399d054d017c0e2c5089a0f44a76418271 (patch) | |
tree | 771d8a9ff675e3fe5deda53d0701e41f1f15e9a1 /arch/sparc/include/asm | |
parent | d6bf29b44ddf3ca915f77b9383bee8b7a209f3fd (diff) |
sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)
Sparc mmu_gather does two things:
- tracks vaddrs to unhash
- tracks pages to free
Split these two things, as powerpc has done, and keep the vaddrs
in per-cpu data structures so they can be flushed on context switch.
The remaining bits can then use the generic mmu_gather.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r-- | arch/sparc/include/asm/pgalloc_64.h | 3 | ||||
-rw-r--r-- | arch/sparc/include/asm/pgtable_64.h | 15 | ||||
-rw-r--r-- | arch/sparc/include/asm/tlb_64.h | 91 | ||||
-rw-r--r-- | arch/sparc/include/asm/tlbflush_64.h | 12 |
4 files changed, 31 insertions, 90 deletions
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 5bdfa2c6e400..4e5e0878144f 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h | |||
@@ -78,4 +78,7 @@ static inline void check_pgt_cache(void) | |||
78 | quicklist_trim(0, NULL, 25, 16); | 78 | quicklist_trim(0, NULL, 25, 16); |
79 | } | 79 | } |
80 | 80 | ||
81 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) | ||
82 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) | ||
83 | |||
81 | #endif /* _SPARC64_PGALLOC_H */ | 84 | #endif /* _SPARC64_PGALLOC_H */ |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index b77128c80524..1e03c5a6b4f7 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -655,9 +655,11 @@ static inline int pte_special(pte_t pte) | |||
655 | #define pte_unmap(pte) do { } while (0) | 655 | #define pte_unmap(pte) do { } while (0) |
656 | 656 | ||
657 | /* Actual page table PTE updates. */ | 657 | /* Actual page table PTE updates. */ |
658 | extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig); | 658 | extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, |
659 | pte_t *ptep, pte_t orig, int fullmm); | ||
659 | 660 | ||
660 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | 661 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, |
662 | pte_t *ptep, pte_t pte, int fullmm) | ||
661 | { | 663 | { |
662 | pte_t orig = *ptep; | 664 | pte_t orig = *ptep; |
663 | 665 | ||
@@ -670,12 +672,19 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p | |||
670 | * and SUN4V pte layout, so this inline test is fine. | 672 | * and SUN4V pte layout, so this inline test is fine. |
671 | */ | 673 | */ |
672 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) | 674 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) |
673 | tlb_batch_add(mm, addr, ptep, orig); | 675 | tlb_batch_add(mm, addr, ptep, orig, fullmm); |
674 | } | 676 | } |
675 | 677 | ||
678 | #define set_pte_at(mm,addr,ptep,pte) \ | ||
679 | __set_pte_at((mm), (addr), (ptep), (pte), 0) | ||
680 | |||
676 | #define pte_clear(mm,addr,ptep) \ | 681 | #define pte_clear(mm,addr,ptep) \ |
677 | set_pte_at((mm), (addr), (ptep), __pte(0UL)) | 682 | set_pte_at((mm), (addr), (ptep), __pte(0UL)) |
678 | 683 | ||
684 | #define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL | ||
685 | #define pte_clear_not_present_full(mm,addr,ptep,fullmm) \ | ||
686 | __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm)) | ||
687 | |||
679 | #ifdef DCACHE_ALIASING_POSSIBLE | 688 | #ifdef DCACHE_ALIASING_POSSIBLE |
680 | #define __HAVE_ARCH_MOVE_PTE | 689 | #define __HAVE_ARCH_MOVE_PTE |
681 | #define move_pte(pte, prot, old_addr, new_addr) \ | 690 | #define move_pte(pte, prot, old_addr, new_addr) \ |
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index dca406b9b6fc..190e18913cc6 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h | |||
@@ -7,66 +7,11 @@ | |||
7 | #include <asm/tlbflush.h> | 7 | #include <asm/tlbflush.h> |
8 | #include <asm/mmu_context.h> | 8 | #include <asm/mmu_context.h> |
9 | 9 | ||
10 | #define TLB_BATCH_NR 192 | ||
11 | |||
12 | /* | ||
13 | * For UP we don't need to worry about TLB flush | ||
14 | * and page free order so much.. | ||
15 | */ | ||
16 | #ifdef CONFIG_SMP | ||
17 | #define FREE_PTE_NR 506 | ||
18 | #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U) | ||
19 | #else | ||
20 | #define FREE_PTE_NR 1 | ||
21 | #define tlb_fast_mode(bp) 1 | ||
22 | #endif | ||
23 | |||
24 | struct mmu_gather { | ||
25 | struct mm_struct *mm; | ||
26 | unsigned int pages_nr; | ||
27 | unsigned int need_flush; | ||
28 | unsigned int fullmm; | ||
29 | unsigned int tlb_nr; | ||
30 | unsigned long vaddrs[TLB_BATCH_NR]; | ||
31 | struct page *pages[FREE_PTE_NR]; | ||
32 | }; | ||
33 | |||
34 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
35 | |||
36 | #ifdef CONFIG_SMP | 10 | #ifdef CONFIG_SMP |
37 | extern void smp_flush_tlb_pending(struct mm_struct *, | 11 | extern void smp_flush_tlb_pending(struct mm_struct *, |
38 | unsigned long, unsigned long *); | 12 | unsigned long, unsigned long *); |
39 | #endif | 13 | #endif |
40 | 14 | ||
41 | extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *); | ||
42 | extern void flush_tlb_pending(void); | ||
43 | |||
44 | static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | ||
45 | { | ||
46 | struct mmu_gather *mp = &get_cpu_var(mmu_gathers); | ||
47 | |||
48 | BUG_ON(mp->tlb_nr); | ||
49 | |||
50 | mp->mm = mm; | ||
51 | mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U; | ||
52 | mp->fullmm = full_mm_flush; | ||
53 | |||
54 | return mp; | ||
55 | } | ||
56 | |||
57 | |||
58 | static inline void tlb_flush_mmu(struct mmu_gather *mp) | ||
59 | { | ||
60 | if (!mp->fullmm) | ||
61 | flush_tlb_pending(); | ||
62 | if (mp->need_flush) { | ||
63 | free_pages_and_swap_cache(mp->pages, mp->pages_nr); | ||
64 | mp->pages_nr = 0; | ||
65 | mp->need_flush = 0; | ||
66 | } | ||
67 | |||
68 | } | ||
69 | |||
70 | #ifdef CONFIG_SMP | 15 | #ifdef CONFIG_SMP |
71 | extern void smp_flush_tlb_mm(struct mm_struct *mm); | 16 | extern void smp_flush_tlb_mm(struct mm_struct *mm); |
72 | #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) | 17 | #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) |
@@ -74,38 +19,14 @@ extern void smp_flush_tlb_mm(struct mm_struct *mm); | |||
74 | #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT) | 19 | #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT) |
75 | #endif | 20 | #endif |
76 | 21 | ||
77 | static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end) | 22 | extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *); |
78 | { | 23 | extern void flush_tlb_pending(void); |
79 | tlb_flush_mmu(mp); | ||
80 | |||
81 | if (mp->fullmm) | ||
82 | mp->fullmm = 0; | ||
83 | |||
84 | /* keep the page table cache within bounds */ | ||
85 | check_pgt_cache(); | ||
86 | |||
87 | put_cpu_var(mmu_gathers); | ||
88 | } | ||
89 | |||
90 | static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) | ||
91 | { | ||
92 | if (tlb_fast_mode(mp)) { | ||
93 | free_page_and_swap_cache(page); | ||
94 | return; | ||
95 | } | ||
96 | mp->need_flush = 1; | ||
97 | mp->pages[mp->pages_nr++] = page; | ||
98 | if (mp->pages_nr >= FREE_PTE_NR) | ||
99 | tlb_flush_mmu(mp); | ||
100 | } | ||
101 | |||
102 | #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0) | ||
103 | #define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage) | ||
104 | #define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp) | ||
105 | #define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr) | ||
106 | 24 | ||
107 | #define tlb_migrate_finish(mm) do { } while (0) | ||
108 | #define tlb_start_vma(tlb, vma) do { } while (0) | 25 | #define tlb_start_vma(tlb, vma) do { } while (0) |
109 | #define tlb_end_vma(tlb, vma) do { } while (0) | 26 | #define tlb_end_vma(tlb, vma) do { } while (0) |
27 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
28 | #define tlb_flush(tlb) flush_tlb_pending() | ||
29 | |||
30 | #include <asm-generic/tlb.h> | ||
110 | 31 | ||
111 | #endif /* _SPARC64_TLB_H */ | 32 | #endif /* _SPARC64_TLB_H */ |
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index fbb675dbe0c9..2ef463494153 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h | |||
@@ -5,9 +5,17 @@ | |||
5 | #include <asm/mmu_context.h> | 5 | #include <asm/mmu_context.h> |
6 | 6 | ||
7 | /* TSB flush operations. */ | 7 | /* TSB flush operations. */ |
8 | struct mmu_gather; | 8 | |
9 | #define TLB_BATCH_NR 192 | ||
10 | |||
11 | struct tlb_batch { | ||
12 | struct mm_struct *mm; | ||
13 | unsigned long tlb_nr; | ||
14 | unsigned long vaddrs[TLB_BATCH_NR]; | ||
15 | }; | ||
16 | |||
9 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); | 17 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); |
10 | extern void flush_tsb_user(struct mmu_gather *mp); | 18 | extern void flush_tsb_user(struct tlb_batch *tb); |
11 | 19 | ||
12 | /* TLB flush operations. */ | 20 | /* TLB flush operations. */ |
13 | 21 | ||