 arch/arm/include/asm/tlb.h | 102 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 89 insertions(+), 13 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..e7690887b958 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,7 +18,6 @@
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
@@ -27,7 +26,23 @@
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)	0
+#define FREE_PTE_NR		500
+#else
+#define tlb_fast_mode(tlb)	1
+#define FREE_PTE_NR		0
+#endif
 
 /*
  * TLB handling. This allows us to remove pages from the page
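
Note: the hunk above introduces the deferred-free policy. As a rough,
hedged illustration, here is a minimal user-space model of the ordering
it enforces; all names are stand-ins (a "page" is a plain allocation and
flush_tlb() is a stub for the real invalidation), not kernel API:

	#include <stdlib.h>

	#define FREE_PTE_NR	500

	struct gather {
		void *pages[FREE_PTE_NR];
		unsigned int nr;
	};

	static void flush_tlb(void)
	{
		/* stand-in: on real hardware this invalidates stale entries */
	}

	/* Queue a page; free the batch only after the flush, so nothing
	 * can still reach a freed page through a stale TLB entry. */
	static void gather_page(struct gather *g, void *page)
	{
		g->pages[g->nr++] = page;
		if (g->nr == FREE_PTE_NR) {
			flush_tlb();			/* invalidate first... */
			while (g->nr)
				free(g->pages[--g->nr]);	/* ...then free */
		}
	}

	int main(void)
	{
		struct gather g = { .nr = 0 };
		for (int i = 0; i < 1000; i++)
			gather_page(&g, malloc(64));	/* drains twice */
		return 0;
	}

On a UP pre-v7 core neither hazard exists, so tlb_fast_mode() is 1 and
pages can be freed immediately with a zero-length batch.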
@@ -36,12 +51,58 @@
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
+	struct vm_area_struct	*vma;
 	unsigned long		range_start;
 	unsigned long		range_end;
+	unsigned int		nr;
+	struct page		*pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex. There's three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas. See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ *  3. Unmapping argument pages. See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	if (!tlb_fast_mode(tlb)) {
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
+		tlb->nr = 0;
+	}
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
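
Note: the comment block above enumerates the three callers. Sketched from
that description (these sequences live in generic mm code, not in this
patch), the cases look roughly like:

	/* Case 1: zap_page_range()/unmap_region() -- per-vma unmap */
	tlb = tlb_gather_mmu(mm, 0);		/* fullmm = 0 */
	tlb_start_vma(tlb, vma);		/* records tlb->vma */
	tlb_remove_tlb_entry(tlb, ptep, addr);	/* widens the flush range */
	tlb_end_vma(tlb, vma);			/* tlb_flush() -> flush_tlb_range() */
	tlb_finish_mmu(tlb, start, end);

	/* Case 3: shift_arg_pages() -- tlb_start_vma()/tlb_end_vma() are
	 * never called, so tlb->vma stays NULL and tlb_flush() falls back
	 * to the safe whole-mm flush_tlb_mm(). */

Case 2 (exit_mmap()) has the same shape as case 1 but with fullmm = 1, so
tlb_flush() always takes the flush_tlb_mm() path there as well.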
@@ -49,6 +110,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
+	tlb->vma = NULL;
+	tlb->nr = 0;
 
 	return tlb;
 }
@@ -56,8 +119,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
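
Note: this hunk appears to be what makes case 3 above safe: the old code
only flushed here when fullmm was set, while a caller such as
shift_arg_pages() never goes through tlb_end_vma() either. The new call
expands, per the definitions added earlier in this patch, to:

	tlb_flush_mmu(tlb);
	/*  = tlb_flush(tlb);                -- flush_tlb_mm() or a
	 *                                      pending flush_tlb_range()
	 *    if (!tlb_fast_mode(tlb)) {
	 *            free_pages_and_swap_cache(tlb->pages, tlb->nr);
	 *            tlb->nr = 0;           -- batch freed only after flush
	 *    }
	 */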
@@ -71,12 +133,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
+	tlb_add_flush(tlb, addr);
 }
 
 /*
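
Note: tlb_remove_tlb_entry() now reuses tlb_add_flush(), which tracks one
bounding interval rather than individual pages. A worked example, assuming
4K pages (the addresses are illustrative):

	tlb_add_flush(tlb, 0x8000);	/* range = [0x8000, 0x9000) */
	tlb_add_flush(tlb, 0x9000);	/* range = [0x8000, 0xa000) */
	tlb_add_flush(tlb, 0xb000);	/* range = [0x8000, 0xc000) */

The eventual flush_tlb_range() also covers the untouched page at 0xa000;
over-invalidation is harmless and keeps the bookkeeping to two words.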
@@ -89,6 +146,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
 		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+		tlb->vma = vma;
 		tlb->range_start = TASK_SIZE;
 		tlb->range_end = 0;
 	}
@@ -97,12 +155,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+	} else {
+		tlb->pages[tlb->nr++] = page;
+		if (tlb->nr >= FREE_PTE_NR)
+			tlb_flush_mmu(tlb);
+	}
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
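
Note: previously tlb_remove_page() freed a page immediately and
pte_free_tlb() freed the page-table page immediately via pte_free(). Both
now funnel through the deferred path; paraphrasing the new
__pte_free_tlb(), freeing a page-table page becomes:

	pgtable_page_dtor(pte);		/* undo pgtable_page_ctor() state */
	tlb_add_flush(tlb, addr);	/* queue a flush covering this table */
	tlb_remove_page(tlb, pte);	/* batched free on SMP/ARMv7; plain
					 * free_page_and_swap_cache() in
					 * fast mode */

so a concurrent walk on another CPU can no longer land in a page that has
already been freed and reused.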