Diffstat (limited to 'include/asm-powerpc/tlbflush.h')
-rw-r--r--	include/asm-powerpc/tlbflush.h | 172
1 file changed, 104 insertions(+), 68 deletions(-)
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 93c7d0c7230f..86e6266a028b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,10 +17,73 @@
  */
 #ifdef __KERNEL__
 
-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}
 
-#ifdef CONFIG_PPC64
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
 
 #include <linux/percpu.h>
 #include <asm/page.h>
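
[Editorial note: the TODO in the hunk above leaves open whether flush_tlb_range() and flush_tlb_kernel_range() on FSL BookE should keep the global tlbia or issue specific tlbie's. A minimal sketch of the per-page alternative, using only the _tlbie() declared in this header plus the standard PAGE_MASK/PAGE_SIZE macros from <asm/page.h>; the function name flush_tlb_range_by_page is hypothetical and not part of this commit:

static inline void flush_tlb_range_by_page(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long addr;

	/* one tlbie per page instead of invalidating the whole TLB */
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		_tlbie(addr);
}

Whether this wins depends on the range size: for large ranges the loop of tlbie's is likely slower than a single tlbia that flushes everything.]
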
@@ -28,117 +91,90 @@ struct mm_struct;
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	real_pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int psize;
+	int			active;
+	unsigned long		index;
+	struct mm_struct	*mm;
+	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int		psize;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-static inline void flush_tlb_pending(void)
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
+	batch->active = 0;
 }
 
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+
+
 extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 			    int local);
 extern void flush_hash_range(unsigned long number, int local);
 
-#else /* CONFIG_PPC64 */
-
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* CONFIG_PPC64 */
-
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-#ifdef CONFIG_PPC64
-	flush_tlb_pending();
-#else
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	flush_tlb_pending();
 }
 
-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif
 
 /*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+/*
  * This is called in munmap when we have freed up some page-table
  * pages.  We don't need to do anything here, there's nothing special
  * about our page-table pages.  -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
 
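
[Editorial note: the PPC64 side of this change replaces the old flush_tlb_pending() pattern with lazy MMU mode. Callers bracket a run of PTE updates with arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), and hpte_need_flush() queues each hash-table invalidation into the per-cpu ppc64_tlb_batch instead of flushing immediately. A rough sketch of the intended flow, assuming kernel context; example_zap_range and its PTE walk are hypothetical and not code from this commit:

static void example_zap_range(struct mm_struct *mm, pte_t *ptep,
			      unsigned long addr, unsigned long end)
{
	arch_enter_lazy_mmu_mode();	/* marks the per-cpu batch active */
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		/* the old PTE value goes into the batch rather than
		 * triggering an immediate hash-table flush */
		hpte_need_flush(mm, addr, ptep, pte_val(*ptep), 0);
	}
	arch_leave_lazy_mmu_mode();	/* __flush_tlb_pending() drains the batch */
}

Collecting up to PPC64_TLB_BATCH_NR entries this way lets flush_hash_range() invalidate them in one pass instead of paying the tlbie/sync cost per PTE.]
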
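
[Editorial note: the comment moved above update_mmu_cache() describes its call site: generic fault handling installs the new PTE first, then calls update_mmu_cache() so hash-MMU machines can preload the HPTE and fix i-cache/d-cache coherency. A minimal sketch of that ordering; example_finish_fault is hypothetical, and set_pte_at() is the generic helper assumed available at this point in the tree:

static void example_finish_fault(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry)
{
	/* make the new translation visible in the page tables... */
	set_pte_at(vma->vm_mm, address, ptep, entry);
	/* ...then let the arch preload the hash-table entry instead of
	 * taking the otherwise inevitable hash-miss exception */
	update_mmu_cache(vma, address, entry);
}]
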