Diffstat (limited to 'include/asm-powerpc/tlbflush.h')
-rw-r--r--	include/asm-powerpc/tlbflush.h	172
1 file changed, 104 insertions(+), 68 deletions(-)
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 93c7d0c7230f..86e6266a028b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,10 +17,73 @@
  */
 #ifdef __KERNEL__
 
-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}
 
-#ifdef CONFIG_PPC64
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
 
 #include <linux/percpu.h>
 #include <asm/page.h>
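
The TODO above leaves open whether flush_tlb_range() and
flush_tlb_kernel_range() on FSL_BOOKE are better served by a full
tlbia or by per-page tlbie's. A minimal sketch of the per-page
alternative, assuming the _tlbie() helper declared in this patch;
the function name is illustrative and not part of the patch:

/* Hypothetical per-page variant of flush_tlb_range(): invalidate each
 * page translation in [start, end) instead of discarding the whole
 * TLB. Only worthwhile when the range is small compared to the cost
 * of refilling a fully flushed TLB. */
static inline void flush_tlb_range_one_by_one(struct vm_area_struct *vma,
					      unsigned long start,
					      unsigned long end)
{
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		_tlbie(addr);	/* invalidate one page's TLB entry */
}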
@@ -28,117 +91,90 @@ struct mm_struct;
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	real_pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int psize;
+	int			active;
+	unsigned long		index;
+	struct mm_struct	*mm;
+	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int		psize;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-static inline void flush_tlb_pending(void)
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
+	batch->active = 0;
 }
 
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+
+
 extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 			    int local);
 extern void flush_hash_range(unsigned long number, int local);
 
-#else /* CONFIG_PPC64 */
-
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* CONFIG_PPC64 */
-
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-#ifdef CONFIG_PPC64
-	flush_tlb_pending();
-#else
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	flush_tlb_pending();
 }
 
-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif
 
 /*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+/*
  * This is called in munmap when we have freed up some page-table
  * pages. We don't need to do anything here, there's nothing special
  * about our page-table pages. -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
 
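
Note on the new lazy-MMU hooks above: generic code brackets a run of
PTE updates with arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode().
While the batch is active, each update funnels through hpte_need_flush(),
which queues the (vaddr, pte) pair in the per-CPU ppc64_tlb_batch rather
than flushing the hash table at once; the queued invalidations are then
issued by a single __flush_tlb_pending() call on leave. A rough sketch of
the calling pattern, assuming a hypothetical clear_one_pte() helper whose
PTE write ends up in hpte_need_flush():

/* Illustrative caller-side pattern (not part of this patch): tear down
 * a range of PTEs with the hash-table flushes batched by the lazy-MMU
 * hooks introduced above. */
static void zap_range_batched(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();		/* batch->active = 1 */
	for (addr = start; addr < end; addr += PAGE_SIZE)
		clear_one_pte(mm, addr);	/* hypothetical helper; queues a
						 * flush via hpte_need_flush() */
	arch_leave_lazy_mmu_mode();		/* one __flush_tlb_pending() */
}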