 arch/powerpc/mm/mmu_decl.h     |   1 +
 include/asm-powerpc/pgtable.h  |  10 ----------
 include/asm-powerpc/tlbflush.h | 135 ++++++++++++++--------
 3 files changed, 81 insertions(+), 65 deletions(-)
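
This patch restructures the powerpc TLB flush interface around MMU families instead of a flat CONFIG_PPC64 split. In include/asm-powerpc/tlbflush.h, software-loaded TLB parts (4xx, 8xx, Freescale BookE) get inline flush helpers built directly on _tlbie()/_tlbia(); classic 32-bit hash-MMU parts (6xx, 7xx, 7xxx) keep their out-of-line implementations; everything else falls through to the 64-bit hash-MMU batching code. The old flush_tlb_pending() macros and the duplicated stub/extern blocks disappear, and update_mmu_cache() gains a single declaration shared by all configurations.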
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index ee55e0bb28bc..9c4538bb04b0 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -19,6 +19,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  */
+#include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
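
mmu_decl.h previously got <linux/mm.h> indirectly: the old 32-bit branch of tlbflush.h included it, and that include is removed below. It now pulls in the header explicitly.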
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index c7142c7e0e05..19edb6982b81 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -448,16 +448,6 @@ extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)		(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)		((entry).val >> 8)
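
The update_mmu_cache() declaration, together with the struct vm_area_struct forward declaration, moves from pgtable.h to the common tail of tlbflush.h, where one copy of the expanded comment about i-cache/d-cache coherency serves every configuration.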
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 0bc5a5e506be..86e6266a028b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,10 +17,73 @@
  */
 #ifdef __KERNEL__
 
-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
 
-#ifdef CONFIG_PPC64
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}
+
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
 
 #include <linux/percpu.h>
 #include <asm/page.h>
@@ -67,89 +130,51 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 			    int local);
 extern void flush_hash_range(unsigned long number, int local);
 
-#else /* CONFIG_PPC64 */
-
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* CONFIG_PPC64 */
-
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
 }
 
-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif
 
 /*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+/*
  * This is called in munmap when we have freed up some page-table
  * pages. We don't need to do anything here, there's nothing special
  * about our page-table pages. -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
 
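
The structural idea worth noting is that the header now commits to a whole flush implementation per MMU family at preprocessor time: cheap operations become inline functions (or a one-instruction asm macro), expensive ones stay extern and live in .c files. Below is a minimal standalone sketch of that pattern, not kernel code; the names SOFT_TLB and flush_page are invented for illustration.

#include <stdio.h>

/* Select the implementation family at compile time, as tlbflush.h now
 * does with CONFIG_4xx/CONFIG_8xx/CONFIG_FSL_BOOKE vs CONFIG_PPC32 vs
 * the 64-bit hash-MMU fallback. */
#ifdef SOFT_TLB
/* Software-loaded TLB: the flush is one cheap primitive, so inline it. */
static inline void flush_page(unsigned long addr)
{
	printf("tlbie-style flush of %#lx\n", addr);
}
#else
/* Hash MMU: flushing means tracking and walking hash-table state, so it
 * stays out of line (in the kernel, an extern implemented in a .c file). */
static void flush_page(unsigned long addr)
{
	printf("flush hash entries covering %#lx\n", addr);
}
#endif

int main(void)
{
	flush_page(0xc0000000UL);	/* identical call site either way */
	return 0;
}

Build with or without -DSOFT_TLB to switch variants; call sites never change, which is what lets the five flush_tlb_* entry points keep one signature across all powerpc MMUs.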