Diffstat (limited to 'include/asm-powerpc/tlbflush.h')
-rw-r--r-- | include/asm-powerpc/tlbflush.h | 39
1 files changed, 25 insertions, 14 deletions
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 93c7d0c7230f..0bc5a5e506be 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -28,25 +28,41 @@ struct mm_struct;
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
+	int			active;
 	unsigned long		index;
 	struct mm_struct	*mm;
 	real_pte_t		pte[PPC64_TLB_BATCH_NR];
 	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
 	unsigned int		psize;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-static inline void flush_tlb_pending(void)
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
+	batch->active = 0;
 }
 
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+
 extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 			    int local);
 extern void flush_hash_range(unsigned long number, int local);
@@ -88,15 +104,12 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-#ifdef CONFIG_PPC64
-	flush_tlb_pending();
-#else
+#ifndef CONFIG_PPC64
 	_tlbie(vmaddr);
 #endif
 }
@@ -112,13 +125,11 @@ static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	flush_tlb_pending();
 }
 
 #else /* 6xx, 7xx, 7xxx cpus */
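
For context (not part of the patch above): the intent of these hooks is that a caller batching page-table tear-downs brackets the PTE updates with arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), while the ppc64 hash-MMU PTE-update path reports each cleared translation via hpte_need_flush(). The sketch below only illustrates that calling pattern; do_clear_range() and clear_one_pte() are hypothetical stand-ins for the real unmap path, not functions from this patch.

/* Sketch only: how the new lazy-MMU hooks are meant to be driven by a
 * caller that batches PTE tear-downs.  do_clear_range() and
 * clear_one_pte() are made-up helpers used purely for illustration.
 */
static void do_clear_range(struct mm_struct *mm, pte_t *ptep,
			   unsigned long start, unsigned long end)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();		/* sets batch->active = 1 */
	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long old = clear_one_pte(mm, addr, ptep);

		/* On hash-MMU ppc64 the PTE-update path would call this to
		 * queue the stale hash-table entry for later invalidation. */
		hpte_need_flush(mm, addr, ptep, old, 0);
	}
	arch_leave_lazy_mmu_mode();		/* flushes any pending batch */
}

The new active flag is what lets hpte_need_flush() distinguish the two cases: inside a lazy-MMU section it can accumulate entries into the per-CPU batch, whereas outside one it has to flush the stale translation immediately.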