author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:24 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:12:41 -0400
commit		d4e167da4cb60910f6ac305aee03714937f70b71 (patch)
tree		44a0faf402dd91024544b3450033b80524b25799 /arch/powerpc/mm
parent		44c58ccc8dc25f78a4f641901f17092c93dd0458 (diff)
powerpc/mm: Make low level TLB flush ops on BookE take additional args
We need to pass down whether the page is direct or indirect, and we'll need to pass the page size to _tlbil_va and _tlbivax_bcast. We also add a new low-level _tlbil_pid_noind() which does a TLB flush by PID but avoids flushing indirect entries if possible.

This implements those new prototypes but defines them with inlines or macros so that no additional arguments are actually passed on current processors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
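In outline, the compatibility shim this patch introduces is the following pattern (a condensed sketch of the mmu_decl.h hunks below, shown out of context; __tlbil_va is the renamed assembly entry point):

/* The public prototype grows tsize/ind arguments, but on current
 * processors the inline simply drops them before calling the
 * existing low-level routine, so the assembly side only needs the
 * rename from _tlbil_va to __tlbil_va.
 */
extern void __tlbil_va(unsigned long address, unsigned int pid);

static inline void _tlbil_va(unsigned long address, unsigned int pid,
			     unsigned int tsize, unsigned int ind)
{
	__tlbil_va(address, pid);	/* tsize/ind unused for now */
}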
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/mmu_decl.h		| 16
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c		| 42
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	|  6
3 files changed, 47 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d1f9c62dc177..3871dceee2dd 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -36,21 +36,30 @@ static inline void _tlbil_pid(unsigned int pid)
 {
 	asm volatile ("sync; tlbia; isync" : : : "memory");
 }
+#define _tlbil_pid_noind(pid)	_tlbil_pid(pid)
+
 #else /* CONFIG_40x || CONFIG_8xx */
 extern void _tlbil_all(void);
 extern void _tlbil_pid(unsigned int pid);
+#define _tlbil_pid_noind(pid)	_tlbil_pid(pid)
 #endif /* !(CONFIG_40x || CONFIG_8xx) */
 
 /*
  * On 8xx, we directly inline tlbie, on others, it's extern
  */
 #ifdef CONFIG_8xx
-static inline void _tlbil_va(unsigned long address, unsigned int pid)
+static inline void _tlbil_va(unsigned long address, unsigned int pid,
+			     unsigned int tsize, unsigned int ind)
 {
 	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
 }
 #else /* CONFIG_8xx */
-extern void _tlbil_va(unsigned long address, unsigned int pid);
+extern void __tlbil_va(unsigned long address, unsigned int pid);
+static inline void _tlbil_va(unsigned long address, unsigned int pid,
+			     unsigned int tsize, unsigned int ind)
+{
+	__tlbil_va(address, pid);
+}
 #endif /* CONIFG_8xx */
 
 /*
@@ -58,7 +67,8 @@ extern void _tlbil_va(unsigned long address, unsigned int pid);
  * implementation. When that becomes the case, this will be
  * an extern.
  */
-static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
+				  unsigned int tsize, unsigned int ind)
 {
 	BUG();
 }
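As the macros above show, _tlbil_pid_noind() is just an alias for _tlbil_pid() on processors without indirect TLB entries. A caller on the context-teardown path might eventually use it roughly like this (a hypothetical sketch, not part of this patch):

/* Hypothetical: flush the direct entries for a dying context. An MMU
 * that caches indirect (page-table) entries may keep them; elsewhere
 * this degenerates to a plain _tlbil_pid().
 */
static inline void example_flush_context(struct mm_struct *mm)
{
	_tlbil_pid_noind(mm->context.id);
}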
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d908e75cc3b5..761e8882416f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -67,18 +67,24 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(local_flush_tlb_mm);
 
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+			    int tsize, int ind)
 {
 	unsigned int pid;
 
 	preempt_disable();
-	pid = vma ? vma->vm_mm->context.id : 0;
+	pid = mm ? mm->context.id : 0;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbil_va(vmaddr, pid);
+		_tlbil_va(vmaddr, pid, tsize, ind);
 	preempt_enable();
 }
-EXPORT_SYMBOL(local_flush_tlb_page);
 
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
+			       0 /* tsize unused for now */, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
 
 /*
  * And here are the SMP non-local implementations
@@ -96,6 +102,8 @@ static int mm_is_core_local(struct mm_struct *mm)
 struct tlb_flush_param {
 	unsigned long addr;
 	unsigned int pid;
+	unsigned int tsize;
+	unsigned int ind;
 };
 
 static void do_flush_tlb_mm_ipi(void *param)
@@ -109,7 +117,7 @@ static void do_flush_tlb_page_ipi(void *param)
 {
 	struct tlb_flush_param *p = param;
 
-	_tlbil_va(p->addr, p->pid);
+	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
 }
 
 
@@ -149,37 +157,49 @@ void flush_tlb_mm(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(flush_tlb_mm);
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+		      int tsize, int ind)
 {
 	struct cpumask *cpu_mask;
 	unsigned int pid;
 
 	preempt_disable();
-	pid = vma ? vma->vm_mm->context.id : 0;
+	pid = mm ? mm->context.id : 0;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto bail;
-	cpu_mask = mm_cpumask(vma->vm_mm);
+	cpu_mask = mm_cpumask(mm);
 	if (!mm_is_core_local(mm)) {
 		/* If broadcast tlbivax is supported, use it */
 		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
 			if (lock)
 				spin_lock(&tlbivax_lock);
-			_tlbivax_bcast(vmaddr, pid);
+			_tlbivax_bcast(vmaddr, pid, tsize, ind);
 			if (lock)
 				spin_unlock(&tlbivax_lock);
 			goto bail;
 		} else {
-			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+			struct tlb_flush_param p = {
+				.pid = pid,
+				.addr = vmaddr,
+				.tsize = tsize,
+				.ind = ind,
+			};
 			/* Ignores smp_processor_id() even if set in cpu_mask */
 			smp_call_function_many(cpu_mask,
 					       do_flush_tlb_page_ipi, &p, 1);
 		}
 	}
-	_tlbil_va(vmaddr, pid);
+	_tlbil_va(vmaddr, pid, tsize, ind);
  bail:
 	preempt_enable();
 }
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
+			 0 /* tsize unused for now */, 0);
+}
 EXPORT_SYMBOL(flush_tlb_page);
 
 #endif /* CONFIG_SMP */
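With this split, flush_tlb_page() keeps its old behaviour (tsize and ind both 0 for now) while future code can flush a specific page size through __flush_tlb_page() directly. A hypothetical caller, purely for illustration and not part of this patch, could look like:

/* Hypothetical: flush one large direct (leaf) entry with an explicit
 * MMU page-size code. ind=0 targets a direct entry; ind=1 would target
 * an indirect (page-table) entry on MMUs that support them.
 */
static void example_flush_huge_page(struct mm_struct *mm,
				    unsigned long addr, int psize)
{
	__flush_tlb_page(mm, addr, psize, 0);
}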
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 3037911279b1..c7d89a0adba2 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -39,7 +39,7 @@
39/* 39/*
40 * 40x implementation needs only tlbil_va 40 * 40x implementation needs only tlbil_va
41 */ 41 */
42_GLOBAL(_tlbil_va) 42_GLOBAL(__tlbil_va)
43 /* We run the search with interrupts disabled because we have to change 43 /* We run the search with interrupts disabled because we have to change
44 * the PID and I don't want to preempt when that happens. 44 * the PID and I don't want to preempt when that happens.
45 */ 45 */
@@ -71,7 +71,7 @@ _GLOBAL(_tlbil_va)
71 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep 71 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
72 * of the TLB for everything else. 72 * of the TLB for everything else.
73 */ 73 */
74_GLOBAL(_tlbil_va) 74_GLOBAL(__tlbil_va)
75 mfspr r5,SPRN_MMUCR 75 mfspr r5,SPRN_MMUCR
76 rlwimi r5,r4,0,24,31 /* Set TID */ 76 rlwimi r5,r4,0,24,31 /* Set TID */
77 77
@@ -170,7 +170,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
170 * Flush MMU TLB for a particular address, but only on the local processor 170 * Flush MMU TLB for a particular address, but only on the local processor
171 * (no broadcast) 171 * (no broadcast)
172 */ 172 */
173_GLOBAL(_tlbil_va) 173_GLOBAL(__tlbil_va)
174 mfmsr r10 174 mfmsr r10
175 wrteei 0 175 wrteei 0
176 slwi r4,r4,16 176 slwi r4,r4,16