about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/tlb_nohash.c
diff options
context:
space:
mode:
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:24 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:12:41 -0400
commit	d4e167da4cb60910f6ac305aee03714937f70b71 (patch)
tree	44a0faf402dd91024544b3450033b80524b25799 /arch/powerpc/mm/tlb_nohash.c
parent	44c58ccc8dc25f78a4f641901f17092c93dd0458 (diff)
powerpc/mm: Make low level TLB flush ops on BookE take additional args
We need to pass down whether the page is direct or indirect and we'll need to pass the page size to _tlbil_va and _tlbivax_bcast. We also add a new low level _tlbil_pid_noind() which does a TLB flush by PID but avoids flushing indirect entries if possible. This implements those new prototypes but defines them with inlines or macros so that no additional arguments are actually passed on current processors. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	42
1 files changed, 31 insertions, 11 deletions
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d908e75cc3b..761e8882416 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -67,18 +67,24 @@ void local_flush_tlb_mm(struct mm_struct *mm)
67} 67}
68EXPORT_SYMBOL(local_flush_tlb_mm); 68EXPORT_SYMBOL(local_flush_tlb_mm);
69 69
70void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) 70void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
71 int tsize, int ind)
71{ 72{
72 unsigned int pid; 73 unsigned int pid;
73 74
74 preempt_disable(); 75 preempt_disable();
75 pid = vma ? vma->vm_mm->context.id : 0; 76 pid = mm ? mm->context.id : 0;
76 if (pid != MMU_NO_CONTEXT) 77 if (pid != MMU_NO_CONTEXT)
77 _tlbil_va(vmaddr, pid); 78 _tlbil_va(vmaddr, pid, tsize, ind);
78 preempt_enable(); 79 preempt_enable();
79} 80}
80EXPORT_SYMBOL(local_flush_tlb_page);
81 81
82void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
83{
84 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
85 0 /* tsize unused for now */, 0);
86}
87EXPORT_SYMBOL(local_flush_tlb_page);
82 88
83/* 89/*
84 * And here are the SMP non-local implementations 90 * And here are the SMP non-local implementations
@@ -96,6 +102,8 @@ static int mm_is_core_local(struct mm_struct *mm)
96struct tlb_flush_param { 102struct tlb_flush_param {
97 unsigned long addr; 103 unsigned long addr;
98 unsigned int pid; 104 unsigned int pid;
105 unsigned int tsize;
106 unsigned int ind;
99}; 107};
100 108
101static void do_flush_tlb_mm_ipi(void *param) 109static void do_flush_tlb_mm_ipi(void *param)
@@ -109,7 +117,7 @@ static void do_flush_tlb_page_ipi(void *param)
109{ 117{
110 struct tlb_flush_param *p = param; 118 struct tlb_flush_param *p = param;
111 119
112 _tlbil_va(p->addr, p->pid); 120 _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
113} 121}
114 122
115 123
@@ -149,37 +157,49 @@ void flush_tlb_mm(struct mm_struct *mm)
149} 157}
150EXPORT_SYMBOL(flush_tlb_mm); 158EXPORT_SYMBOL(flush_tlb_mm);
151 159
152void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) 160void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
161 int tsize, int ind)
153{ 162{
154 struct cpumask *cpu_mask; 163 struct cpumask *cpu_mask;
155 unsigned int pid; 164 unsigned int pid;
156 165
157 preempt_disable(); 166 preempt_disable();
158 pid = vma ? vma->vm_mm->context.id : 0; 167 pid = mm ? mm->context.id : 0;
159 if (unlikely(pid == MMU_NO_CONTEXT)) 168 if (unlikely(pid == MMU_NO_CONTEXT))
160 goto bail; 169 goto bail;
161 cpu_mask = mm_cpumask(vma->vm_mm); 170 cpu_mask = mm_cpumask(mm);
162 if (!mm_is_core_local(mm)) { 171 if (!mm_is_core_local(mm)) {
163 /* If broadcast tlbivax is supported, use it */ 172 /* If broadcast tlbivax is supported, use it */
164 if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) { 173 if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
165 int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL); 174 int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
166 if (lock) 175 if (lock)
167 spin_lock(&tlbivax_lock); 176 spin_lock(&tlbivax_lock);
168 _tlbivax_bcast(vmaddr, pid); 177 _tlbivax_bcast(vmaddr, pid, tsize, ind);
169 if (lock) 178 if (lock)
170 spin_unlock(&tlbivax_lock); 179 spin_unlock(&tlbivax_lock);
171 goto bail; 180 goto bail;
172 } else { 181 } else {
173 struct tlb_flush_param p = { .pid = pid, .addr = vmaddr }; 182 struct tlb_flush_param p = {
183 .pid = pid,
184 .addr = vmaddr,
185 .tsize = tsize,
186 .ind = ind,
187 };
174 /* Ignores smp_processor_id() even if set in cpu_mask */ 188 /* Ignores smp_processor_id() even if set in cpu_mask */
175 smp_call_function_many(cpu_mask, 189 smp_call_function_many(cpu_mask,
176 do_flush_tlb_page_ipi, &p, 1); 190 do_flush_tlb_page_ipi, &p, 1);
177 } 191 }
178 } 192 }
179 _tlbil_va(vmaddr, pid); 193 _tlbil_va(vmaddr, pid, tsize, ind);
180 bail: 194 bail:
181 preempt_enable(); 195 preempt_enable();
182} 196}
197
198void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
199{
200 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
201 0 /* tsize unused for now */, 0);
202}
183EXPORT_SYMBOL(flush_tlb_page); 203EXPORT_SYMBOL(flush_tlb_page);
184 204
185#endif /* CONFIG_SMP */ 205#endif /* CONFIG_SMP */