author    Christoph Egger <siccegge@cs.fau.de>    2010-06-09 22:23:11 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>    2010-07-08 21:28:38 -0400
commit    cccd23428347251713b643d4bc5edb610308fd49 (patch)
tree      6a320402e9c86af2e8ac598c3fe0ba3207db2366 /arch/powerpc
parent    74052173177b7f969d9cc0c8f136093e1d447a01 (diff)
powerpc: Removing dead CONFIG_SMP_750
CONFIG_SMP_750 doesn't exist in Kconfig, so remove all references to it from the source code.

Signed-off-by: Christoph Egger <siccegge@cs.fau.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
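For context, a minimal user-space sketch (not part of the patch) of why the guarded code is dead: Kconfig only defines CONFIG_ macros for symbols that have a Kconfig entry, so an #ifdef on a symbol with no entry always falls through to the #else branch and FINISH_FLUSH expands to a no-op in every real build. The printf() stand-in below is purely illustrative and replaces the kernel's smp_send_tlb_invalidate() helper, which is not available outside the kernel.

/*
 * Illustrative sketch only: since no Kconfig entry generates
 * CONFIG_SMP_750, the #ifdef branch can never be selected and
 * FINISH_FLUSH always expands to the empty do/while no-op.
 */
#include <stdio.h>

#ifdef CONFIG_SMP_750
#define FINISH_FLUSH	printf("IPI other CPUs to invalidate their TLBs\n")
#else
#define FINISH_FLUSH	do { } while (0)	/* always chosen */
#endif

int main(void)
{
	FINISH_FLUSH;	/* compiles to nothing in any real build */
	return 0;
}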
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/tlb_hash32.c	| 15
1 file changed, 0 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 8aaa8b7eb324..690566b66e8e 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -89,17 +89,6 @@ void tlb_flush(struct mmu_gather *tlb)
  * -- Cort
  */
 
-/*
- * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
- * the cache operations on the bus. Hence we need to use an IPI
- * to get the other CPU(s) to invalidate their TLBs.
- */
-#ifdef CONFIG_SMP_750
-#define FINISH_FLUSH	smp_send_tlb_invalidate(0)
-#else
-#define FINISH_FLUSH	do { } while (0)
-#endif
-
 static void flush_range(struct mm_struct *mm, unsigned long start,
 			unsigned long end)
 {
@@ -138,7 +127,6 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	flush_range(&init_mm, start, end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 
@@ -162,7 +150,6 @@ void flush_tlb_mm(struct mm_struct *mm)
 	 */
 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_mm);
 
@@ -179,7 +166,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
@@ -192,6 +178,5 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	flush_range(vma->vm_mm, start, end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_range);