author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-01-30 07:32:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:32:55 -0500
commit	925596a017bbd045ff711b778256f459e50a1192 (patch)
tree	fd0dd20d4ee216344a964c21aa6accaaa0749b57 /arch/x86/mach-voyager
parent	6c3866558213ff706d8331053386915371ad63ec (diff)
x86: avoid name conflict for Voyager leave_mm
Avoid a conflict between Voyager's leave_mm and asm-x86/mmu.h's leave_mm.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
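To illustrate the kind of clash the rename sidesteps, here is a minimal, hypothetical C sketch (not the real kernel headers): a shared header exports a non-static leave_mm(), so a file-local helper with the same name would collide, while a differently named static helper coexists cleanly.

/* conflict_sketch.c -- illustrative only; prototypes are assumptions,
 * not the actual asm-x86/mmu.h or voyager_smp.c contents. */

void leave_mm(int cpu);		/* what the generic header declares for common code */

/* Had this still been called leave_mm(), the static definition would
 * conflict with the non-static declaration above; the voyager_ prefix
 * keeps the helper file-local without shadowing the shared symbol. */
static void voyager_leave_mm(unsigned long cpu)
{
	(void)cpu;		/* real body does the Voyager lazy-TLB teardown */
}

int main(void)
{
	voyager_leave_mm(0);
	return 0;
}

Renaming the static helper rather than the shared symbol keeps the generic leave_mm() available to common code while Voyager's private version stays confined to voyager_smp.c.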
Diffstat (limited to 'arch/x86/mach-voyager')
-rw-r--r--	arch/x86/mach-voyager/voyager_smp.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 751777241881..dffa786f61fe 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -808,7 +808,7 @@ static DEFINE_SPINLOCK(tlbstate_lock);
  * We need to reload %cr3 since the page tables may be going
  * away from under us..
  */
-static inline void leave_mm(unsigned long cpu)
+static inline void voyager_leave_mm(unsigned long cpu)
 {
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
@@ -838,7 +838,7 @@ static void smp_invalidate_interrupt(void)
 		else
 			__flush_tlb_one(flush_va);
 	} else
-		leave_mm(cpu);
+		voyager_leave_mm(cpu);
 	}
 	smp_mb__before_clear_bit();
 	clear_bit(cpu, &smp_invalidate_needed);
@@ -919,7 +919,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		if (current->mm)
 			local_flush_tlb();
 		else
-			leave_mm(smp_processor_id());
+			voyager_leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
 		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
@@ -939,7 +939,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		if (current->mm)
 			__flush_tlb_one(va);
 		else
-			leave_mm(smp_processor_id());
+			voyager_leave_mm(smp_processor_id());
 	}

 	if (cpu_mask)
@@ -1155,7 +1155,7 @@ static void do_flush_tlb_all(void *info)

 	__flush_tlb_all();
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-		leave_mm(cpu);
+		voyager_leave_mm(cpu);
 }

 /* flush the TLB of every active CPU in the system */