author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 07:30:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:35 -0500
commit		0b9c99b6f21c2e9e00938e9c57942ed71bfe4d21
tree		3d14168b8a58d03f91870b985e3dc9bf5d8aa2cc /arch/x86
parent		1075cf7a959f72833e54dd2d4f885617e58e3e0a
x86: cleanup tlbflush.h variants
Bring the tlbflush.h variants into sync to prepare merging and
paravirt support.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
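The tlbflush.h changes themselves fall outside the path filter of this page (the diffstat below is limited to 'arch/x86'). As a rough guide to what the callers in this diff compile against, a minimal sketch of the synchronized 64-bit flush interface follows; the exact header layout and guards are assumptions, not part of the displayed patch:

/*
 * Sketch (assumed, not part of this diff) of the synchronized 64-bit
 * tlbflush.h interface the callers below rely on.
 */

/* Sentinel 'va' value requesting a full TLB flush instead of a
 * single-page invalidation; replaces the per-file FLUSH_ALL defines. */
#define TLB_FLUSH_ALL	-1ULL

/* Native IPI-based remote flush.  The cpumask is passed by pointer so a
 * paravirt backend can later be hooked in behind the same signature. */
void native_flush_tlb_others(const cpumask_t *cpumask,
			     struct mm_struct *mm, unsigned long va);

#ifndef CONFIG_PARAVIRT
/* Without paravirt, callers keep writing flush_tlb_others(mask, mm, va);
 * the macro passes the mask by address to the native implementation. */
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
#endif

With a shared interface like this in place, each variant can drop its private FLUSH_ALL value (-1ULL in smp_64.c, 0xffffffff in voyager_smp.c) in favour of the common TLB_FLUSH_ALL, as the hunks below do.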
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/smp_64.c	12
-rw-r--r--	arch/x86/mach-voyager/voyager_smp.c	7
-rw-r--r--	arch/x86/mm/boot_ioremap_32.c	2
3 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 62b0f2a1b1e8..7142447b5666 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -55,7 +55,6 @@ union smp_flush_state {
 		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
-#define FLUSH_ALL	-1ULL
 		spinlock_t tlbstate_lock;
 	};
 	char pad[SMP_CACHE_BYTES];
@@ -153,7 +152,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (f->flush_va == FLUSH_ALL)
+			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(f->flush_va);
@@ -166,11 +165,12 @@ out:
 	add_pda(irq_tlb_count, 1);
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-			     unsigned long va)
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+			     unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
+	cpumask_t cpumask = *cpumaskp;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -223,7 +223,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -242,7 +242,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 			leave_mm(smp_processor_id());
 	}
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 981def2b4e9b..b472a2df0b7f 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -800,7 +800,6 @@ static void smp_reschedule_interrupt(void)
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL	0xffffffff
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -834,7 +833,7 @@ static void smp_invalidate_interrupt(void)
 
 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(flush_va);
@@ -903,7 +902,7 @@ void flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -923,7 +922,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 			leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index f14da2a53ece..b20f74a2770f 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -57,7 +57,7 @@ static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
 	pte = boot_vaddr_to_pte(virtual_source);
 	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
 		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
-		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
+		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
 	}
 }
 