Diffstat (limited to 'arch/ia64/kernel')
 arch/ia64/kernel/cyclone.c |  2 +-
 arch/ia64/kernel/smp.c     | 14 ++++++--------
 arch/ia64/kernel/time.c    |  4 ++--
 3 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 790ef0d87e12..71e35864d2e2 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -21,7 +21,7 @@ void __init cyclone_setup(void)
 
 static void __iomem *cyclone_mc;
 
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
 {
 	return (cycle_t)readq((void __iomem *)cyclone_mc);
 }
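
The prototype change above tracks the clocksource API, whose read hook receives the clocksource it belongs to. A minimal sketch of how a callback with the new signature is wired into a clocksource follows; the struct shown here, its rating, mask width and flags are illustrative assumptions, not taken from this diff (mult/shift setup at registration time is omitted).

#include <linux/clocksource.h>
#include <asm/io.h>

static void __iomem *cyclone_mc;

/* Read hook with the updated prototype: the owning clocksource is passed in,
 * even though this particular callback does not need it. */
static cycle_t read_cyclone(struct clocksource *cs)
{
	return (cycle_t)readq((void __iomem *)cyclone_mc);
}

/* Illustrative wiring only. */
static struct clocksource clocksource_cyclone = {
	.name	= "cyclone",
	.rating	= 300,
	.read	= read_cyclone,	/* must match cycle_t (*)(struct clocksource *) */
	.mask	= CLOCKSOURCE_MASK(40),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

The same prototype change is applied to itc_get_cycles() in time.c below.
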
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 2ea4199d9c57..5230eaafd83f 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space. It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
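
Taken together, the smp.c hunk replaces the broadcast on_each_cpu() flush with one limited to mm->cpu_vm_mask, followed by a direct flush of the local CPU with interrupts off. A sketch of the resulting pattern, assuming the usual preempt_disable() pairing and the headers already pulled in by smp.c; the wrapper name is hypothetical, the real code sits inside smp_flush_tlb_mm(), whose early-return path is outside this hunk.

#include <linux/smp.h>

static void flush_mm_users(struct mm_struct *mm)	/* hypothetical name */
{
	preempt_disable();

	/* IPI only the CPUs that have run in this address space ... */
	smp_call_function_mask(mm->cpu_vm_mask,
			       (void (*)(void *))local_finish_flush_tlb_mm,
			       mm, 1);

	/* ... then flush the local CPU directly, with interrupts disabled to
	 * mirror the IPI-handler context the function otherwise runs in. */
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();

	preempt_enable();
}
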
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 641c8b61c4f1..604c1a35db33 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -33,7 +33,7 @@
 
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
 	.lock = SEQLOCK_UNLOCKED,
@@ -383,7 +383,7 @@ ia64_init_itm (void)
 	}
 }
 
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
 {
 	u64 lcycle, now, ret;
 
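
Both time.c hunks make the same adjustment as cyclone.c: the forward declaration and the definition of itc_get_cycles() gain the struct clocksource * parameter so they keep matching the read callback type. Roughly, the core then hands each clocksource to its own read hook; the helper below is illustrative and not part of this diff.

#include <linux/clocksource.h>

/* Illustrative only: with the new prototype the caller passes the clocksource
 * down, so a driver can reach per-instance data through 'cs' rather than a
 * file-scope static. */
static inline cycle_t read_clocksource(struct clocksource *cs)
{
	return cs->read(cs);	/* e.g. dispatches to itc_get_cycles(cs) */
}
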