diff options
| author | Bjoern Brandenburg <bbb@mpi-sws.org> | 2015-08-09 07:18:46 -0400 |
|---|---|---|
| committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2015-08-09 06:21:17 -0400 |
| commit | 5014e7011964ff46b2d73cf91a05ed9eed5a8fa2 (patch) | |
| tree | 76fad060cf673112d92a4f5f2d9b9423383610f6 /arch/x86/kernel | |
| parent | fc6ac04ddc314b9cff5bdb92c8330569658076a3 (diff) | |
Add hrtimer_start_on() support
This patch adds hrtimer_start_on(), which allows arming timers on
remote CPUs. This is needed to avoid timer interrupts on "shielded"
CPUs and is also useful for implementing semi-partitioned schedulers.
Diffstat (limited to 'arch/x86/kernel')
| -rw-r--r-- | arch/x86/kernel/entry_64.S | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/irqinit.c | 3 | ||||
| -rw-r--r-- | arch/x86/kernel/smp.c | 38 |
3 files changed, 42 insertions, 1 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 02c2eff7478d..d810f5fea04f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
| @@ -947,6 +947,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \ | |||
| 947 | call_function_interrupt smp_call_function_interrupt | 947 | call_function_interrupt smp_call_function_interrupt |
| 948 | apicinterrupt RESCHEDULE_VECTOR \ | 948 | apicinterrupt RESCHEDULE_VECTOR \ |
| 949 | reschedule_interrupt smp_reschedule_interrupt | 949 | reschedule_interrupt smp_reschedule_interrupt |
| 950 | apicinterrupt PULL_TIMERS_VECTOR \ | ||
| 951 | pull_timers_interrupt smp_pull_timers_interrupt | ||
| 950 | #endif | 952 | #endif |
| 951 | 953 | ||
| 952 | apicinterrupt ERROR_APIC_VECTOR \ | 954 | apicinterrupt ERROR_APIC_VECTOR \ |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index cd10a6437264..834496ed9536 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
| @@ -115,6 +115,9 @@ static void __init smp_intr_init(void) | |||
| 115 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | 115 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, |
| 116 | call_function_single_interrupt); | 116 | call_function_single_interrupt); |
| 117 | 117 | ||
| 118 | /* IPI for hrtimer pulling on remote cpus */ | ||
| 119 | alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt); | ||
| 120 | |||
| 118 | /* Low priority IPI to cleanup after moving an irq */ | 121 | /* Low priority IPI to cleanup after moving an irq */ |
| 119 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 122 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
| 120 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | 123 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index be8e1bde07aa..7ce9fb210d55 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
| @@ -24,6 +24,8 @@ | |||
| 24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
| 25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
| 26 | 26 | ||
| 27 | #include <litmus/debug_trace.h> | ||
| 28 | |||
| 27 | #include <asm/mtrr.h> | 29 | #include <asm/mtrr.h> |
| 28 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
| 29 | #include <asm/mmu_context.h> | 31 | #include <asm/mmu_context.h> |
| @@ -164,6 +166,16 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) | |||
| 164 | return NMI_HANDLED; | 166 | return NMI_HANDLED; |
| 165 | } | 167 | } |
| 166 | 168 | ||
| 169 | /* trigger timers on remote cpu */ | ||
| 170 | void smp_send_pull_timers(int cpu) | ||
| 171 | { | ||
| 172 | if (unlikely(cpu_is_offline(cpu))) { | ||
| 173 | WARN_ON(1); | ||
| 174 | return; | ||
| 175 | } | ||
| 176 | apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR); | ||
| 177 | } | ||
| 178 | |||
| 167 | /* | 179 | /* |
| 168 | * this function calls the 'stop' function on all other CPUs in the system. | 180 | * this function calls the 'stop' function on all other CPUs in the system. |
| 169 | */ | 181 | */ |
| @@ -216,7 +228,7 @@ static void native_stop_other_cpus(int wait) | |||
| 216 | while (num_online_cpus() > 1 && (wait || timeout--)) | 228 | while (num_online_cpus() > 1 && (wait || timeout--)) |
| 217 | udelay(1); | 229 | udelay(1); |
| 218 | } | 230 | } |
| 219 | 231 | ||
| 220 | /* if the REBOOT_VECTOR didn't work, try with the NMI */ | 232 | /* if the REBOOT_VECTOR didn't work, try with the NMI */ |
| 221 | if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { | 233 | if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { |
| 222 | if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, | 234 | if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, |
| @@ -341,6 +353,30 @@ static int __init nonmi_ipi_setup(char *str) | |||
| 341 | 353 | ||
| 342 | __setup("nonmi_ipi", nonmi_ipi_setup); | 354 | __setup("nonmi_ipi", nonmi_ipi_setup); |
| 343 | 355 | ||
| 356 | extern void hrtimer_pull(void); | ||
| 357 | |||
| 358 | static inline void __smp_pull_timers_interrupt(void) | ||
| 359 | { | ||
| 360 | TRACE("pull timer interrupt\n"); | ||
| 361 | hrtimer_pull(); | ||
| 362 | } | ||
| 363 | |||
| 364 | __visible void smp_pull_timers_interrupt(struct pt_regs *regs) | ||
| 365 | { | ||
| 366 | smp_entering_irq(); | ||
| 367 | __smp_pull_timers_interrupt(); | ||
| 368 | exiting_irq(); | ||
| 369 | } | ||
| 370 | |||
| 371 | __visible void smp_trace_pull_timers_interrupt(struct pt_regs *regs) | ||
| 372 | { | ||
| 373 | smp_entering_irq(); | ||
| 374 | trace_call_function_single_entry(PULL_TIMERS_VECTOR); | ||
| 375 | __smp_pull_timers_interrupt(); | ||
| 376 | trace_call_function_single_exit(PULL_TIMERS_VECTOR); | ||
| 377 | exiting_irq(); | ||
| 378 | } | ||
| 379 | |||
| 344 | struct smp_ops smp_ops = { | 380 | struct smp_ops smp_ops = { |
| 345 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | 381 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, |
| 346 | .smp_prepare_cpus = native_smp_prepare_cpus, | 382 | .smp_prepare_cpus = native_smp_prepare_cpus, |
