Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  | 95
 1 file changed, 95 insertions(+), 0 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2043c08d36c..2391745f656 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -46,6 +46,8 @@
 #include <linux/sched.h>
 #include <linux/timer.h>
 
+#include <litmus/litmus.h>
+
 #include <asm/uaccess.h>
 
 #include <trace/events/timer.h>
@@ -1028,6 +1030,98 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
+#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS
+
+/**
+ * hrtimer_start_on_info_init - Initialize hrtimer_start_on_info
+ */
+void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info)
+{
+	memset(info, 0, sizeof(struct hrtimer_start_on_info));
+	atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
+}
+
+/**
+ * hrtimer_pull - PULL_TIMERS_VECTOR callback on remote cpu
+ */
+void hrtimer_pull(void)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_start_on_info *info;
+	struct list_head *pos, *safe, list;
+
+	raw_spin_lock(&base->lock);
+	list_replace_init(&base->to_pull, &list);
+	raw_spin_unlock(&base->lock);
+
+	list_for_each_safe(pos, safe, &list) {
+		info = list_entry(pos, struct hrtimer_start_on_info, list);
+		TRACE("pulled timer 0x%x\n", info->timer);
+		list_del(pos);
+		hrtimer_start(info->timer, info->time, info->mode);
+	}
+}
+
+/**
+ * hrtimer_start_on - trigger timer arming on remote cpu
+ * @cpu:	remote cpu
+ * @info:	save timer information for enqueuing on remote cpu
+ * @timer:	timer to be pulled
+ * @time:	expire time
+ * @mode:	timer mode
+ */
+int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
+		     struct hrtimer *timer, ktime_t time,
+		     const enum hrtimer_mode mode)
+{
+	unsigned long flags;
+	struct hrtimer_cpu_base* base;
+	int in_use = 0, was_empty;
+
+	/* serialize access to info through the timer base */
+	lock_hrtimer_base(timer, &flags);
+
+	in_use = (atomic_read(&info->state) != HRTIMER_START_ON_INACTIVE);
+	if (!in_use) {
+		INIT_LIST_HEAD(&info->list);
+		info->timer = timer;
+		info->time = time;
+		info->mode = mode;
+		/* mark as in use */
+		atomic_set(&info->state, HRTIMER_START_ON_QUEUED);
+	}
+
+	unlock_hrtimer_base(timer, &flags);
+
+	if (!in_use) {
+		/* initiate pull */
+		preempt_disable();
+		if (cpu == smp_processor_id()) {
+			/* start timer locally; we may get called
+			 * with rq->lock held, do not wake up anything
+			 */
+			TRACE("hrtimer_start_on: starting on local CPU\n");
+			__hrtimer_start_range_ns(info->timer, info->time,
+						 0, info->mode, 0);
+		} else {
+			TRACE("hrtimer_start_on: pulling to remote CPU\n");
+			base = &per_cpu(hrtimer_bases, cpu);
+			raw_spin_lock_irqsave(&base->lock, flags);
+			was_empty = list_empty(&base->to_pull);
+			list_add(&info->list, &base->to_pull);
+			raw_spin_unlock_irqrestore(&base->lock, flags);
+			if (was_empty)
+				/* only send IPI if no one else
+				 * has done so already
+				 */
+				smp_send_pull_timers(cpu);
+		}
+		preempt_enable();
+	}
+	return in_use;
+}
+
+#endif
 
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
@@ -1627,6 +1721,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	}
 
 	hrtimer_init_hres(cpu_base);
+	INIT_LIST_HEAD(&cpu_base->to_pull);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
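
For context, here is a minimal caller-side sketch of the API added by this patch. It is not part of the patch: the struct "remote_release", its functions, and the on_release() callback are illustrative assumptions; only hrtimer_start_on_info_init() and hrtimer_start_on() come from the code above. The sketch arms an absolute-time hrtimer on a chosen CPU; if that CPU is remote, the request is queued on its to_pull list and handed over via the PULL_TIMERS_VECTOR IPI, which hrtimer_pull() services on the target CPU.

/* Illustrative sketch only -- names other than the two hrtimer_start_on*
 * functions are hypothetical and not defined by this patch.
 */
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct remote_release {
	struct hrtimer               timer;
	struct hrtimer_start_on_info info;
};

static enum hrtimer_restart on_release(struct hrtimer *timer)
{
	/* runs on the CPU the timer was ultimately armed on */
	return HRTIMER_NORESTART;
}

static void remote_release_init(struct remote_release *rel)
{
	hrtimer_init(&rel->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	rel->timer.function = on_release;
	/* zero the info block and mark it HRTIMER_START_ON_INACTIVE */
	hrtimer_start_on_info_init(&rel->info);
}

static void remote_release_arm(struct remote_release *rel, int cpu, ktime_t when)
{
	/* returns non-zero if a previous request on this info is still queued */
	if (hrtimer_start_on(cpu, &rel->info, &rel->timer, when, HRTIMER_MODE_ABS))
		printk(KERN_WARNING "hrtimer_start_on: info still in use\n");
}

Note that hrtimer_start_on() only sends the IPI when it adds the first entry to an empty to_pull list (the was_empty check), so a burst of requests targeting the same CPU costs a single interrupt.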