author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 23:45:13 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 23:57:07 -0400
commit    a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree      ebdf77a3cf491c0d0b77af3d9622f33013af5856 /kernel
parent    6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to the new schema for spinlocks (tglx 20091217):

  spinlock      - the weakest one, which might sleep in RT
  raw_spinlock  - a spinlock which always spins, even on RT
  arch_spinlock - the hardware-level, architecture-dependent implementation

Most probably, all the spinlocks changed by this commit will be true spinning locks (raw_spinlock) in PreemptRT, so hopefully few changes will be needed when porting Litmus to PreemptRT.

There are a couple of spinlocks that the kernel still defines as spinlock_t (therefore no changes are reported in this commit) that might cause us trouble:

- the wait_queue_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- an rwlock_t is used in the FIFO implementation in sched_trace.c; this probably needs to be changed to something that always spins in RT, at the expense of increased locking time.

This commit also fixes warnings and errors due to the need to include slab.h when using kmalloc() and friends.

This commit does not compile.
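As a minimal sketch of the conversion pattern this commit applies (the lock name, function, and allocation below are hypothetical examples, not code from the patch):

    #include <linux/spinlock.h>
    #include <linux/slab.h>  /* kmalloc()/kfree() now need an explicit include */

    /* Before: DEFINE_SPINLOCK(demo_lock); spinlock_t may sleep under PREEMPT_RT.
     * After: raw_spinlock_t always spins, even with PREEMPT_RT enabled. */
    static DEFINE_RAW_SPINLOCK(demo_lock);

    static void *demo_enqueue(size_t len)
    {
    	unsigned long flags;
    	void *buf = kmalloc(len, GFP_ATOMIC); /* declared in <linux/slab.h> */

    	if (!buf)
    		return NULL;

    	/* The rename is mechanical: spin_* calls become raw_spin_*. */
    	raw_spin_lock_irqsave(&demo_lock, flags);
    	/* ... short, non-sleeping critical section ... */
    	raw_spin_unlock_irqrestore(&demo_lock, flags);

    	return buf;
    }

The spinlock_t instances listed above live inside kernel-defined types (e.g. the wait-queue lock), so they cannot be converted from Litmus code alone; under PreemptRT they would become sleeping locks, which is why the message flags them.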
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c  8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c0b440b1f6ee..02e5097bf319 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1052,9 +1052,9 @@ void hrtimer_pull(void)
 	struct hrtimer_start_on_info *info;
 	struct list_head *pos, *safe, list;

-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	list_replace_init(&base->to_pull, &list);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);

 	list_for_each_safe(pos, safe, &list) {
 		info = list_entry(pos, struct hrtimer_start_on_info, list);
@@ -1108,10 +1108,10 @@ int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
 	} else {
 		TRACE("hrtimer_start_on: pulling to remote CPU\n");
 		base = &per_cpu(hrtimer_bases, cpu);
-		spin_lock_irqsave(&base->lock, flags);
+		raw_spin_lock_irqsave(&base->lock, flags);
 		was_empty = list_empty(&base->to_pull);
 		list_add(&info->list, &base->to_pull);
-		spin_unlock_irqrestore(&base->lock, flags);
+		raw_spin_unlock_irqrestore(&base->lock, flags);
 		if (was_empty)
 			/* only send IPI if no one else
 			 * has done so already