From 816a3cbda0cfdb3781da19e8d532c2f78ca49e18 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Fri, 21 May 2010 14:26:22 -0400 Subject: Change API: spinlock_t -> raw_spinlock_t Adapt to the new spinlock schema (tglx 20091217): spinlock - the weakest one, which might sleep in RT raw_spinlock - spinlock which always spins even on RT arch_spinlock - the hardware-level, architecture-dependent implementation ---- With future porting to PreemptRT in mind, probably all of the spinlocks changed in this patch should be true spinning locks (raw_spinlock). There are a couple of spinlocks that the kernel still defines as spinlock_t (therefore no changes are reported for them in this commit) and that might cause problems for us: - wait_queue_t lock is defined as spinlock_t; it is used in: * fmlp.c -- sem->wait.lock * sync.c -- ts_release.wait.lock - rwlock_t used in the fifo implementation in sched_trace.c * this probably needs to be changed to something that always spins in RT, at the expense of increased locking time. --- litmus/ftdev.c | 1 + litmus/litmus.c | 10 +++++----- litmus/rt_domain.c | 18 +++++++++--------- litmus/sched_cedf.c | 25 +++++++++++++------------ litmus/sched_gsn_edf.c | 36 ++++++++++++++++++------------------ litmus/sched_pfair.c | 25 +++++++++++++------------ litmus/sched_plugin.c | 14 +++++++------- litmus/sched_psn_edf.c | 24 ++++++++++++------------ 8 files changed, 78 insertions(+), 75 deletions(-) (limited to 'litmus') diff --git a/litmus/ftdev.c b/litmus/ftdev.c index 8b2d74d816a2..51dafaebf8a6 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/litmus/litmus.c b/litmus/litmus.c index 5bf848386e1c..b71fc819eb51 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -23,7 +23,7 @@ /* Number of RT tasks that exist in the system */ atomic_t rt_task_count = ATOMIC_INIT(0); -static DEFINE_SPINLOCK(task_transition_lock); +static DEFINE_RAW_SPINLOCK(task_transition_lock); /* synchronize plugin switching */ atomic_t cannot_use_plugin = ATOMIC_INIT(0); @@ -330,7 +330,7 @@ long litmus_admit_task(struct task_struct* tsk) INIT_LIST_HEAD(&tsk_rt(tsk)->list); /* avoid scheduler plugin changing underneath us */ - spin_lock_irqsave(&task_transition_lock, flags); + raw_spin_lock_irqsave(&task_transition_lock, flags); /* allocate heap node for this task */ tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); @@ -357,7 +357,7 @@ long litmus_admit_task(struct task_struct* tsk) } out_unlock: - spin_unlock_irqrestore(&task_transition_lock, flags); + raw_spin_unlock_irqrestore(&task_transition_lock, flags); out: return retval; } @@ -403,7 +403,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) smp_call_function(synch_on_plugin_switch, NULL, 0); /* stop task transitions */ - spin_lock_irqsave(&task_transition_lock, flags); + raw_spin_lock_irqsave(&task_transition_lock, flags); /* don't switch if there are active real-time tasks */ if (atomic_read(&rt_task_count) == 0) { @@ -421,7 +421,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) } else ret = -EBUSY; out: - spin_unlock_irqrestore(&task_transition_lock, flags); + raw_spin_unlock_irqrestore(&task_transition_lock, flags); atomic_set(&cannot_use_plugin, 0); return ret; } diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index 609ff0f82abb..8d5db6050723 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c @@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer) rh = container_of(timer, struct release_heap, timer); -
spin_lock_irqsave(&rh->dom->release_lock, flags); + raw_spin_lock_irqsave(&rh->dom->release_lock, flags); TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); /* remove from release queue */ list_del(&rh->list); - spin_unlock_irqrestore(&rh->dom->release_lock, flags); + raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags); TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); /* call release callback */ @@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt) list_del(pos); /* put into release heap while holding release_lock */ - spin_lock(&rt->release_lock); + raw_spin_lock(&rt->release_lock); TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); rh = get_release_heap(rt, t, 0); if (!rh) { /* need to use our own, but drop lock first */ - spin_unlock(&rt->release_lock); + raw_spin_unlock(&rt->release_lock); TRACE_TASK(t, "Dropped release_lock 0x%p\n", &rt->release_lock); reinit_release_heap(t); TRACE_TASK(t, "release_heap ready\n"); - spin_lock(&rt->release_lock); + raw_spin_lock(&rt->release_lock); TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", &rt->release_lock); @@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt) bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); TRACE_TASK(t, "arm_release_timer(): added to release heap\n"); - spin_unlock(&rt->release_lock); + raw_spin_unlock(&rt->release_lock); TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); /* To avoid arming the timer multiple times, we only let the @@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt, for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) INIT_LIST_HEAD(&rt->release_queue.slot[i]); - spin_lock_init(&rt->ready_lock); - spin_lock_init(&rt->release_lock); - spin_lock_init(&rt->tobe_lock); + raw_spin_lock_init(&rt->ready_lock); + raw_spin_lock_init(&rt->release_lock); + raw_spin_lock_init(&rt->tobe_lock); rt->check_resched = check; rt->release_jobs = release; diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index e57a11afda16..f5b77080cc4f 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -285,12 +286,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); unsigned long flags; - spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->lock, flags); __merge_ready(&cluster->domain, tasks); check_for_preemptions(cluster); - spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->lock, flags); } /* caller holds cedf_lock */ @@ -371,7 +372,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) int out_of_time, sleep, preempt, np, exists, blocks; struct task_struct* next = NULL; - spin_lock(&cluster->lock); + raw_spin_lock(&cluster->lock); clear_will_schedule(); /* sanity checking */ @@ -454,7 +455,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) if (exists) next = prev; - spin_unlock(&cluster->lock); + raw_spin_unlock(&cluster->lock); #ifdef WANT_ALL_SCHED_EVENTS TRACE("cedf_lock released, next=0x%p\n", next); @@ -496,7 +497,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); - spin_lock_irqsave(&cluster->domain.ready_lock, flags); + raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); /* setup job params */ release_at(t, litmus_clock()); @@ -513,7 +514,7 
@@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) t->rt_param.linked_on = NO_CPU; cedf_job_arrival(t); - spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); + raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); } static void cedf_task_wake_up(struct task_struct *task) @@ -526,7 +527,7 @@ static void cedf_task_wake_up(struct task_struct *task) cluster = task_cpu_cluster(task); - spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->lock, flags); /* We need to take suspensions because of semaphores into * account! If a job resumes after being suspended due to acquiring * a semaphore, it should never be treated as a new job release. @@ -549,7 +550,7 @@ static void cedf_task_wake_up(struct task_struct *task) } } cedf_job_arrival(task); - spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->lock, flags); } static void cedf_task_block(struct task_struct *t) @@ -562,9 +563,9 @@ static void cedf_task_block(struct task_struct *t) cluster = task_cpu_cluster(t); /* unlink if necessary */ - spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->lock, flags); unlink(t); - spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->lock, flags); BUG_ON(!is_realtime(t)); } @@ -576,13 +577,13 @@ static void cedf_task_exit(struct task_struct * t) cedf_domain_t *cluster = task_cpu_cluster(t); /* unlink if necessary */ - spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->lock, flags); unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; tsk_rt(t)->scheduled_on = NO_CPU; } - spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->lock, flags); BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 6137c74729cb..c0c63eba70ce 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) { unsigned long flags; - spin_lock_irqsave(&gsnedf_lock, flags); + raw_spin_lock_irqsave(&gsnedf_lock, flags); __merge_ready(rt, tasks); check_for_preemptions(); - spin_unlock_irqrestore(&gsnedf_lock, flags); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); } /* caller holds gsnedf_lock */ @@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) if (gsnedf.release_master == entry->cpu) return NULL; - spin_lock(&gsnedf_lock); + raw_spin_lock(&gsnedf_lock); clear_will_schedule(); /* sanity checking */ @@ -471,7 +471,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) if (exists) next = prev; - spin_unlock(&gsnedf_lock); + raw_spin_unlock(&gsnedf_lock); #ifdef WANT_ALL_SCHED_EVENTS TRACE("gsnedf_lock released, next=0x%p\n", next); @@ -509,7 +509,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) TRACE("gsn edf: task new %d\n", t->pid); - spin_lock_irqsave(&gsnedf_lock, flags); + raw_spin_lock_irqsave(&gsnedf_lock, flags); /* setup job params */ release_at(t, litmus_clock()); @@ -532,7 +532,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) t->rt_param.linked_on = NO_CPU; gsnedf_job_arrival(t); - spin_unlock_irqrestore(&gsnedf_lock, flags); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); } static void gsnedf_task_wake_up(struct task_struct *task) @@ -542,7 +542,7 @@ static void 
gsnedf_task_wake_up(struct task_struct *task) TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); - spin_lock_irqsave(&gsnedf_lock, flags); + raw_spin_lock_irqsave(&gsnedf_lock, flags); /* We need to take suspensions because of semaphores into * account! If a job resumes after being suspended due to acquiring * a semaphore, it should never be treated as a new job release. @@ -565,7 +565,7 @@ static void gsnedf_task_wake_up(struct task_struct *task) } } gsnedf_job_arrival(task); - spin_unlock_irqrestore(&gsnedf_lock, flags); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); } static void gsnedf_task_block(struct task_struct *t) @@ -575,9 +575,9 @@ static void gsnedf_task_block(struct task_struct *t) TRACE_TASK(t, "block at %llu\n", litmus_clock()); /* unlink if necessary */ - spin_lock_irqsave(&gsnedf_lock, flags); + raw_spin_lock_irqsave(&gsnedf_lock, flags); unlink(t); - spin_unlock_irqrestore(&gsnedf_lock, flags); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); BUG_ON(!is_realtime(t)); } @@ -588,13 +588,13 @@ static void gsnedf_task_exit(struct task_struct * t) unsigned long flags; /* unlink if necessary */ - spin_lock_irqsave(&gsnedf_lock, flags); + raw_spin_lock_irqsave(&gsnedf_lock, flags); unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; tsk_rt(t)->scheduled_on = NO_CPU; } - spin_unlock_irqrestore(&gsnedf_lock, flags); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); @@ -630,7 +630,7 @@ static void update_queue_position(struct task_struct *holder) gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); } else { /* holder may be queued: first stop queue changes */ - spin_lock(&gsnedf.release_lock); + raw_spin_lock(&gsnedf.release_lock); if (is_queued(holder)) { TRACE_TASK(holder, "%s: is queued\n", __FUNCTION__); @@ -648,7 +648,7 @@ static void update_queue_position(struct task_struct *holder) TRACE_TASK(holder, "%s: is NOT queued => Done.\n", __FUNCTION__); } - spin_unlock(&gsnedf.release_lock); + raw_spin_unlock(&gsnedf.release_lock); /* If holder was enqueued in a release heap, then the following * preemption check is pointless, but we can't easily detect @@ -682,7 +682,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem, if (edf_higher_prio(new_waiter, sem->hp.task)) { TRACE_TASK(new_waiter, " boosts priority via %p\n", sem); /* called with IRQs disabled */ - spin_lock(&gsnedf_lock); + raw_spin_lock(&gsnedf_lock); /* store new highest-priority task */ sem->hp.task = new_waiter; if (sem->holder) { @@ -694,7 +694,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem, sem->holder->rt_param.inh_task = new_waiter; update_queue_position(sem->holder); } - spin_unlock(&gsnedf_lock); + raw_spin_unlock(&gsnedf_lock); } return 0; @@ -740,7 +740,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) if (t->rt_param.inh_task) { /* interrupts already disabled by PI code */ - spin_lock(&gsnedf_lock); + raw_spin_lock(&gsnedf_lock); /* Reset inh_task to NULL. 
*/ t->rt_param.inh_task = NULL; @@ -748,7 +748,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) /* Check if rescheduling is necessary */ unlink(t); gsnedf_job_arrival(t); - spin_unlock(&gsnedf_lock); + raw_spin_unlock(&gsnedf_lock); } return ret; diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 2ea39223e7f0..ea77d3295290 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time) /* called with interrupts disabled */ PTRACE("--- Q %lu at %llu PRE-SPIN\n", time, litmus_clock()); - spin_lock(&pfair_lock); + raw_spin_lock(&pfair_lock); PTRACE("<<< Q %lu at %llu\n", time, litmus_clock()); @@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time) } PTRACE(">>> Q %lu at %llu\n", time, litmus_clock()); - spin_unlock(&pfair_lock); + raw_spin_unlock(&pfair_lock); } static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state) @@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) int blocks; struct task_struct* next = NULL; - spin_lock(&pfair_lock); + raw_spin_lock(&pfair_lock); blocks = is_realtime(prev) && !is_running(prev); @@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) tsk_rt(next)->scheduled_on = state->cpu; } - spin_unlock(&pfair_lock); + raw_spin_unlock(&pfair_lock); if (next) TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n", @@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running) TRACE("pfair: task new %d state:%d\n", t->pid, t->state); - spin_lock_irqsave(&pfair_lock, flags); + raw_spin_lock_irqsave(&pfair_lock, flags); if (running) t->rt_param.scheduled_on = task_cpu(t); else @@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running) pfair_add_release(t); check_preempt(t); - spin_unlock_irqrestore(&pfair_lock, flags); + raw_spin_unlock_irqrestore(&pfair_lock, flags); } static void pfair_task_wake_up(struct task_struct *t) @@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t) TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n", litmus_clock(), cur_release(t), pfair_time); - spin_lock_irqsave(&pfair_lock, flags); + raw_spin_lock_irqsave(&pfair_lock, flags); /* It is a little unclear how to deal with Pfair * tasks that block for a while and then wake. For now, @@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t) check_preempt(t); - spin_unlock_irqrestore(&pfair_lock, flags); + raw_spin_unlock_irqrestore(&pfair_lock, flags); TRACE_TASK(t, "wake up done at %llu\n", litmus_clock()); } @@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t) * might not be the same as the CPU that the PFAIR scheduler * has chosen for it. 
*/ - spin_lock_irqsave(&pfair_lock, flags); + raw_spin_lock_irqsave(&pfair_lock, flags); TRACE_TASK(t, "RIP, state:%d\n", t->state); drop_all_references(t); - spin_unlock_irqrestore(&pfair_lock, flags); + raw_spin_unlock_irqrestore(&pfair_lock, flags); kfree(t->rt_param.pfair); t->rt_param.pfair = NULL; @@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start) BUG_ON(!is_realtime(task)); - spin_lock_irqsave(&pfair_lock, flags); + raw_spin_lock_irqsave(&pfair_lock, flags); release_at(task, start); release = time2quanta(start, CEIL); @@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start) */ tsk_pfair(task)->sporadic_release = 0; - spin_unlock_irqrestore(&pfair_lock, flags); + raw_spin_unlock_irqrestore(&pfair_lock, flags); } static void init_subtask(struct subtask* sub, unsigned long i, diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 3767b30e610a..3543b7baff53 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -187,7 +187,7 @@ struct sched_plugin *litmus = &linux_sched_plugin; /* the list of registered scheduling plugins */ static LIST_HEAD(sched_plugins); -static DEFINE_SPINLOCK(sched_plugins_lock); +static DEFINE_RAW_SPINLOCK(sched_plugins_lock); #define CHECK(func) {\ if (!plugin->func) \ @@ -220,9 +220,9 @@ int register_sched_plugin(struct sched_plugin* plugin) if (!plugin->release_at) plugin->release_at = release_at; - spin_lock(&sched_plugins_lock); + raw_spin_lock(&sched_plugins_lock); list_add(&plugin->list, &sched_plugins); - spin_unlock(&sched_plugins_lock); + raw_spin_unlock(&sched_plugins_lock); return 0; } @@ -234,7 +234,7 @@ struct sched_plugin* find_sched_plugin(const char* name) struct list_head *pos; struct sched_plugin *plugin; - spin_lock(&sched_plugins_lock); + raw_spin_lock(&sched_plugins_lock); list_for_each(pos, &sched_plugins) { plugin = list_entry(pos, struct sched_plugin, list); if (!strcmp(plugin->plugin_name, name)) @@ -243,7 +243,7 @@ struct sched_plugin* find_sched_plugin(const char* name) plugin = NULL; out_unlock: - spin_unlock(&sched_plugins_lock); + raw_spin_unlock(&sched_plugins_lock); return plugin; } @@ -253,13 +253,13 @@ int print_sched_plugins(char* buf, int max) struct list_head *pos; struct sched_plugin *plugin; - spin_lock(&sched_plugins_lock); + raw_spin_lock(&sched_plugins_lock); list_for_each(pos, &sched_plugins) { plugin = list_entry(pos, struct sched_plugin, list); count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name); if (max - count <= 0) break; } - spin_unlock(&sched_plugins_lock); + raw_spin_unlock(&sched_plugins_lock); return count; } diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index af0b30cb8b89..e50b27391d21 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) int out_of_time, sleep, preempt, np, exists, blocks, resched; - spin_lock(&pedf->slock); + raw_spin_lock(&pedf->slock); /* sanity checking * differently from gedf, when a task exits (dead) @@ -203,7 +203,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) } pedf->scheduled = next; - spin_unlock(&pedf->slock); + raw_spin_unlock(&pedf->slock); return next; } @@ -226,7 +226,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) /* The task should be running in the queue, otherwise signal * code will try to wake it up with fatal consequences. 
*/ - spin_lock_irqsave(&pedf->slock, flags); + raw_spin_lock_irqsave(&pedf->slock, flags); if (running) { /* there shouldn't be anything else running at the time */ BUG_ON(pedf->scheduled); @@ -236,7 +236,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) /* maybe we have to reschedule */ preempt(pedf); } - spin_unlock_irqrestore(&pedf->slock, flags); + raw_spin_unlock_irqrestore(&pedf->slock, flags); } static void psnedf_task_wake_up(struct task_struct *task) @@ -247,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task) lt_t now; TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); - spin_lock_irqsave(&pedf->slock, flags); + raw_spin_lock_irqsave(&pedf->slock, flags); BUG_ON(is_queued(task)); /* We need to take suspensions because of semaphores into * account! If a job resumes after being suspended due to acquiring @@ -272,7 +272,7 @@ static void psnedf_task_wake_up(struct task_struct *task) if (pedf->scheduled != task) requeue(task, edf); - spin_unlock_irqrestore(&pedf->slock, flags); + raw_spin_unlock_irqrestore(&pedf->slock, flags); TRACE_TASK(task, "wake up done\n"); } @@ -291,7 +291,7 @@ static void psnedf_task_exit(struct task_struct * t) psnedf_domain_t* pedf = task_pedf(t); rt_domain_t* edf; - spin_lock_irqsave(&pedf->slock, flags); + raw_spin_lock_irqsave(&pedf->slock, flags); if (is_queued(t)) { /* dequeue */ edf = task_edf(t); @@ -303,7 +303,7 @@ static void psnedf_task_exit(struct task_struct * t) TRACE_TASK(t, "RIP, now reschedule\n"); preempt(pedf); - spin_unlock_irqrestore(&pedf->slock, flags); + raw_spin_unlock_irqrestore(&pedf->slock, flags); } #ifdef CONFIG_FMLP @@ -323,7 +323,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem, edf = task_edf(new_waiter); /* interrupts already disabled */ - spin_lock(&pedf->slock); + raw_spin_lock(&pedf->slock); /* store new highest-priority task */ sem->hp.cpu_task[cpu] = new_waiter; @@ -348,7 +348,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem, if (edf_preemption_needed(edf, current)) preempt(pedf); - spin_unlock(&pedf->slock); + raw_spin_unlock(&pedf->slock); } return 0; @@ -415,7 +415,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem) /* Always check for delayed preemptions that might have become * necessary due to non-preemptive execution. */ - spin_lock(&pedf->slock); + raw_spin_lock(&pedf->slock); /* Reset inh_task to NULL. */ current->rt_param.inh_task = NULL; @@ -424,7 +424,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem) if (edf_preemption_needed(edf, current)) preempt(pedf); - spin_unlock(&pedf->slock); + raw_spin_unlock(&pedf->slock); return ret; -- cgit v1.2.2 From b21e106a3a9c1d74d92dc80e76fc4886d2a4e1aa Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Fri, 21 May 2010 14:35:12 -0400 Subject: Change APIs for sched_class and spinlock - get_rr_interval() changed signature - load_balance() and move_one_tak() are no longer needed - spinlock -> raw_spinlock --- litmus/sched_litmus.c | 37 ++++++++++--------------------------- 1 file changed, 10 insertions(+), 27 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index c1fc7748e590..0cdf284eb9c2 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -60,7 +60,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) */ was_running = is_running(prev); mb(); - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); /* Don't race with a concurrent switch. This could deadlock in * the case of cross or circular migrations. 
It's the job of @@ -91,7 +91,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) next = NULL; /* bail out */ - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); return next; } } @@ -139,7 +139,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) next = NULL; } /* release the other CPU's runqueue, but keep ours */ - spin_unlock(&other_rq->lock); + raw_spin_unlock(&other_rq->lock); } if (next) { next->rt_param.stack_in_use = rq->cpu; @@ -150,7 +150,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) } static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, - int wakeup) + int wakeup, bool head) { if (wakeup) { sched_trace_task_resume(p); @@ -243,7 +243,7 @@ static void prio_changed_litmus(struct rq *rq, struct task_struct *p, { } -unsigned int get_rr_interval_litmus(struct task_struct *p) +unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p) { /* return infinity */ return 0; @@ -261,31 +261,16 @@ static void set_curr_task_litmus(struct rq *rq) #ifdef CONFIG_SMP -/* execve tries to rebalance task in this scheduling domain */ +/* execve tries to rebalance task in this scheduling domain. + * We don't care about the scheduling domain; can gets called from + * exec, fork, wakeup. + */ static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags) { /* preemption is already disabled. * We don't want to change cpu here */ - return smp_processor_id(); -} - -/* we don't repartition at runtime */ - -static unsigned long -load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, int *this_best_prio) -{ - return 0; -} - -static int -move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, - struct sched_domain *sd, enum cpu_idle_type idle) -{ - return 0; + return task_cpu(p); } #endif @@ -303,8 +288,6 @@ const struct sched_class litmus_sched_class = { #ifdef CONFIG_SMP .select_task_rq = select_task_rq_litmus, - .load_balance = load_balance_litmus, - .move_one_task = move_one_task_litmus, .pre_schedule = pre_schedule_litmus, #endif -- cgit v1.2.2 From 3280f21d43ee541f97f8cda5792150d2dbec20d5 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Fri, 21 May 2010 14:37:06 -0400 Subject: Change API: kfifo and spinlock - kfifo needs to be defined and used differently (see include/linux/kfifo.h) - spinlock -> raw_spinlock Note that [linux/slab.h] should be now included when using kmalloc and friends --- litmus/sched_trace.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c index ad0b138d4b01..1fa2094b0495 100644 --- a/litmus/sched_trace.c +++ b/litmus/sched_trace.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -32,7 +33,7 @@ typedef struct { rwlock_t del_lock; /* the buffer */ - struct kfifo *kfifo; + struct kfifo kfifo; } ring_buffer_t; /* Main buffer structure */ @@ -49,25 +50,26 @@ typedef struct { void rb_init(ring_buffer_t* buf) { rwlock_init(&buf->del_lock); - buf->kfifo = NULL; } int rb_alloc_buf(ring_buffer_t* buf, unsigned int size) { unsigned long flags; + int ret = 0; write_lock_irqsave(&buf->del_lock, flags); - buf->kfifo = kfifo_alloc(size, GFP_ATOMIC, NULL); + /* kfifo size must be a power of 2 + * atm kfifo alloc is automatically rounding the size + */ + ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC); 
write_unlock_irqrestore(&buf->del_lock, flags); - if(IS_ERR(buf->kfifo)) { + if(ret < 0) printk(KERN_ERR "kfifo_alloc failed\n"); - return PTR_ERR(buf->kfifo); - } - return 0; + return ret; } int rb_free_buf(ring_buffer_t* buf) @@ -76,10 +78,8 @@ int rb_free_buf(ring_buffer_t* buf) write_lock_irqsave(&buf->del_lock, flags); - BUG_ON(!buf->kfifo); - kfifo_free(buf->kfifo); - - buf->kfifo = NULL; + BUG_ON(!kfifo_initialized(&buf->kfifo)); + kfifo_free(&buf->kfifo); write_unlock_irqrestore(&buf->del_lock, flags); @@ -98,12 +98,12 @@ int rb_put(ring_buffer_t* buf, char* mem, size_t len) read_lock_irqsave(&buf->del_lock, flags); - if (!buf->kfifo) { + if (!kfifo_initialized(&buf->kfifo)) { error = -ENODEV; goto out; } - if((__kfifo_put(buf->kfifo, mem, len)) < len) { + if((kfifo_in(&buf->kfifo, mem, len)) < len) { error = -ENOMEM; goto out; } @@ -120,12 +120,12 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len) int error = 0; read_lock_irqsave(&buf->del_lock, flags); - if (!buf->kfifo) { + if (!kfifo_initialized(&buf->kfifo)) { error = -ENODEV; goto out; } - error = __kfifo_get(buf->kfifo, (unsigned char*)mem, len); + error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len); out: read_unlock_irqrestore(&buf->del_lock, flags); @@ -135,7 +135,7 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len) /* * Device Driver management */ -static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_RAW_SPINLOCK(log_buffer_lock); static trace_buffer_t log_buffer; static void init_log_buffer(void) @@ -170,12 +170,12 @@ void sched_trace_log_message(const char* fmt, ...) buf = __get_cpu_var(fmt_buffer); len = vscnprintf(buf, MSG_SIZE, fmt, args); - spin_lock(&log_buffer_lock); + raw_spin_lock(&log_buffer_lock); /* Don't copy the trailing null byte, we don't want null bytes * in a text file. */ rb_put(&log_buffer.buf, buf, len); - spin_unlock(&log_buffer_lock); + raw_spin_unlock(&log_buffer_lock); local_irq_restore(flags); va_end(args); @@ -265,8 +265,8 @@ static int log_open(struct inode *in, struct file *filp) filp->private_data = tbuf; printk(KERN_DEBUG - "sched_trace kfifo at 0x%p with buffer starting at: 0x%p\n", - tbuf->buf.kfifo, &((tbuf->buf.kfifo)->buffer)); + "sched_trace kfifo with buffer starting at: 0x%p\n", + (tbuf->buf.kfifo).buffer); /* override printk() */ trace_override++; -- cgit v1.2.2
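As a rough, self-contained illustration of the two API changes this series migrates to (a struct kfifo embedded by value and manipulated through kfifo_alloc()/kfifo_in()/kfifo_out(), guarded by a raw spinlock that keeps spinning even under PreemptRT), a minimal kernel-module sketch could look like the following. All names here (demo_fifo, demo_lock, demo_log) are invented for the example and do not appear in the LITMUS^RT code; this is not part of the patches above.

/*
 * Sketch of the post-2.6.33 kfifo + raw_spinlock usage pattern.
 * The fifo is embedded by value (no pointer, no kfifo_alloc returning
 * a struct kfifo*), and the lock is a raw spinlock so it never sleeps,
 * even with PREEMPT_RT-style spinlock substitution.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static struct kfifo demo_fifo;          /* embedded kfifo, new-style API */
static DEFINE_RAW_SPINLOCK(demo_lock);  /* always spins, even on RT */

static int __init demo_init(void)
{
	/* size is rounded up to a power of two by kfifo_alloc() */
	return kfifo_alloc(&demo_fifo, 4096, GFP_KERNEL);
}

/* append a message to the fifo; callable from atomic context */
static void demo_log(const void *msg, unsigned int len)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	if (kfifo_initialized(&demo_fifo))
		kfifo_in(&demo_fifo, msg, len);   /* replaces __kfifo_put() */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}

static void __exit demo_exit(void)
{
	char buf[64];
	unsigned int n;

	demo_log("bye", 3);
	n = kfifo_out(&demo_fifo, buf, sizeof(buf)); /* replaces __kfifo_get() */
	pr_info("drained %u bytes\n", n);
	kfifo_free(&demo_fifo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This is roughly the shape sched_trace.c ends up with after the third patch, except that the real code additionally keeps an rwlock (del_lock) to serialize buffer allocation and teardown against concurrent readers and writers.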