From 5b8782ef8948c7aad808971f359401f1dc837c25 Mon Sep 17 00:00:00 2001 From: Bjoern Brandenburg Date: Fri, 15 Feb 2013 16:44:42 +0100 Subject: Disallow nesting of LITMUS^RT locks Nesting of locks was never supported in LITMUS^RT since the required analysis does not exist anyway. That is, as defined in the literature, the protocols implemented in LITMUS^RT have not been studied in conjunction with nested critical sections. In LITMUS^RT, attempting to nest locks could lead to silent or not-so-silent bugs. This patch makes this restriction explicit and returns EBUSY when a process attempts to nest resources. This is enforced on a protocol-by-protocol basis, which means that adding protocols with support for nesting in future versions is not affected by this change. Exception: PCP and SRP resources may be nested, but not within global critical sections. --- litmus/sched_gsn_edf.c | 8 ++++++++ litmus/sched_pfp.c | 36 ++++++++++++++++++++++++++++++++++++ litmus/sched_psn_edf.c | 9 +++++++++ litmus/srp.c | 16 +++++++++++++--- 4 files changed, 66 insertions(+), 3 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index b8548b885b35..8fdc8f68fcfb 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -773,6 +773,10 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; + /* prevent nested lock acquisition --- not supported by FMLP */ + if (tsk_rt(t)->num_locks_held) + return -EBUSY; + spin_lock_irqsave(&sem->wait.lock, flags); if (sem->owner) { @@ -817,6 +821,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) spin_unlock_irqrestore(&sem->wait.lock, flags); } + tsk_rt(t)->num_locks_held++; + return 0; } @@ -834,6 +840,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) goto out; } + tsk_rt(t)->num_locks_held--; + /* check if there are jobs waiting for this resource */ next = __waitqueue_remove_first(&sem->wait); if (next) { diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c index 
e3c3bd770efa..fc9a509f185d 100644 --- a/litmus/sched_pfp.c +++ b/litmus/sched_pfp.c @@ -555,6 +555,11 @@ int pfp_fmlp_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; + /* prevent nested lock acquisition --- not supported by FMLP */ + if (tsk_rt(t)->num_locks_held || + tsk_rt(t)->num_local_locks_held) + return -EBUSY; + spin_lock_irqsave(&sem->wait.lock, flags); /* tie-break by this point in time */ @@ -599,6 +604,8 @@ int pfp_fmlp_lock(struct litmus_lock* l) spin_unlock_irqrestore(&sem->wait.lock, flags); } + tsk_rt(t)->num_locks_held++; + return 0; } @@ -616,6 +623,8 @@ int pfp_fmlp_unlock(struct litmus_lock* l) goto out; } + tsk_rt(t)->num_locks_held--; + /* we lose the benefit of priority boosting */ unboost_priority(t); @@ -790,6 +799,11 @@ int pfp_mpcp_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; + /* prevent nested lock acquisition */ + if (tsk_rt(t)->num_locks_held || + tsk_rt(t)->num_local_locks_held) + return -EBUSY; + preempt_disable(); if (sem->vspin) @@ -840,6 +854,8 @@ int pfp_mpcp_lock(struct litmus_lock* l) spin_unlock_irqrestore(&sem->wait.lock, flags); } + tsk_rt(t)->num_locks_held++; + return 0; } @@ -857,6 +873,9 @@ int pfp_mpcp_unlock(struct litmus_lock* l) goto out; } + + tsk_rt(t)->num_locks_held--; + /* we lose the benefit of priority boosting */ unboost_priority(t); @@ -1249,12 +1268,18 @@ int pfp_pcp_lock(struct litmus_lock* l) if (!is_realtime(t) || from != to) return -EPERM; + /* prevent nested lock acquisition in global critical section */ + if (tsk_rt(t)->num_locks_held) + return -EBUSY; + preempt_disable(); pcp_raise_ceiling(sem, eprio); preempt_enable(); + tsk_rt(t)->num_local_locks_held++; + return 0; } @@ -1272,6 +1297,8 @@ int pfp_pcp_unlock(struct litmus_lock* l) goto out; } + tsk_rt(t)->num_local_locks_held--; + /* give it back */ pcp_lower_ceiling(sem); @@ -1428,6 +1455,11 @@ int pfp_dpcp_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; + /* prevent nested lock acquisition */ + 
if (tsk_rt(t)->num_locks_held || + tsk_rt(t)->num_local_locks_held) + return -EBUSY; + preempt_disable(); /* Priority-boost ourself *before* we suspend so that @@ -1444,6 +1476,8 @@ int pfp_dpcp_lock(struct litmus_lock* l) preempt_enable(); + tsk_rt(t)->num_locks_held++; + return 0; } @@ -1461,6 +1495,8 @@ int pfp_dpcp_unlock(struct litmus_lock* l) goto out; } + tsk_rt(t)->num_locks_held--; + home = sem->owner_cpu; /* give it back */ diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index 0e1675d2e572..c158f3532ba6 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -419,6 +419,11 @@ int psnedf_fmlp_lock(struct litmus_lock* l) if (!is_realtime(t)) return -EPERM; + /* prevent nested lock acquisition --- not supported by FMLP */ + if (tsk_rt(t)->num_locks_held || + tsk_rt(t)->num_local_locks_held) + return -EBUSY; + spin_lock_irqsave(&sem->wait.lock, flags); if (sem->owner) { @@ -459,6 +464,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l) spin_unlock_irqrestore(&sem->wait.lock, flags); } + tsk_rt(t)->num_locks_held++; + return 0; } @@ -476,6 +483,8 @@ int psnedf_fmlp_unlock(struct litmus_lock* l) goto out; } + tsk_rt(t)->num_locks_held--; + /* we lose the benefit of priority boosting */ unboost_priority(t); diff --git a/litmus/srp.c b/litmus/srp.c index 2ed4ec12a9d3..c88dbf2f580f 100644 --- a/litmus/srp.c +++ b/litmus/srp.c @@ -98,11 +98,16 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio) static int lock_srp_semaphore(struct litmus_lock* l) { + struct task_struct* t = current; struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock); - if (!is_realtime(current)) + if (!is_realtime(t)) return -EPERM; + /* prevent acquisition of local locks in global critical sections */ + if (tsk_rt(t)->num_locks_held) + return -EBUSY; + preempt_disable(); /* Update ceiling. 
*/ @@ -111,9 +116,11 @@ static int lock_srp_semaphore(struct litmus_lock* l) /* SRP invariant: all resources available */ BUG_ON(sem->owner != NULL); - sem->owner = current; + sem->owner = t; TRACE_CUR("acquired srp 0x%p\n", sem); + tsk_rt(t)->num_local_locks_held++; + preempt_enable(); return 0; @@ -121,12 +128,13 @@ static int lock_srp_semaphore(struct litmus_lock* l) static int unlock_srp_semaphore(struct litmus_lock* l) { + struct task_struct* t = current; struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock); int err = 0; preempt_disable(); - if (sem->owner != current) { + if (sem->owner != t) { err = -EINVAL; } else { /* Determine new system priority ceiling for this CPU. */ @@ -138,6 +146,8 @@ static int unlock_srp_semaphore(struct litmus_lock* l) /* Wake tasks on this CPU, if they exceed current ceiling. */ TRACE_CUR("released srp 0x%p\n", sem); wake_up_all(&__get_cpu_var(srp).ceiling_blocked); + + tsk_rt(t)->num_local_locks_held--; } preempt_enable(); -- cgit v1.2.2