author		Bjoern Brandenburg <bbb@mpi-sws.org>	2013-02-15 10:44:42 -0500
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-02-15 10:44:42 -0500
commit		5b8782ef8948c7aad808971f359401f1dc837c25 (patch)
tree		b6ceca72779e3030c7c3ea102f4a2f72676851f9 /litmus
parent		574cad9d2ac20c8df24aa008eecca39331df0bd6 (diff)
Disallow nesting of LITMUS^RT locks
Nesting of locks has never been supported in LITMUS^RT, since the required analysis does not exist: the protocols implemented in LITMUS^RT, as defined in the literature, have not been studied in conjunction with nested critical sections. Attempting to nest locks in LITMUS^RT could therefore lead to silent or not-so-silent bugs.

This patch makes the restriction explicit and returns -EBUSY when a process attempts to nest resources. The check is enforced on a protocol-by-protocol basis, so protocols with support for nesting can still be added in future versions without being affected by this change.

Exception: PCP and SRP resources may be nested, but not within global critical sections.
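To make the enforcement concrete, here is a minimal sketch of the acquisition-side pattern shared by the affected lock handlers. The per-task counters tsk_rt(t)->num_locks_held and tsk_rt(t)->num_local_locks_held are the ones used by this patch; demo_lock() and the elided protocol-specific steps are purely illustrative and do not correspond to any one protocol.

/* Illustrative sketch only --- not part of the patch. */
int demo_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;

	if (!is_realtime(t))
		return -EPERM;

	/* reject nested acquisition: the task may not already hold
	 * any other LITMUS^RT lock */
	if (tsk_rt(t)->num_locks_held ||
	    tsk_rt(t)->num_local_locks_held)
		return -EBUSY;

	/* ... protocol-specific acquisition (queueing, boosting, ...) ... */

	/* record that the task is now inside a critical section */
	tsk_rt(t)->num_locks_held++;

	return 0;
}

The corresponding unlock paths decrement the same counter once ownership has been verified, as the hunks below show.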
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/sched_gsn_edf.c	 8
-rw-r--r--	litmus/sched_pfp.c	36
-rw-r--r--	litmus/sched_psn_edf.c	 9
-rw-r--r--	litmus/srp.c	16
4 files changed, 66 insertions(+), 3 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b8548b885b35..8fdc8f68fcfb 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -773,6 +773,10 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -817,6 +821,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -834,6 +840,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* check if there are jobs waiting for this resource */
 	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index e3c3bd770efa..fc9a509f185d 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -555,6 +555,11 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	/* tie-break by this point in time */
@@ -599,6 +604,8 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -616,6 +623,8 @@ int pfp_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -790,6 +799,11 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	if (sem->vspin)
@@ -840,6 +854,8 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -857,6 +873,9 @@ int pfp_mpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -1249,12 +1268,18 @@ int pfp_pcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t) || from != to)
 		return -EPERM;
 
+	/* prevent nested lock acquisition in global critical section */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	pcp_raise_ceiling(sem, eprio);
 
 	preempt_enable();
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	return 0;
 }
 
@@ -1272,6 +1297,8 @@ int pfp_pcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_local_locks_held--;
+
 	/* give it back */
 	pcp_lower_ceiling(sem);
 
@@ -1428,6 +1455,11 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Priority-boost ourself *before* we suspend so that
@@ -1444,6 +1476,8 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 
 	preempt_enable();
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -1461,6 +1495,8 @@ int pfp_dpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	home = sem->owner_cpu;
 
 	/* give it back */
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 0e1675d2e572..c158f3532ba6 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -419,6 +419,11 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -459,6 +464,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -476,6 +483,8 @@ int psnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
diff --git a/litmus/srp.c b/litmus/srp.c
index 2ed4ec12a9d3..c88dbf2f580f 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -98,11 +98,16 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
 
 static int lock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 
-	if (!is_realtime(current))
+	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent acquisition of local locks in global critical sections */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Update ceiling. */
@@ -111,9 +116,11 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 	/* SRP invariant: all resources available */
 	BUG_ON(sem->owner != NULL);
 
-	sem->owner = current;
+	sem->owner = t;
 	TRACE_CUR("acquired srp 0x%p\n", sem);
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	preempt_enable();
 
 	return 0;
@@ -121,12 +128,13 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 
 static int unlock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 	int err = 0;
 
 	preempt_disable();
 
-	if (sem->owner != current) {
+	if (sem->owner != t) {
 		err = -EINVAL;
 	} else {
 		/* Determine new system priority ceiling for this CPU. */
@@ -138,6 +146,8 @@ static int unlock_srp_semaphore(struct litmus_lock* l)
 		/* Wake tasks on this CPU, if they exceed current ceiling. */
 		TRACE_CUR("released srp 0x%p\n", sem);
 		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+
+		tsk_rt(t)->num_local_locks_held--;
 	}
 
 	preempt_enable();
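For completeness, a hedged sketch of how the new restriction surfaces in userspace. The liblitmus wrappers litmus_lock()/litmus_unlock() and the litmus.h header are assumptions about the userspace library, not part of this patch; the object descriptors are assumed to have been opened elsewhere, and the error-reporting convention of the wrapper is left unspecified.

/* Illustrative only: a real-time task observing the new restriction
 * through the (assumed) liblitmus wrappers. */
#include <errno.h>
#include <stdio.h>
#include <litmus.h>	/* liblitmus userspace header (assumed) */

static void try_nested_locking(int outer_od, int inner_od)
{
	if (litmus_lock(outer_od) != 0)
		return;	/* could not acquire the first resource */

	/* With this patch, the nested attempt is rejected with EBUSY
	 * rather than being silently allowed. */
	if (litmus_lock(inner_od) != 0)
		fprintf(stderr, "nested lock attempt rejected (EBUSY expected)\n");
	else
		litmus_unlock(inner_od);	/* pre-patch behavior only */

	litmus_unlock(outer_od);
}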