author    Glenn Elliott <gelliott@cs.unc.edu>  2013-02-22 15:33:17 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-02-22 15:33:17 -0500
commit    a7e1b64848448d7c300288162e33004c59693510
tree      c57d67d33f3743ec043e991d39afde24aaf3e7c0
parent    d13125bb8be7c7031add37212c1553ad8ccc7c22
parent    5b8782ef8948c7aad808971f359401f1dc837c25
Merge remote-tracking branch 'github/staging' into wip-2012.3-gpu-stage-merge
-rw-r--r--  include/litmus/rt_param.h  |  5
-rw-r--r--  litmus/sched_gsn_edf.c     |  8
-rw-r--r--  litmus/sched_pfp.c         | 44
-rw-r--r--  litmus/sched_psn_edf.c     |  9
-rw-r--r--  litmus/sched_task_trace.c  |  3
-rw-r--r--  litmus/srp.c               | 16
6 files changed, 80 insertions, 5 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index de20eff7cf71..8d37d6e050c9 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -348,6 +348,11 @@ struct rt_param {
 	unsigned int priority_boosted:1;
 	/* If so, when did this start? */
 	lt_t boost_start_time;
+
+	/* How many LITMUS^RT locks does the task currently hold/wait for? */
+	unsigned int num_locks_held;
+	/* How many PCP/SRP locks does the task currently hold/wait for? */
+	unsigned int num_local_locks_held;
 #endif
 
 	/* user controlled parameters */
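
The two counters added above are what the per-protocol checks in the rest of this
merge key off of: num_locks_held counts suspension-based ("global") LITMUS^RT
locks, while num_local_locks_held counts PCP/SRP locks on the local partition.
A minimal sketch of the guard pattern the later hunks repeat (illustrative only,
not part of this merge; the helper name is made up):

/* Illustrative sketch, not part of the patch: the nesting guard repeated
 * by the lock() entry points below.  FMLP/MPCP/DPCP do not support nested
 * critical sections, so a request made while any lock is held (or waited
 * for) is rejected with -EBUSY. */
static inline int deny_nested_acquisition(struct task_struct* t)
{
	if (tsk_rt(t)->num_locks_held ||
	    tsk_rt(t)->num_local_locks_held)
		return -EBUSY;	/* already inside a critical section */
	return 0;		/* safe to proceed */
}
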
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 4589888fc652..46a10fa17044 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1569,6 +1569,10 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -1613,6 +1617,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -1630,6 +1636,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* check if there are jobs waiting for this resource */
 	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index fdcf8a41684a..56d5a61c25e4 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -571,6 +571,11 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	/* tie-break by this point in time */
@@ -615,6 +620,8 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -632,6 +639,8 @@ int pfp_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -807,6 +816,11 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	if (sem->vspin)
@@ -857,6 +871,8 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -874,6 +890,9 @@ int pfp_mpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -1267,12 +1286,18 @@ int pfp_pcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t) || from != to)
 		return -EPERM;
 
+	/* prevent nested lock acquisition in global critical section */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	pcp_raise_ceiling(sem, eprio);
 
 	preempt_enable();
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	return 0;
 }
 
@@ -1290,6 +1315,8 @@ int pfp_pcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_local_locks_held--;
+
 	/* give it back */
 	pcp_lower_ceiling(sem);
 
@@ -1310,7 +1337,9 @@ int pfp_pcp_open(struct litmus_lock* l, void* __user config)
 		/* we need to know the real-time priority */
 		return -EPERM;
 
-	if (get_user(cpu, (int*) config))
+	if (!config)
+		cpu = get_partition(t);
+	else if (get_user(cpu, (int*) config))
 		return -EFAULT;
 
 	/* make sure the resource location matches */
@@ -1445,6 +1474,11 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Priority-boost ourself *before* we suspend so that
@@ -1461,6 +1495,8 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 
 	preempt_enable();
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -1478,6 +1514,8 @@ int pfp_dpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	home = sem->owner_cpu;
 
 	/* give it back */
@@ -1633,7 +1671,9 @@ static long pfp_allocate_lock(struct litmus_lock **lock, int type,
 
 	case PCP_SEM:
 		/* Priority Ceiling Protocol */
-		if (get_user(cpu, (int*) config))
+		if (!config)
+			cpu = get_partition(current);
+		else if (get_user(cpu, (int*) config))
 			return -EFAULT;
 
 		if (!cpu_online(cpu))
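
Besides the nesting guards, the PCP changes above make the configuration pointer
optional: both pfp_pcp_open() and the PCP_SEM case of pfp_allocate_lock() now fall
back to the caller's own partition when no CPU is supplied. A hedged userspace
sketch, assuming liblitmus' litmus_open_lock() interface (the lock id and
namespace below are made up):

/* Hypothetical userspace sketch (assumes liblitmus' litmus_open_lock();
 * lock id and namespace are made up).  With the change above, passing a
 * NULL config pointer now means "bind the PCP semaphore to my own
 * partition" instead of failing with -EFAULT. */
int od = litmus_open_lock(PCP_SEM, /* lock_id */ 0, "pcp-demo", NULL);
if (od < 0)
	perror("litmus_open_lock");
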
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 63fa6103882a..fabff1be9bba 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -434,6 +434,11 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -474,6 +479,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -491,6 +498,8 @@ int psnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index e863eaf41b96..8d75437e7771 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -16,6 +16,9 @@
 #include <litmus/feather_trace.h>
 #include <litmus/ftdev.h>
 
+#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
+#define CREATE_TRACE_POINTS
+#endif
 
 #define NUM_EVENTS (1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11))
 
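
The CREATE_TRACE_POINTS addition follows the standard kernel tracepoint
convention: exactly one compilation unit defines the macro immediately before
including the trace event header, so the tracepoint bodies are instantiated
there. The hunk above only shows the definition; a sketch of the usual shape of
the pattern (the header path is illustrative):

/* Sketch of the standard tracepoint pattern; the header path below is
 * illustrative.  CREATE_TRACE_POINTS must be defined in exactly one .c
 * file, right before the trace event header is included, so that the
 * tracepoint instances are emitted in that translation unit only. */
#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
#define CREATE_TRACE_POINTS
#include <trace/events/litmus.h>
#endif
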
diff --git a/litmus/srp.c b/litmus/srp.c
index 5ffdc9e7dc5b..e04088bf2cb4 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -98,11 +98,16 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
 
 static int lock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 
-	if (!is_realtime(current))
+	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent acquisition of local locks in global critical sections */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Update ceiling. */
@@ -111,9 +116,11 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 	/* SRP invariant: all resources available */
 	BUG_ON(sem->owner != NULL);
 
-	sem->owner = current;
+	sem->owner = t;
 	TRACE_CUR("acquired srp 0x%p\n", sem);
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	preempt_enable();
 
 	return 0;
@@ -121,12 +128,13 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 
 static int unlock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 	int err = 0;
 
 	preempt_disable();
 
-	if (sem->owner != current) {
+	if (sem->owner != t) {
 		err = -EINVAL;
 	} else {
 		/* Determine new system priority ceiling for this CPU. */
@@ -138,6 +146,8 @@ static int unlock_srp_semaphore(struct litmus_lock* l)
 		/* Wake tasks on this CPU, if they exceed current ceiling. */
 		TRACE_CUR("released srp 0x%p\n", sem);
 		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+
+		tsk_rt(t)->num_local_locks_held--;
 	}
 
 	preempt_enable();
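
The SRP hunks mirror the PCP changes: the semaphore is accounted for in
num_local_locks_held (it is a partition-local lock), and acquisition is refused
while any global lock is held or waited for. A small sketch of the rules the new
checks enforce under P-FP (illustrative only, not part of this merge; the helper
names are made up):

/* Illustrative sketch, not part of the patch: the nesting rules the new
 * counters enforce.  Local (PCP/SRP) locks may nest among themselves, but
 * not inside a global critical section; global (FMLP/MPCP/DPCP) locks may
 * not nest at all. */
static inline int may_request_local_lock(struct task_struct* t)
{
	return tsk_rt(t)->num_locks_held == 0;
}

static inline int may_request_global_lock(struct task_struct* t)
{
	return tsk_rt(t)->num_locks_held == 0 &&
	       tsk_rt(t)->num_local_locks_held == 0;
}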