aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2012-02-02 04:40:07 -0500
committerBjoern Brandenburg <bbb@mpi-sws.org>2012-02-02 04:40:07 -0500
commit532478450c1e0a25ab92b2585a8c3ee61089d02b (patch)
treea759a7bd33eb3742a4b9e4901077feac3526f5c9
Import refactored schedcat codebase
Based on the code available at http://www.cs.unc.edu/~bbb/diss, this is a cleaned-up version of schedcat intended to serve as the basis for all further development. Notable improvements: more unit tests and a cleaner module structure.
-rw-r--r--.gitignore29
-rw-r--r--Makefile48
-rw-r--r--native/SConstruct135
-rw-r--r--native/include/edf/baker.h21
-rw-r--r--native/include/edf/baruah.h29
-rw-r--r--native/include/edf/bcl.h21
-rw-r--r--native/include/edf/bcl_iterative.h21
-rw-r--r--native/include/edf/ffdbf.h28
-rw-r--r--native/include/edf/gedf.h32
-rw-r--r--native/include/edf/gfb.h16
-rw-r--r--native/include/edf/load.h19
-rw-r--r--native/include/edf/rta.h33
-rw-r--r--native/include/edf/sim.h25
-rw-r--r--native/include/event.h47
-rw-r--r--native/include/math-helper.h20
-rw-r--r--native/include/res_io.h8
-rw-r--r--native/include/schedulability.h11
-rw-r--r--native/include/schedule_sim.h315
-rw-r--r--native/include/sharedres.h418
-rw-r--r--native/include/stl-helper.h31
-rw-r--r--native/include/task_io.h6
-rw-r--r--native/include/tasks.h200
-rw-r--r--native/interface/locking.i31
-rw-r--r--native/interface/sched.i39
-rw-r--r--native/interface/sim.i20
-rw-r--r--native/src/edf/baker.cpp68
-rw-r--r--native/src/edf/baruah.cpp315
-rw-r--r--native/src/edf/bcl.cpp82
-rw-r--r--native/src/edf/bcl_iterative.cpp105
-rw-r--r--native/src/edf/ffdbf.cpp298
-rw-r--r--native/src/edf/gedf.cpp54
-rw-r--r--native/src/edf/gfb.cpp24
-rw-r--r--native/src/edf/load.cpp48
-rw-r--r--native/src/edf/rta.cpp156
-rw-r--r--native/src/edf/sim.cpp103
-rw-r--r--native/src/schedule_sim.cpp67
-rw-r--r--native/src/sharedres.cpp2007
-rw-r--r--native/src/tasks.cpp232
-rw-r--r--native/src/testmain.cpp912
-rw-r--r--schedcat/__init__.py3
-rw-r--r--schedcat/generator/__init__.py0
-rw-r--r--schedcat/generator/tasks.py119
-rw-r--r--schedcat/generator/tasksets.py91
-rw-r--r--schedcat/locking/__init__.py0
-rw-r--r--schedcat/locking/bounds.py158
-rw-r--r--schedcat/mapping/__init__.py3
-rw-r--r--schedcat/mapping/binpack.py144
-rw-r--r--schedcat/mapping/rollback.py380
-rw-r--r--schedcat/model/__init__.py0
-rw-r--r--schedcat/model/resources.py54
-rw-r--r--schedcat/model/serialize.py191
-rw-r--r--schedcat/model/tasks.py147
-rw-r--r--schedcat/overheads/__init__.py0
-rw-r--r--schedcat/overheads/fp.py61
-rw-r--r--schedcat/overheads/jlfp.py84
-rw-r--r--schedcat/overheads/locking.py141
-rw-r--r--schedcat/overheads/model.py112
-rw-r--r--schedcat/overheads/pfair.py58
-rw-r--r--schedcat/overheads/quanta.py77
-rw-r--r--schedcat/sched/__init__.py23
-rw-r--r--schedcat/sched/edf/__init__.py129
-rw-r--r--schedcat/sched/edf/bak.py30
-rw-r--r--schedcat/sched/edf/bar.py108
-rw-r--r--schedcat/sched/edf/bcl.py30
-rw-r--r--schedcat/sched/edf/bcl_iterative.py62
-rw-r--r--schedcat/sched/edf/da.py94
-rw-r--r--schedcat/sched/edf/ffdbf.py131
-rw-r--r--schedcat/sched/edf/gfb.py12
-rw-r--r--schedcat/sched/edf/rta.py88
-rw-r--r--schedcat/sched/fp/__init__.py8
-rw-r--r--schedcat/sched/fp/rta.py69
-rw-r--r--schedcat/sched/pfair.py28
-rw-r--r--schedcat/sim/__init__.py10
-rw-r--r--schedcat/sim/edf.py17
-rw-r--r--schedcat/util/__init__.py3
-rw-r--r--schedcat/util/csv.py59
-rw-r--r--schedcat/util/iter.py44
-rw-r--r--schedcat/util/math.py143
-rw-r--r--schedcat/util/quantor.py16
-rw-r--r--schedcat/util/storage.py41
-rw-r--r--schedcat/util/time.py23
-rw-r--r--tests/__init__.py0
-rw-r--r--tests/__main__.py36
-rw-r--r--tests/binpack.py368
-rw-r--r--tests/edf.py140
-rw-r--r--tests/fp.py40
-rw-r--r--tests/generator.py81
-rw-r--r--tests/locking.py487
-rw-r--r--tests/model.py139
-rw-r--r--tests/overheads.py863
-rw-r--r--tests/pfair.py48
-rw-r--r--tests/quanta.py105
-rw-r--r--tests/sim.py26
-rw-r--r--tests/util.py101
94 files changed, 11499 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..971247d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,29 @@
1.DS_Store
2*~
3*.pyc
4*.pyo
5ts
6\#*
7.\#*
8*.zapped
9*.sedbak*
10.sample_py_sequential
11*.o
12*.os
13*.dblite
14.coverage
15native/testmain
16native/interface/*_wrap.cc
17native/*.py
18native/*.so
19native/config.log
20native/.config
21native/.sconf_temp
22schedcat/locking/_locking.so
23schedcat/locking/native.py
24schedcat/sched/_sched.so
25schedcat/sched/native.py
26schedcat/sim/_sim.so
27schedcat/sim/native.py
28tmp/
29TAGS
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..09358d0
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,48 @@
1
2# default tools
3PYTHON ?= python
4SCONS ?= scons
5ETAGS ?= etags
6
7.PHONY: all cpp clean clean-links test links
8
9all: links
10
11NCPUS := $(shell getconf _NPROCESSORS_ONLN)
12
13cpp:
14 (cd native; $(SCONS) -j $(NCPUS) )
15
16links: clean-links cpp
17 cd schedcat/sched; ln -s ../../native/_sched.so; ln -s ../../native/sched.py native.py
18 cd schedcat/locking; ln -s ../../native/_locking.so; ln -s ../../native/locking.py native.py
19 cd schedcat/sim; ln -s ../../native/_sim.so; ln -s ../../native/sim.py native.py
20
21clean-links:
22 cd schedcat/sched; rm -f _sched.so native.py
23 cd schedcat/locking; rm -f _locking.so native.py
24 cd schedcat/sim; rm -f _sim.so native.py;
25
26clean: clean-links
27 find . -iname '*.py[oc]' -exec rm '{}' ';'
28 rm -rf TAGS tags native/config.log
29 (cd native; $(SCONS) -c)
30
31# run unit test suite
32test:
33 @echo "=== Running unit tests"
34 $(PYTHON) -m tests
35
36# Emacs Tags
37TAGS:
38 find . -type f -and -iname '*.py' | xargs ${ETAGS}
39 find cpp/include -type f -and -iname '*.h' | xargs ${ETAGS} -a
40 find cpp/src -type f -and -iname '*.cpp' | xargs ${ETAGS} -a
41 ${ETAGS} -l python -a run_exp
42
43# Vim Tags
44tags:
45 find . -type f -and -iname '*.py' | xargs ctags
46 find cpp/include -type f -and -iname '*.h' | xargs ctags -a
47 find cpp/src -type f -and -iname '*.cpp' | xargs ctags -a
48 ctags --language-force=Python -a run_exp
diff --git a/native/SConstruct b/native/SConstruct
new file mode 100644
index 0000000..797eaba
--- /dev/null
+++ b/native/SConstruct
@@ -0,0 +1,135 @@
1# -*-Python-*-
2#
3# Build system config for the C++ implementation of selected schedulability
4# tests and simulations. See the SCons documentation for details.
5
6Help("""
7Fast Test Module --- Schedulability Tests & Co in C++
8
9---
10The build depends on GMP and SWIG.
11""")
12
13DEBUG_FLAGS = [
14 '-Wall',
15 '-Wno-deprecated',
16# '-std=gnu++0x',
17# '-Werror',
18# '-pedantic',
19# '-g', # generate debug info
20# '-pg', # generate profiler info
21 ]
22
23OPT_FLAGS = [
24 '-O3',
25]
26
27INCLUDE_DIRS = [
28 'include/', # local headers
29 '${GMP_PATH}/include', # GNU MP Bignum Library
30 ]
31
32LIB_DIRS = [
33 '${GMP_PATH}/lib', # GNU MP Bignum Library
34]
35
36LOCAL_CONFIG_FILE = '.config'
37Help("Local variables are read from %s.\n" % LOCAL_CONFIG_FILE)
38
39##############################################################
40# Output helpers.
41
42def info(msg):
43 print "[II]", msg
44
45def abort(msg):
46 print "[EE]", msg
47 Exit(1)
48
49##############################################################
50# Build configuration.
51
52build_vars = Variables(LOCAL_CONFIG_FILE, ARGUMENTS)
53
54build_vars.AddVariables(
55 PathVariable('GMP_PATH', 'Where to find the GMP library.', '/usr')
56 )
57
58
59import os
60
61gmp = Environment(
62 variables = build_vars,
63 CXX = 'g++',
64 CPPPATH = INCLUDE_DIRS,
65 CXXFLAGS = DEBUG_FLAGS + OPT_FLAGS,
66 LINKFLAGS = DEBUG_FLAGS,
67 LIBS = ['gmp', 'gmpxx'],
68 LIBPATH = LIB_DIRS,
69
70 # use custom tools
71 ENV = {'PATH' : os.environ['PATH']},
72)
73
74Help("---\n\nBuild Variables:")
75Help(build_vars.GenerateHelpText(gmp))
76
77cleaning = gmp.GetOption('clean')
78help = gmp.GetOption('help')
79
80if not cleaning and not help:
81 conf = Configure(gmp)
82
83 if not conf.CheckCXX():
84 abort("C++ compiler missing!")
85
86 if not conf.CheckHeader('gmp.h'):
87 abort("GMP header is missing!")
88
89 if not conf.CheckCXXHeader('gmpxx.h'):
90 abort("GMP C++ header is missing!")
91
92 gmp = conf.Finish()
93
94import distutils.sysconfig
95
96fw_path, = distutils.sysconfig.get_config_vars('PYTHONFRAMEWORKPREFIX')
97if not fw_path:
98 fw_path = '/System/Library/Frameworks'
99
100swig = gmp.Clone(
101 SWIGFLAGS=['-python', '-c++', '-Wall', '-includeall', '-classic'],
102 SHLIBPREFIX="",
103 FRAMEWORKS=['Python'],
104 FRAMEWORKPATH=[fw_path],
105 SWIGOUTDIR='.',
106 SWIGPATH=INCLUDE_DIRS,
107)
108swig.Append(
109 CPPPATH=[distutils.sysconfig.get_python_inc()],
110)
111
112EDF_SRC = [
113 'src/edf/baker.cpp',
114 'src/edf/baruah.cpp',
115 'src/edf/gfb.cpp',
116 'src/edf/bcl.cpp',
117 'src/edf/bcl_iterative.cpp',
118 'src/edf/rta.cpp',
119 'src/edf/ffdbf.cpp',
120 'src/edf/gedf.cpp',
121 'src/edf/load.cpp',
122 ]
123
124SCHED_SRC = [
125 'src/edf/sim.cpp',
126 'src/schedule_sim.cpp'
127]
128
129gmp.Program('testmain', ['src/testmain.cpp',
130 'src/sharedres.cpp',
131 'src/tasks.cpp'] + EDF_SRC + SCHED_SRC)
132
133swig.SharedLibrary('_sched.so', ['src/tasks.cpp', 'interface/sched.i'] + EDF_SRC)
134swig.SharedLibrary('_locking.so', ['src/sharedres.cpp', 'interface/locking.i'])
135swig.SharedLibrary('_sim.so', ['src/tasks.cpp', 'interface/sim.i'] + SCHED_SRC) \ No newline at end of file
diff --git a/native/include/edf/baker.h b/native/include/edf/baker.h
new file mode 100644
index 0000000..c3716df
--- /dev/null
+++ b/native/include/edf/baker.h
@@ -0,0 +1,21 @@
1#ifndef BAKER_H
2#define BAKER_H
3
4class BakerGedf : public SchedulabilityTest
5{
6
7 private:
8 unsigned int m;
9
10 private:
11 void beta(const Task &t_i, const Task &t_k, const mpq_class &lambda_k,
12 mpq_class &beta_i);
13 bool is_task_schedulable(unsigned int k, const TaskSet &ts);
14
15 public:
16 BakerGedf(unsigned int num_processors) : m(num_processors) {};
17
18 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
19};
20
21#endif
diff --git a/native/include/edf/baruah.h b/native/include/edf/baruah.h
new file mode 100644
index 0000000..dcd4f14
--- /dev/null
+++ b/native/include/edf/baruah.h
@@ -0,0 +1,29 @@
1#ifndef BARUAH_H
2#define BARUAH_H
3
4class BaruahGedf : public SchedulabilityTest
5{
6
7private:
8 unsigned int m;
9
10 bool is_task_schedulable(unsigned int k,
11 const TaskSet &ts,
12 const mpz_class &ilen,
13 mpz_class &i1,
14 mpz_class &sum,
15 mpz_class *idiff,
16 mpz_class **ptr);
17
18 void get_max_test_points(const TaskSet &ts, mpq_class& m_minus_u,
19 mpz_class* maxp);
20
21public:
22 BaruahGedf(unsigned int num_processors) : m(num_processors) {};
23
24 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
25
26 static const float MINIMUM_SLACK;
27};
28
29#endif
diff --git a/native/include/edf/bcl.h b/native/include/edf/bcl.h
new file mode 100644
index 0000000..31bf1e3
--- /dev/null
+++ b/native/include/edf/bcl.h
@@ -0,0 +1,21 @@
1#ifndef BCL_H
2#define BCL_H
3
4class BCLGedf : public SchedulabilityTest
5{
6
7 private:
8 unsigned int m;
9
10 private:
11 unsigned long max_jobs_contained(const Task &t_i, const Task &t_k);
12 void beta(const Task &t_i, const Task &t_k, mpq_class &beta_i);
13 bool is_task_schedulable(unsigned int k, const TaskSet &ts);
14
15 public:
16 BCLGedf(unsigned int num_processors) : m(num_processors) {};
17
18 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
19};
20
21#endif
diff --git a/native/include/edf/bcl_iterative.h b/native/include/edf/bcl_iterative.h
new file mode 100644
index 0000000..c5ee620
--- /dev/null
+++ b/native/include/edf/bcl_iterative.h
@@ -0,0 +1,21 @@
1#ifndef BCL_ITERATIVE_H
2#define BCL_ITERATIVE_H
3
4class BCLIterativeGedf : public SchedulabilityTest
5{
6
7 private:
8 unsigned int m;
9 unsigned int max_rounds;
10
11 bool slack_update(unsigned int k, const TaskSet &ts,
12 unsigned long *slack, bool &ok);
13
14 public:
15 BCLIterativeGedf(unsigned int num_processors, unsigned int max_rounds = 0)
16 : m(num_processors), max_rounds(max_rounds) {};
17
18 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
19};
20
21#endif
diff --git a/native/include/edf/ffdbf.h b/native/include/edf/ffdbf.h
new file mode 100644
index 0000000..7e3dac6
--- /dev/null
+++ b/native/include/edf/ffdbf.h
@@ -0,0 +1,28 @@
1#ifndef FFDBF_H
2#define FFDBF_H
3
4class FFDBFGedf : public SchedulabilityTest
5{
6 private:
7 const unsigned int m;
8 const unsigned long epsilon_denom;
9 const mpq_class sigma_step;
10
11 private:
12 bool witness_condition(const TaskSet &ts,
13 const mpz_class q[], const mpq_class r[],
14 const mpq_class &time, const mpq_class &speed);
15
16 public:
17 FFDBFGedf(unsigned int num_processors,
18 unsigned long epsilon_denom = 10,
19 unsigned long sigma_granularity = 50)
20 : m(num_processors),
21 epsilon_denom(epsilon_denom),
22 sigma_step(1, sigma_granularity)
23 {};
24
25 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
26};
27
28#endif
diff --git a/native/include/edf/gedf.h b/native/include/edf/gedf.h
new file mode 100644
index 0000000..63659e3
--- /dev/null
+++ b/native/include/edf/gedf.h
@@ -0,0 +1,32 @@
1#ifndef GEDF_H
2#define GEDF_H
3
4class GlobalEDF : public SchedulabilityTest
5{
6
7 private:
8 unsigned int m;
9 unsigned long rta_step;
10 bool want_ffdbf;
11 bool want_load;
12 bool want_baruah;
13 bool want_rta;
14
15 public:
16 GlobalEDF(unsigned int num_processors,
17 unsigned long rta_min_step = 1,
18 bool want_baruah = true,
19 bool want_rta = true,
20 bool want_ffdbf = false,
21 bool want_load = false)
22 : m(num_processors), rta_step(rta_min_step),
23 want_ffdbf(want_ffdbf),
24 want_load(want_load),
25 want_baruah(want_baruah),
26 want_rta(want_rta) {};
27
28 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
29};
30
31
32#endif
diff --git a/native/include/edf/gfb.h b/native/include/edf/gfb.h
new file mode 100644
index 0000000..d7dc3cd
--- /dev/null
+++ b/native/include/edf/gfb.h
@@ -0,0 +1,16 @@
1#ifndef GFB_H
2#define GFB_H
3
4class GFBGedf : public SchedulabilityTest
5{
6 private:
7 unsigned int m;
8
9 public:
10 GFBGedf(unsigned int num_processors) : m(num_processors) {};
11
12 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
13
14};
15
16#endif
diff --git a/native/include/edf/load.h b/native/include/edf/load.h
new file mode 100644
index 0000000..36c8f5a
--- /dev/null
+++ b/native/include/edf/load.h
@@ -0,0 +1,19 @@
1#ifndef LOAD_H
2#define LOAD_H
3
4class LoadGedf : public SchedulabilityTest
5{
6 private:
7 unsigned int m;
8 mpq_class epsilon;
9
10 public:
11 LoadGedf(unsigned int num_processors,
12 unsigned int milli_epsilon = 100
13 ) : m(num_processors), epsilon(milli_epsilon, 1000) {};
14
15 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
16
17};
18
19#endif
diff --git a/native/include/edf/rta.h b/native/include/edf/rta.h
new file mode 100644
index 0000000..7a556b6
--- /dev/null
+++ b/native/include/edf/rta.h
@@ -0,0 +1,33 @@
1#ifndef RTA_H
2#define RTA_H
3
4class RTAGedf : public SchedulabilityTest
5{
6
7 private:
8 unsigned int m;
9 unsigned int max_rounds;
10 unsigned int min_delta;
11
12 bool response_estimate(unsigned int k,
13 const TaskSet &ts,
14 unsigned long const *slack,
15 unsigned long response,
16 unsigned long &new_response);
17
18 bool rta_fixpoint(unsigned int k,
19 const TaskSet &ts,
20 unsigned long const *slack,
21 unsigned long &response);
22
23 public:
24 RTAGedf(unsigned int num_processors,
25 unsigned int min_fixpoint_step = 0,
26 unsigned int max_rounds = 25)
27 : m(num_processors), max_rounds(max_rounds),
28 min_delta(min_fixpoint_step) {};
29
30 bool is_schedulable(const TaskSet &ts, bool check_preconditions = true);
31};
32
33#endif
diff --git a/native/include/edf/sim.h b/native/include/edf/sim.h
new file mode 100644
index 0000000..1867426
--- /dev/null
+++ b/native/include/edf/sim.h
@@ -0,0 +1,25 @@
1#ifndef EDF_SIM_H
2#define EDF_SIM_H
3
4struct Stats
5{
6 unsigned long num_tardy_jobs;
7 unsigned long num_ok_jobs;
8 unsigned long total_tardiness;
9 unsigned long max_tardiness;
10 unsigned long first_miss;
11};
12
13bool edf_misses_deadline(unsigned int num_procs,
14 TaskSet &ts,
15 unsigned long end_of_simulation);
16
17unsigned long edf_first_violation(unsigned int num_procs,
18 TaskSet &ts,
19 unsigned long end_of_simulation);
20
21Stats edf_observe_tardiness(unsigned int num_procs,
22 TaskSet &ts,
23 unsigned long end_of_simulation);
24
25#endif
diff --git a/native/include/event.h b/native/include/event.h
new file mode 100644
index 0000000..4636659
--- /dev/null
+++ b/native/include/event.h
@@ -0,0 +1,47 @@
1#ifndef EVENT_H
2#define EVENT_H
3
4#include <queue>
5
6template <class time_t>
7class Event
8{
9 public:
10 virtual void fire(const time_t &cur_time) {}; /* callback */
11};
12
13
14template <class time_t>
15class Timeout
16{
17 private:
18 time_t fire_time;
19 Event<time_t> *handler;
20
21 public:
22 Timeout(time_t when, Event<time_t> *what)
23 : fire_time(when), handler(what) {}
24
25 const time_t& time() const
26 {
27 return fire_time;
28 }
29
30 Event<time_t>& event() const
31 {
32 return *handler;
33 }
34
35 bool operator<(const Timeout<time_t> &that) const
36 {
37 return this->time() < that.time();
38 }
39
40 bool operator>(const Timeout<time_t> &that) const
41 {
42 return this->time() > that.time();
43 }
44};
45
46
47#endif
diff --git a/native/include/math-helper.h b/native/include/math-helper.h
new file mode 100644
index 0000000..10c42c2
--- /dev/null
+++ b/native/include/math-helper.h
@@ -0,0 +1,20 @@
1#ifndef MATH_HELPER_H
2#define MATH_HELPER_H
3
4static inline void mpq_truncate(mpq_class &val)
5{
6 val.get_num() -= val.get_num() % val.get_den();
7 val.canonicalize();
8}
9
10static inline unsigned long divide_with_ceil(unsigned long numer,
11 unsigned long denom)
12{
13 if (numer % denom == 0)
14 return numer / denom;
15 else
16 /* integer division computes implicit floor */
17 return (numer / denom) + 1;
18}
19
20#endif
diff --git a/native/include/res_io.h b/native/include/res_io.h
new file mode 100644
index 0000000..0be3c32
--- /dev/null
+++ b/native/include/res_io.h
@@ -0,0 +1,8 @@
1#ifndef RES_IO_H
2#define RES_IO_H
3
4std::ostream& operator<<(std::ostream &os, const RequestBound &rb);
5std::ostream& operator<<(std::ostream &os, const TaskInfo &ti);
6std::ostream& operator<<(std::ostream &os, const ResourceSharingInfo &rsi);
7
8#endif
diff --git a/native/include/schedulability.h b/native/include/schedulability.h
new file mode 100644
index 0000000..2c53d9d
--- /dev/null
+++ b/native/include/schedulability.h
@@ -0,0 +1,11 @@
1#ifndef SCHEDULABILITY_H
2#define SCHEDULABILITY_H
3
4class SchedulabilityTest
5{
6 public:
7 virtual bool is_schedulable(const TaskSet &ts,
8 bool check_preconditions = true) = 0;
9};
10
11#endif
diff --git a/native/include/schedule_sim.h b/native/include/schedule_sim.h
new file mode 100644
index 0000000..ef10814
--- /dev/null
+++ b/native/include/schedule_sim.h
@@ -0,0 +1,315 @@
1#ifndef SCHED_H
2#define SCHED_H
3
4#include "tasks.h"
5#include "event.h"
6#include <vector>
7#include <queue>
8#include <algorithm>
9
10typedef unsigned long simtime_t;
11
12class Job {
13 private:
14 const Task &task;
15 simtime_t release;
16 simtime_t cost;
17 simtime_t allocation;
18 simtime_t seqno;
19
20 public:
21 Job(const Task &tsk,
22 simtime_t relt = 0,
23 unsigned long sequence_no = 1,
24 simtime_t cost = 0);
25
26 const Task& get_task() const { return task; }
27 simtime_t get_release() const { return release; }
28 simtime_t get_deadline() const { return release + task.get_deadline(); }
29 simtime_t get_cost() const { return cost; }
30 simtime_t get_allocation() const { return allocation; }
31 unsigned long get_seqno() const { return seqno; }
32
33 void increase_allocation(simtime_t service_time)
34 {
35 allocation += service_time;
36 }
37
38 bool is_complete() const
39 {
40 return allocation >= cost;
41 }
42
43 simtime_t remaining_demand() const
44 {
45 return cost - allocation;
46 }
47
48 void init_next(simtime_t cost = 0,
49 simtime_t inter_arrival_time = 0);
50
51 // callbacks
52 virtual void completed(simtime_t when, int proc) {};
53};
54
55class SimJob;
56
57class ScheduleSimulation
58{
59 public:
60 virtual void simulate_until(simtime_t end_of_simulation) = 0;
61
62 virtual void add_release(SimJob *job) = 0;
63 virtual void add_ready(Job *job) = 0;
64};
65
66class SimJob : public Job, public Event<simtime_t>
67{
68 private:
69 ScheduleSimulation* sim;
70
71 public:
72 SimJob(Task& tsk, ScheduleSimulation* s = NULL) : Job(tsk), sim(s) {};
73
74 void set_simulation(ScheduleSimulation* s) { sim = s; }
75 ScheduleSimulation* get_sim() { return sim; }
76
77 void fire(const simtime_t &time)
78 {
79 sim->add_ready(this);
80 }
81};
82
83class PeriodicJobSequence : public SimJob
84{
85 public:
86 PeriodicJobSequence(Task& tsk) : SimJob(tsk) {};
87
88 // simulator callback
89 void completed(simtime_t when, int proc);
90};
91
92
93class EarliestDeadlineFirst {
94 public:
95 bool operator()(const Job* a, const Job* b)
96 {
97 if (a && b)
98 return a->get_deadline() > b->get_deadline();
99 else if (b && !a)
100 return true;
101 else
102 return false;
103 }
104};
105
106// periodic job sequence
107
108class Processor
109{
110 private:
111 Job* scheduled;
112
113 public:
114 Processor() : scheduled(NULL) {}
115
116 Job* get_scheduled() const { return scheduled; };
117 void schedule(Job* new_job) { scheduled = new_job; }
118
119 void idle() { scheduled = NULL; }
120
121 bool advance_time(simtime_t delta)
122 {
123 if (scheduled)
124 {
125 scheduled->increase_allocation(delta);
126 return scheduled->is_complete();
127 }
128 else
129 return false;
130 }
131};
132
133template <typename JobPriority>
134class PreemptionOrder
135{
136 public:
137 bool operator()(const Processor& a, const Processor& b)
138 {
139 JobPriority higher_prio;
140 return higher_prio(a.get_scheduled(), b.get_scheduled());
141 }
142};
143
144
145typedef std::priority_queue<Timeout<simtime_t>,
146 std::vector<Timeout<simtime_t> >,
147 std::greater<Timeout<simtime_t> >
148 > EventQueue;
149
150template <typename JobPriority>
151class GlobalScheduler : public ScheduleSimulation
152{
153 typedef std::priority_queue<Job*,
154 std::vector<Job*>,
155 JobPriority > ReadyQueue;
156
157 private:
158 EventQueue events;
159 ReadyQueue pending;
160 simtime_t current_time;
161
162 Processor* processors;
163 int num_procs;
164
165 JobPriority lower_prio;
166 PreemptionOrder<JobPriority> first_to_preempt;
167
168 Event<simtime_t> dummy;
169
170 bool aborted;
171
172 private:
173
174 void advance_time(simtime_t until)
175 {
176 simtime_t last = current_time;
177
178 current_time = until;
179
180 // 1) advance time until next event (job completion or event)
181 for (int i = 0; i < num_procs; i++)
182 if (processors[i].advance_time(current_time - last))
183 {
184 // process job completion
185 Job* sched = processors[i].get_scheduled();
186 processors[i].idle();
187 // notify simulation callback
188 job_completed(i, sched);
189 // notify job callback
190 sched->completed(current_time, i);
191 }
192
193 // 2) process any pending events
194 while (!events.empty())
195 {
196 const Timeout<simtime_t>& next_event = events.top();
197
198 if (next_event.time() <= current_time)
199 {
200 next_event.event().fire(current_time);
201 events.pop();
202 }
203 else
204 // no more expired events
205 break;
206 }
207
208 // 3) process any required preemptions
209 bool all_checked = false;
210 while (!pending.empty() && !all_checked)
211 {
212 Job* highest_prio = pending.top();
213 Processor* lowest_prio_proc;
214
215 lowest_prio_proc = std::min_element(processors,
216 processors + num_procs,
217 first_to_preempt);
218 Job* scheduled = lowest_prio_proc->get_scheduled();
219
220 if (lower_prio(scheduled, highest_prio))
221 {
222 // do a preemption
223 pending.pop();
224
225
226 // schedule
227 lowest_prio_proc->schedule(highest_prio);
228
229 // notify simulation callback
230 job_scheduled(lowest_prio_proc - processors,
231 scheduled,
232 highest_prio);
233 if (scheduled && !scheduled->is_complete())
234 // add back into the pending queue
235 pending.push(scheduled);
236
237 // schedule job completion event
238 Timeout<simtime_t> ev(highest_prio->remaining_demand() +
239 current_time,
240 &dummy);
241 events.push(ev);
242 }
243 else
244 all_checked = true;
245 }
246 }
247
248 public:
249 GlobalScheduler(int num_procs)
250 {
251 aborted = false;
252 current_time = 0;
253 this->num_procs = num_procs;
254 processors = new Processor[num_procs];
255 }
256
257 virtual ~GlobalScheduler()
258 {
259 delete [] processors;
260 }
261
262 simtime_t get_current_time() { return current_time; }
263
264 void abort() { aborted = true; }
265
266 void simulate_until(simtime_t end_of_simulation)
267 {
268 while (current_time <= end_of_simulation &&
269 !aborted &&
270 !events.empty()) {
271 simtime_t next = events.top().time();
272 advance_time(next);
273 }
274 }
275
276 // Simulation event callback interface
277 virtual void job_released(Job *job) {};
278 virtual void job_completed(int proc,
279 Job *job) {};
280 virtual void job_scheduled(int proc,
281 Job *preempted,
282 Job *scheduled) {};
283
284 // ScheduleSimulation interface
285 void add_release(SimJob *job)
286 {
287 if (job->get_release() >= current_time)
288 {
289 // schedule future release
290 Timeout<simtime_t> rel(job->get_release(), job);
291 events.push(rel);
292 }
293 else
294 add_ready(job);
295 }
296
297 // ScheduleSimulation interface
298 void add_ready(Job *job)
299 {
300 // release immediately
301 pending.push(job);
302 // notify callback
303 job_released(job);
304 }
305
306};
307
308
309
310void run_periodic_simulation(ScheduleSimulation& sim,
311 TaskSet& ts,
312 simtime_t end_of_simulation);
313
314
315#endif
diff --git a/native/include/sharedres.h b/native/include/sharedres.h
new file mode 100644
index 0000000..2df9a08
--- /dev/null
+++ b/native/include/sharedres.h
@@ -0,0 +1,418 @@
1#ifndef SHAREDRES_H
2#define SHAREDRES_H
3
4#ifndef SWIG
5#include <limits.h>
6#include <assert.h>
7
8#include <vector>
9#include <algorithm>
10#endif
11
12typedef enum {
13 WRITE = 0,
14 READ = 1,
15} request_type_t;
16
17class TaskInfo;
18
19class RequestBound
20{
21private:
22 unsigned int resource_id;
23 unsigned int num_requests;
24 unsigned int request_length;
25 const TaskInfo* task;
26 request_type_t request_type;
27
28public:
29 RequestBound(unsigned int res_id,
30 unsigned int num,
31 unsigned int length,
32 const TaskInfo* tsk,
33 request_type_t type = WRITE)
34 : resource_id(res_id),
35 num_requests(num),
36 request_length(length),
37 task(tsk),
38 request_type(type)
39 {}
40
41 unsigned int get_max_num_requests(unsigned long interval) const;
42
43 unsigned int get_resource_id() const { return resource_id; }
44 unsigned int get_num_requests() const { return num_requests; }
45 unsigned int get_request_length() const { return request_length; }
46
47 request_type_t get_request_type() const { return request_type; }
48
49 bool is_read() const { return get_request_type() == READ; }
50 bool is_write() const { return get_request_type() == WRITE; }
51
52 const TaskInfo* get_task() const { return task; }
53};
54
55typedef std::vector<RequestBound> Requests;
56
57class TaskInfo
58{
59private:
60 unsigned int priority;
61 unsigned long period;
62 unsigned long response;
63 unsigned int cluster;
64
65 Requests requests;
66
67public:
68 TaskInfo(unsigned long _period,
69 unsigned long _response,
70 unsigned int _cluster,
71 unsigned int _priority)
72 : priority(_priority),
73 period(_period),
74 response(_response),
75 cluster(_cluster)
76 {}
77
78 void add_request(unsigned int res_id,
79 unsigned int num,
80 unsigned int length,
81 request_type_t type = WRITE)
82 {
83 requests.push_back(RequestBound(res_id, num, length, this, type));
84 }
85
86 const Requests& get_requests() const
87 {
88 return requests;
89 }
90
91 unsigned int get_priority() const { return priority; }
92 unsigned long get_period() const { return period; }
93 unsigned long get_response() const { return response; }
94 unsigned int get_cluster() const { return cluster; }
95
96 unsigned int get_num_arrivals() const
97 {
98 return get_total_num_requests() + 1; // one for the job release
99 }
100
101 unsigned int get_total_num_requests() const
102 {
103 unsigned int count = 0;
104 Requests::const_iterator it;
105 for (it = requests.begin();
106 it != requests.end();
107 it++)
108 count += it->get_num_requests();
109 return count;
110 }
111
112 unsigned int get_max_request_length() const
113 {
114 unsigned int len = 0;
115 Requests::const_iterator it;
116 for (it = requests.begin();
117 it != requests.end();
118 it++)
119 len = std::max(len, it->get_request_length());
120 return len;
121 }
122
123};
124
125typedef std::vector<TaskInfo> TaskInfos;
126
127class ResourceSharingInfo
128{
129private:
130 TaskInfos tasks;
131
132public:
133 ResourceSharingInfo(unsigned int num_tasks)
134 {
135 // Make sure all tasks will fit without re-allocation.
136 tasks.reserve(num_tasks);
137 }
138
139 const TaskInfos& get_tasks() const
140 {
141 return tasks;
142 }
143
144 void add_task(unsigned long period,
145 unsigned long response,
146 unsigned int cluster = 0,
147 unsigned int priority = UINT_MAX)
148 {
149 // Avoid re-allocation!
150 assert(tasks.size() < tasks.capacity());
151 tasks.push_back(TaskInfo(period, response, cluster, priority));
152 }
153
154 void add_request(unsigned int resource_id,
155 unsigned int max_num,
156 unsigned int max_length)
157 {
158 assert(!tasks.empty());
159
160 TaskInfo& last_added = tasks.back();
161 last_added.add_request(resource_id, max_num, max_length);
162 }
163
164 void add_request_rw(unsigned int resource_id,
165 unsigned int max_num,
166 unsigned int max_length,
167 int type)
168 {
169 assert(!tasks.empty());
170 assert(type == WRITE || type == READ);
171
172 TaskInfo& last_added = tasks.back();
173 last_added.add_request(resource_id, max_num, max_length, (request_type_t) type);
174 }
175
176};
177
178
179#define NO_CPU (-1)
180
181class ResourceLocality
182{
183private:
184 std::vector<int> mapping;
185
186public:
187 void assign_resource(unsigned int res_id, unsigned int processor)
188 {
189 while (mapping.size() <= res_id)
190 mapping.push_back(NO_CPU);
191
192 mapping[res_id] = processor;
193 }
194
195 const int operator[](unsigned int res_id) const
196 {
197 if (mapping.size() <= res_id)
198 return NO_CPU;
199 else
200 return mapping[res_id];
201 }
202
203};
204
205
206struct Interference
207{
208 unsigned int count;
209 unsigned long total_length;
210
211 Interference() : count(0), total_length(0) {}
212
213 Interference& operator+=(const Interference& other)
214 {
215 count += other.count;
216 total_length += other.total_length;
217 return *this;
218 }
219
220 Interference operator+(const Interference& other) const
221 {
222 Interference result;
223 result.count = this->count + other.count;
224 result.total_length = this->total_length + other.total_length;
225 return result;
226 }
227
228 bool operator<(const Interference& other) const
229 {
230 return total_length < other.total_length ||
231 (total_length == other.total_length
232 && count < other.count);
233 }
234};
235
// Per-task results of a blocking-time analysis, indexed by the task's
// position in the ResourceSharingInfo task list. The meaning of each
// vector depends on the protocol whose bounds were computed.
class BlockingBounds
{
private:
	std::vector<Interference> blocking;     // total blocking per task
	std::vector<Interference> request_span; // longest span of a single request
	std::vector<Interference> arrival;      // arrival blocking (s-aware protocols)
	std::vector<Interference> remote;       // blocking caused by remote tasks
	std::vector<Interference> local;        // blocking caused by local tasks

public:
	// Sizes only the blocking and request_span vectors; the
	// arrival/remote/local accessors must not be used on objects
	// created through this constructor (their vectors stay empty).
	BlockingBounds(unsigned int num_tasks)
		: blocking(num_tasks),
		  request_span(num_tasks)
	{}

	// Sizes all five vectors to the number of tasks in 'info'.
	BlockingBounds(const ResourceSharingInfo& info)
		: blocking(info.get_tasks().size()),
		  request_span(info.get_tasks().size()),
		  arrival(info.get_tasks().size()),
		  remote(info.get_tasks().size()),
		  local(info.get_tasks().size())
	{}

	// Total blocking of task 'idx' (read-only).
	const Interference& operator[](unsigned int idx) const
	{
		assert( idx < size() );
		return blocking[idx];
	}

	// Total blocking of task 'idx' (mutable).
	Interference& operator[](unsigned int idx)
	{
		assert( idx < size() );
		return blocking[idx];
	}

	// Record 'val' as the request span of task 'idx' if it exceeds
	// the current maximum (uses Interference::operator<).
	void raise_request_span(unsigned idx, const Interference& val)
	{
		assert( idx < size() );
		request_span[idx] = std::max(request_span[idx], val);
	}

	const Interference& get_max_request_span(unsigned idx) const
	{
		assert( idx < size() );
		return request_span[idx];
	}

	// Number of tasks covered by these bounds.
	// (The top-level 'const' on the by-value return has no effect.)
	const size_t size() const
	{
		return blocking.size();
	}

	/* Convenience accessors for the individual fields; these exist
	 * mainly for the Python bindings (see interface/locking.i). */

	unsigned long get_blocking_term(unsigned int tsk_index) const
	{
		assert( tsk_index < blocking.size() );
		return blocking[tsk_index].total_length;
	}

	unsigned long get_blocking_count(unsigned int tsk_index) const
	{
		assert( tsk_index < blocking.size() );
		return blocking[tsk_index].count;
	}

	unsigned long get_span_term(unsigned int tsk_index) const
	{
		assert( tsk_index < blocking.size() );
		return request_span[tsk_index].total_length;
	}

	unsigned long get_span_count(unsigned int tsk_index) const
	{
		assert( tsk_index < blocking.size() );
		return request_span[tsk_index].count;
	}


	unsigned long get_remote_blocking(unsigned int tsk_index) const
	{
		assert( tsk_index < remote.size() );
		return remote[tsk_index].total_length;
	}

	unsigned long get_remote_count(unsigned int tsk_index) const
	{
		assert( tsk_index < remote.size() );
		return remote[tsk_index].count;
	}

	void set_remote_blocking(unsigned int tsk_index,
				 const Interference& inf)
	{
		assert( tsk_index < remote.size() );
		remote[tsk_index] = inf;
	}

	unsigned long get_local_blocking(unsigned int tsk_index) const
	{
		assert( tsk_index < local.size() );
		return local[tsk_index].total_length;
	}

	unsigned long get_local_count(unsigned int tsk_index) const
	{
		assert( tsk_index < local.size() );
		return local[tsk_index].count;
	}

	void set_local_blocking(unsigned int tsk_index,
				const Interference& inf)
	{
		assert( tsk_index < local.size() );
		local[tsk_index] = inf;
	}

	unsigned long get_arrival_blocking(unsigned int tsk_index) const
	{
		assert( tsk_index < arrival.size() );
		return arrival[tsk_index].total_length;
	}

	void set_arrival_blocking(unsigned int tsk_index,
				  const Interference& inf)
	{
		assert( tsk_index < arrival.size() );
		arrival[tsk_index] = inf;
	}
};
364
365// spinlocks
366
367BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info,
368 unsigned int procs_per_cluster,
369 int dedicated_irq = NO_CPU);
370
371BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info,
372 const ResourceSharingInfo& info_mtx,
373 unsigned int procs_per_cluster,
374 int dedicated_irq = NO_CPU);
375
376BlockingBounds* phase_fair_rw_bounds(const ResourceSharingInfo& info,
377 unsigned int procs_per_cluster,
378 int dedicated_irq = NO_CPU);
379
380// s-oblivious protocols
381
382BlockingBounds* global_omlp_bounds(const ResourceSharingInfo& info,
383 unsigned int num_procs);
384BlockingBounds* global_fmlp_bounds(const ResourceSharingInfo& info);
385
386BlockingBounds* clustered_omlp_bounds(const ResourceSharingInfo& info,
387 unsigned int procs_per_cluster,
388 int dedicated_irq = NO_CPU);
389
390BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info,
391 unsigned int procs_per_cluster,
392 int dedicated_irq = NO_CPU);
393
394BlockingBounds* part_omlp_bounds(const ResourceSharingInfo& info);
395
396
397// s-aware protocols
398
399BlockingBounds* part_fmlp_bounds(const ResourceSharingInfo& info,
400 bool preemptive = true);
401
402BlockingBounds* mpcp_bounds(const ResourceSharingInfo& info,
403 bool use_virtual_spinning);
404
405BlockingBounds* dpcp_bounds(const ResourceSharingInfo& info,
406 const ResourceLocality& locality);
407
408// Still missing:
409// ==============
410
411// clustered_omlp_kex_bounds
412// clustered_omlp_rw_bounds
413// spin_rw_wpref_bounds
414// spin_rw_rpref_bounds
415
416
417
418#endif
diff --git a/native/include/stl-helper.h b/native/include/stl-helper.h
new file mode 100644
index 0000000..8aa3ac0
--- /dev/null
+++ b/native/include/stl-helper.h
@@ -0,0 +1,31 @@
#ifndef STL_HELPER_H
#define STL_HELPER_H

// Iteration convenience macros for STL containers (pre-C++11 codebase).
// typeof() is a g++ extension

// Iterate 'it' over all elements of 'collection'.
// NOTE(review): the first 'collection' use is not parenthesized, so
// pass a plain variable, not a compound expression.
#define foreach(collection, it) \
	for (typeof(collection.begin()) it = (collection).begin(); \
	     it != (collection).end(); \
	     it++)

// Like foreach, but also maintains a running index in the (existing)
// variable 'i', starting at 0. Uses a g++ statement expression.
#define enumerate(collection, it, i) \
	for (typeof(collection.begin()) it = ({i = 0; (collection).begin();}); \
	     it != (collection).end(); \
	     it++, i++)

// Invoke fun(element, extra args...) on every element of 'collection'.
// The iterator name is derived from 'collection', so it must be an
// identifier.
#define apply_foreach(collection, fun, ...) \
	foreach(collection, __apply_it_ ## collection) { \
		fun(*__apply_it_ ## collection, ## __VA_ARGS__); \
	}

// Clear 'to' and fill it with one default-constructed 'init()' per
// element of 'from', letting fun(src, dst, extra args...) populate
// each new element in place.
#define map_ref(from, to, init, fun, ...) \
	{ \
		(to).clear(); \
		(to).reserve((from).size()); \
		foreach(from, __map_ref_it) { \
			(to).push_back(init()); \
			fun(*__map_ref_it, (to).back(), \
			    ## __VA_ARGS__); \
		} \
	}

#endif
diff --git a/native/include/task_io.h b/native/include/task_io.h
new file mode 100644
index 0000000..2aa769d
--- /dev/null
+++ b/native/include/task_io.h
@@ -0,0 +1,6 @@
1#ifndef TASK_IO_H
2#define TASK_IO_H
3
4std::ostream& operator<<(std::ostream &os, const Task &t);
5
6#endif
diff --git a/native/include/tasks.h b/native/include/tasks.h
new file mode 100644
index 0000000..175206a
--- /dev/null
+++ b/native/include/tasks.h
@@ -0,0 +1,200 @@
1#ifndef TASKS_H
2#define TASKS_H
3
4#ifndef SWIG
5
6#include <vector>
7#include <algorithm>
8
9#include <gmpxx.h>
10
11#include <math.h>
12
13#endif
14
// A sporadic task: worst-case execution time (wcet), minimum
// inter-arrival time (period), and relative deadline, all in integral
// time units. Exact arithmetic uses GMP (mpz_class / mpq_class).
class Task
{
  private:
	unsigned long period;
	unsigned long wcet;
	unsigned long deadline;

  public:

	/* construction and initialization */
	// init() is defined in tasks.cpp; deadline == 0 presumably selects
	// an implicit deadline (deadline = period) -- see "defaults to
	// implicit deadline" below; confirm against tasks.cpp.
	void init(unsigned long wcet, unsigned long period, unsigned long deadline = 0);
	Task(unsigned long wcet = 0,
	     unsigned long period = 0,
	     unsigned long deadline = 0) { init(wcet, period, deadline); }

	/* getter / setter */
	unsigned long get_period() const { return period; }
	unsigned long get_wcet() const { return wcet; }
	/* defaults to implicit deadline */
	unsigned long get_deadline() const {return deadline; }

	void set_period(unsigned long period) { this->period = period; }
	void set_wcet(unsigned long wcet) { this->wcet = wcet; }
	void set_deadline(unsigned long deadline) { this->deadline = deadline; }

	/* properties (defined in tasks.cpp) */
	bool has_implicit_deadline() const;
	bool has_constrained_deadline() const;
	bool is_feasible() const;


	// util = wcet / period; density = wcet / min(deadline, period)
	// (exact rational results, defined in tasks.cpp).
	void get_utilization(mpq_class &util) const;
	void get_density(mpq_class &density) const;

	// Demand bound function (DBF) and LOAD support.
	// This implements Fisher, Baker, and Baruah's PTAS

	// DBF(time) = (floor((time - deadline) / period) + 1) * wcet.
	// NOTE(review): time == deadline yields 0 here (strict inequality
	// in effect); the mpz_class overload below matches this, but
	// baruah.cpp's demand_bound_function counts one job at that
	// boundary -- confirm which convention is intended.
	unsigned long bound_demand(unsigned long time) const
	{
		if (time <= deadline)
			return 0;
		else
		{
			unsigned long jobs;

			time -= deadline;
			jobs = time / period; // implicit floor in integer division
			jobs += 1;
			return jobs * wcet;
		}
	}

	// Exact (overflow-free) variant of bound_demand() for GMP callers.
	void bound_demand(const mpz_class &time, mpz_class &demand) const
	{
		if (time <= deadline)
			demand = 0;
		else
		{
			demand = time;
			demand -= deadline;

			demand /= period; // implicit floor in integer division
			demand += 1;
			demand *= wcet;
		}
	}

	// load(time) = DBF(time) / time; zero for non-positive intervals.
	void bound_load(const mpz_class &time, mpq_class &load) const
	{
		mpz_class demand;

		if (time > 0)
		{
			bound_demand(time, demand);
			load = demand;
			load /= time;
		}
		else
			load = 0;
	}

	// PTAS approximation: exact DBF for the first k jobs, then the
	// linear upper bound wcet + (time - deadline) * wcet / period.
	// NOTE(review): k * period + deadline is computed in unsigned
	// long arithmetic and may overflow for very large k -- confirm
	// callers keep k small (see TaskSet::k_for_epsilon).
	unsigned long approx_demand(unsigned long time, unsigned int k) const
	{
		if (time < k * period + deadline)
			return bound_demand(time);
		else
		{
			double approx = time - deadline;
			approx *= wcet;
			approx /= period;

			return wcet + (unsigned long) ceil(approx);
		}
	}

	// Exact-arithmetic variant of approx_demand(); uses a ceiling
	// division (mpz_cdiv_q_ui) instead of floating-point ceil().
	void approx_demand(const mpz_class &time, mpz_class &demand,
			   unsigned int k) const
	{
		if (time < k * period + deadline)
			bound_demand(time, demand);
		else
		{
			mpz_class approx;

			approx = time;
			approx -= deadline;
			approx *= wcet;

			mpz_cdiv_q_ui(demand.get_mpz_t(), approx.get_mpz_t(), period);

			demand += wcet;
		}
	}

	// approx_load(time) = approx_demand(time, k) / time; zero for
	// non-positive intervals.
	void approx_load(const mpz_class &time, mpq_class &load,
			 unsigned int k) const
	{
		mpz_class demand;

		if (time > 0)
		{
			approx_demand(time, demand, k);
			load = demand;
			load /= time;
		}
		else
			load = 0;
	}
};
144
145typedef std::vector<Task> Tasks;
146
// A collection of sporadic tasks plus aggregate properties used by the
// schedulability tests. Out-of-line members are defined in tasks.cpp.
class TaskSet
{
  private:
	Tasks tasks;

	// Smallest k such that the PTAS demand approximation for task
	// 'idx' stays within factor (1 + epsilon) (see tasks.cpp).
	unsigned long k_for_epsilon(unsigned int idx, const mpq_class &epsilon) const;

  public:
	TaskSet();
	TaskSet(const TaskSet &original);
	virtual ~TaskSet();

	// Append a new task; deadline == 0 selects the Task default
	// (implicit deadline).
	void add_task(unsigned long wcet, unsigned long period, unsigned long deadline = 0)
	{
		tasks.push_back(Task(wcet, period, deadline));
	}

	unsigned int get_task_count() const { return tasks.size(); }

	// Unchecked element access (no bounds checking).
	Task& operator[](int idx) { return tasks[idx]; }

	const Task& operator[](int idx) const { return tasks[idx]; }

	/* aggregate properties (defined in tasks.cpp) */
	bool has_only_implicit_deadlines() const;
	bool has_only_constrained_deadlines() const;
	bool has_only_feasible_tasks() const;
	bool is_not_overutilized(unsigned int num_processors) const;

	void get_utilization(mpq_class &util) const;
	void get_density(mpq_class &density) const;
	void get_max_density(mpq_class &max_density) const;

	// LOAD of the task set, approximated to within 'epsilon'.
	void approx_load(mpq_class &load, const mpq_class &epsilon = 0.1) const;

	/* wrapper for Python access */
	unsigned long get_period(unsigned int idx) const
	{
		return tasks[idx].get_period();
	}

	unsigned long get_wcet(unsigned int idx) const
	{
		return tasks[idx].get_wcet();
	}

	unsigned long get_deadline(unsigned int idx) const
	{
		return tasks[idx].get_deadline();
	}
};
197
198
199
200#endif
diff --git a/native/interface/locking.i b/native/interface/locking.i
new file mode 100644
index 0000000..d632f05
--- /dev/null
+++ b/native/interface/locking.i
@@ -0,0 +1,31 @@
// SWIG bindings for the blocking-bound analyses in sharedres.h.
%module locking
%{
#define SWIG_FILE_WITH_INIT
#include "sharedres.h"
%}

// These factories return heap-allocated BlockingBounds; %newobject
// transfers ownership to Python so the wrapper frees them.
%newobject task_fair_mutex_bounds;
%newobject task_fair_rw_bounds;
%newobject phase_fair_rw_bounds;

%newobject global_omlp_bounds;
%newobject global_fmlp_bounds;
%newobject part_omlp_bounds;
%newobject clustered_omlp_bounds;
%newobject clustered_rw_omlp_bounds;

%newobject part_fmlp_bounds;
%newobject mpcp_bounds;
%newobject dpcp_bounds;

// Internal helper types not exposed to Python.
%ignore Interference;
%ignore RequestBound;
%ignore TaskInfo;

%ignore ResourceSharingInfo::get_tasks;

%ignore BlockingBounds::raise_request_span;
%ignore BlockingBounds::get_max_request_span;

// NOTE(review): SWIG only follows a bare #include when invoked with
// -includeall; the conventional idiom is %include "sharedres.h".
// Verify the SConstruct passes -includeall, or nothing gets wrapped.
#include "sharedres.h"

diff --git a/native/interface/sched.i b/native/interface/sched.i
new file mode 100644
index 0000000..9762471
--- /dev/null
+++ b/native/interface/sched.i
@@ -0,0 +1,39 @@
// SWIG bindings for the EDF schedulability tests.
%module sched
%{
#define SWIG_FILE_WITH_INIT
#include "tasks.h"
#include "schedulability.h"
#include "edf/baker.h"
#include "edf/gfb.h"
#include "edf/baruah.h"
#include "edf/bcl.h"
#include "edf/bcl_iterative.h"
#include "edf/rta.h"
#include "edf/ffdbf.h"
#include "edf/load.h"
#include "edf/gedf.h"
#include "sharedres.h"
%}

// Hide the GMP-based overloads/methods; Python callers use the plain
// unsigned long interfaces instead.
%ignore Task::get_utilization(mpq_class &util) const;
%ignore Task::get_density(mpq_class &density) const;
%ignore Task::bound_demand(const mpz_class &time, mpz_class &demand) const;
%ignore Task::bound_load const;
%ignore Task::approx_demand const;

%ignore TaskSet::get_utilization const;
%ignore TaskSet::get_density const;
%ignore TaskSet::get_max_density const;
%ignore TaskSet::approx_load const;

// NOTE(review): bare #include is only followed by SWIG with
// -includeall; the usual idiom is %include. Confirm the build flags.
#include "tasks.h"
#include "schedulability.h"
#include "edf/baker.h"
#include "edf/gfb.h"
#include "edf/baruah.h"
#include "edf/bcl.h"
#include "edf/bcl_iterative.h"
#include "edf/rta.h"
#include "edf/ffdbf.h"
#include "edf/load.h"
#include "edf/gedf.h"
diff --git a/native/interface/sim.i b/native/interface/sim.i
new file mode 100644
index 0000000..e723dda
--- /dev/null
+++ b/native/interface/sim.i
@@ -0,0 +1,20 @@
// SWIG bindings for the schedule simulator.
%module sim
%{
#define SWIG_FILE_WITH_INIT
#include "tasks.h"
#include "edf/sim.h"
%}

// Hide the GMP-based overloads/methods from the Python interface
// (same set as in sched.i).
%ignore Task::get_utilization(mpq_class &util) const;
%ignore Task::get_density(mpq_class &density) const;
%ignore Task::bound_demand(const mpz_class &time, mpz_class &demand) const;
%ignore Task::bound_load const;
%ignore Task::approx_demand const;

%ignore TaskSet::get_utilization const;
%ignore TaskSet::get_density const;
%ignore TaskSet::get_max_density const;
%ignore TaskSet::approx_load const;

// NOTE(review): bare #include is only followed by SWIG with
// -includeall; the usual idiom is %include. Confirm the build flags.
#include "tasks.h"
#include "edf/sim.h"
diff --git a/native/src/edf/baker.cpp b/native/src/edf/baker.cpp
new file mode 100644
index 0000000..ca2b8cc
--- /dev/null
+++ b/native/src/edf/baker.cpp
@@ -0,0 +1,68 @@
1#include <algorithm> // for min
2
3#include "tasks.h"
4#include "schedulability.h"
5
6#include "edf/baker.h"
7
8using namespace std;
9
// Compute Baker's beta_i term for interfering task t_i with respect to
// analyzed task t_k:
//   beta_i = (1 + (p_i - d_i)/d_k) * u_i
//            + (c_i - lambda_k * p_i)/d_k   if lambda_k < u_i
// where lambda_k is the density of t_k (passed in by the caller).
void BakerGedf::beta(const Task &t_i, const Task &t_k,
		     const mpq_class &lambda_k,
		     mpq_class &beta_i)
{
	mpq_class u_i;

	// XXX: possible improvement would be to pre-compute u_i
	// instead of incurring quadratic u_i computations.
	t_i.get_utilization(u_i);

	// (1 + (p_i - d_i)/d_k) * u_i, built up step by step in exact
	// rational arithmetic
	beta_i = t_i.get_period() - t_i.get_deadline();
	beta_i /= t_k.get_deadline();
	beta_i += 1;
	beta_i *= u_i;

	// correction term, only applicable when lambda_k < u_i
	if (lambda_k < u_i)
	{
		mpq_class tmp = t_i.get_wcet();
		tmp -= lambda_k * t_i.get_period();
		tmp /= t_k.get_deadline();
		beta_i += tmp;
	}
}
33
// Baker's per-task condition: task k cannot miss a deadline if
//   sum_i min(beta_i, 1)  <=  m * (1 - lambda_k) + lambda_k
// where lambda_k is the density of task k.
bool BakerGedf::is_task_schedulable(unsigned int k, const TaskSet &ts)
{
	mpq_class lambda, bound, beta_i, beta_sum = 0;
	mpq_class one = 1;

	ts[k].get_density(lambda);

	bound = m * (1 - lambda) + lambda;

	// loop exits early once the bound is exceeded; each beta term
	// is capped at 1
	for (unsigned int i = 0; i < ts.get_task_count() && beta_sum <= bound; i++)
	{
		beta(ts[i], ts[k], lambda, beta_i);
		beta_sum += min(beta_i, one);
	}

	return beta_sum <= bound;
}
51
52bool BakerGedf::is_schedulable(const TaskSet &ts,
53 bool check_preconditions)
54{
55 if (check_preconditions)
56 {
57 if (!(ts.has_only_feasible_tasks() &&
58 ts.is_not_overutilized(m)))
59 return false;
60 }
61
62 for (unsigned int k = 0; k < ts.get_task_count(); k++)
63 if (!is_task_schedulable(k, ts))
64 return false;
65
66 return true;
67}
68
diff --git a/native/src/edf/baruah.cpp b/native/src/edf/baruah.cpp
new file mode 100644
index 0000000..a8d78d8
--- /dev/null
+++ b/native/src/edf/baruah.cpp
@@ -0,0 +1,315 @@
1#include <algorithm> // for greater
2#include <queue>
3#include <vector>
4
5#include "tasks.h"
6#include "schedulability.h"
7
8#include "edf/baruah.h"
9
10#include <iostream>
11#include "task_io.h"
12
13using namespace std;
14
15const float BaruahGedf::MINIMUM_SLACK = 0.01;
16
17static void demand_bound_function(const Task &tsk,
18 const mpz_class &t,
19 mpz_class &db)
20{
21 db = t;
22 db -= tsk.get_deadline();
23 if (db >= 0)
24 {
25 db /= tsk.get_period();
26 db += 1;
27 db *= tsk.get_wcet();
28 }
29 else
30 db = 0;
31}
32
// Iterator over the points where task i's DBF changes value, expressed
// relative to task k's deadline: d_i - d_k + j * p_i for j = 0, 1, ...
// Only non-negative points are produced.
class DBFPointsOfChange
{
private:
	mpz_class cur;     // current test point
	unsigned long pi;  // period of the interfering task

public:
	void init(const Task& tsk_i, const Task& tsk_k)
	{
		init(tsk_k.get_deadline(), tsk_i.get_deadline(), tsk_i.get_period());
	}

	void init(unsigned long dk, unsigned long di, unsigned long pi)
	{
		this->pi = pi;

		// cur = di - dk (without underflow!)
		// (mpz arithmetic handles the negative intermediate)
		cur = di;
		cur -= dk;
		// skip forward to the first non-negative point
		while (cur < 0)
			next();
	}

	const mpz_class& get_cur() const
	{
		return cur;
	}

	// advance to the next step point (one period later)
	void next()
	{
		cur += pi;
	}
};
66
67class DBFComparator {
68public:
69 bool operator() (DBFPointsOfChange *a, DBFPointsOfChange *b)
70 {
71 return b->get_cur() < a->get_cur();
72 }
73};
74
75typedef priority_queue<DBFPointsOfChange*,
76 vector<DBFPointsOfChange*>,
77 DBFComparator> DBFQueue;
78
79class AllDBFPointsOfChange
80{
81private:
82 DBFPointsOfChange *dbf;
83 DBFQueue queue;
84 mpz_class last;
85 mpz_class *upper_bound;
86
87public:
88 void init(const TaskSet &ts, int k, mpz_class* bound)
89 {
90 last = -1;
91 dbf = new DBFPointsOfChange[ts.get_task_count()];
92 for (unsigned int i = 0; i < ts.get_task_count(); i++)
93 {
94 dbf[i].init(ts[i], ts[k]);
95 queue.push(dbf + i);
96 }
97 upper_bound = bound;
98 }
99
100 ~AllDBFPointsOfChange()
101 {
102 delete[] dbf;
103 }
104
105 bool get_next(mpz_class &t)
106 {
107 if (last > *upper_bound)
108 return false;
109
110 DBFPointsOfChange* pt;
111 do // avoid duplicates
112 {
113 pt = queue.top();
114 queue.pop();
115 t = pt->get_cur();
116 pt->next();
117 queue.push(pt);
118 } while (t == last);
119 last = t;
120
121 return last <= *upper_bound;
122 }
123};
124
// Interference I_1 of task i on task k (no carry-in): the DBF over an
// interval of length ilen + D_k, capped at the maximum amount of work
// that can actually delay task k's job. The self-interference case
// (i == k) excludes task k's own WCET.
static
void interval1(unsigned int i, unsigned int k, const TaskSet &ts,
	       const mpz_class &ilen, mpz_class &i1)
{
	mpz_class dbf, tmp;
	tmp = ilen + ts[k].get_deadline();
	demand_bound_function(ts[i], tmp, dbf);
	if (i == k)
		i1 = min(mpz_class(dbf - ts[k].get_wcet()), ilen);
	else
		i1 = min(dbf,
			 mpz_class(ilen + ts[k].get_deadline() -
				   (ts[k].get_wcet() - 1)));
}
139
140
141static void demand_bound_function_prime(const Task &tsk,
142 const mpz_class &t,
143 mpz_class &db)
144// carry-in scenario
145{
146 db = t;
147 db /= tsk.get_period();
148 db *= tsk.get_wcet();
149 db += min(mpz_class(tsk.get_wcet()), mpz_class(t % tsk.get_period()));
150}
151
// Interference I_2 of task i on task k with carry-in: same capping as
// interval1(), but using the carry-in demand bound DBF'.
static void interval2(unsigned int i, unsigned int k, const TaskSet &ts,
		      const mpz_class &ilen, mpz_class &i2)
{
	mpz_class dbf, tmp;

	tmp = ilen + ts[k].get_deadline();
	demand_bound_function_prime(ts[i], tmp, dbf);
	if (i == k)
		i2 = min(mpz_class(dbf - ts[k].get_wcet()), ilen);
	else
		i2 = min(dbf,
			 mpz_class(ilen + ts[k].get_deadline() -
				   (ts[k].get_wcet() - 1)));
}
166
167class MPZComparator {
168public:
169 bool operator() (mpz_class *a, mpz_class *b)
170 {
171 return *b < *a;
172 }
173};
174
// Baruah's per-task, per-interval condition: total interference
// (all tasks' I_1 plus the m - 1 largest carry-in differences
// I_2 - I_1) must not exceed m * (A_k + D_k - C_k), where ilen = A_k.
// i1, sum, idiff, and ptr are caller-provided scratch buffers so they
// are not reallocated for every test point.
bool BaruahGedf::is_task_schedulable(unsigned int k,
				     const TaskSet &ts,
				     const mpz_class &ilen,
				     mpz_class &i1,
				     mpz_class &sum,
				     mpz_class *idiff,
				     mpz_class **ptr)
{
	mpz_class bound;
	sum = 0;

	// non-carry-in interference for everybody; idiff[i] becomes the
	// extra interference task i would add if it had carry-in
	for (unsigned int i = 0; i < ts.get_task_count(); i++)
	{
		interval1(i, k, ts, ilen, i1);
		interval2(i, k, ts, ilen, idiff[i]);
		sum += i1;
		idiff[i] -= i1;
	}

	/* sort pointers to idiff to find largest idiff values */
	sort(ptr, ptr + ts.get_task_count(), MPZComparator());

	// at most m - 1 tasks can have carry-in
	for (unsigned int i = 0; i < m - 1 && i < ts.get_task_count(); i++)
		sum += *ptr[i];

	bound = ilen + ts[k].get_deadline() - ts[k].get_wcet();
	bound *= m;
	return sum <= bound;
}
204
205void BaruahGedf::get_max_test_points(const TaskSet &ts,
206 mpq_class &m_minus_u,
207 mpz_class* maxp)
208{
209 unsigned long* wcet = new unsigned long[ts.get_task_count()];
210
211 for (unsigned int i = 0; i < ts.get_task_count(); i++)
212 wcet[i] = ts[i].get_wcet();
213
214 sort(wcet, wcet + ts.get_task_count(), greater<unsigned long>());
215
216 mpq_class u, tdu_sum;
217 mpz_class csigma, mc;
218
219 csigma = 0;
220 for (unsigned int i = 0; i < m - 1 && i < ts.get_task_count(); i++)
221 csigma += wcet[i];
222
223 tdu_sum = 0;
224 for (unsigned int i = 0; i < ts.get_task_count(); i++)
225 {
226 ts[i].get_utilization(u);
227 tdu_sum += (ts[i].get_period() - ts[i].get_deadline()) * u;
228 }
229
230 for (unsigned int i = 0; i < ts.get_task_count(); i++)
231 {
232 mc = ts[i].get_wcet();
233 mc *= m;
234 mc += 0.124;
235 maxp[i] = (csigma - (ts[i].get_deadline() * m_minus_u) + tdu_sum + mc)
236 / m_minus_u;
237 }
238
239 delete wcet;
240}
241
// Baruah's G-EDF test: enumerate, for every task k, all DBF step
// points up to that task's maximum testing interval and verify the
// per-interval condition at each. Scratch buffers are allocated once
// up front and shared across all test points.
bool BaruahGedf::is_schedulable(const TaskSet &ts,
				bool check_preconditions)
{
	if (check_preconditions)
	{
		if (!(ts.has_only_feasible_tasks() &&
		      ts.is_not_overutilized(m) &&
		      ts.has_only_constrained_deadlines()))
			return false;

		if (ts.get_task_count() == 0)
			return true;
	}

	/* Always check for too-small rest utilizations, as they
	 * create unmanagably-large testing intervals. We'll just
	 * enforce a minimum slack threshold here. */
	// m_minus_u = m - U(ts); the testing interval bounds grow
	// without limit as this slack approaches zero
	mpq_class m_minus_u;
	ts.get_utilization(m_minus_u);
	m_minus_u *= -1;
	m_minus_u += m;

	/*
	if (m_minus_u < MINIMUM_SLACK) {
		cerr << "# BaruahGedf: skipping task test; slack = " << m_minus_u
		     << endl;
		return false;
	}
	*/

	mpz_class i1, sum;
	mpz_class *max_test_point, *idiff;
	mpz_class** ptr; // indirect access to idiff

	idiff = new mpz_class[ts.get_task_count()];
	max_test_point = new mpz_class[ts.get_task_count()];
	ptr = new mpz_class*[ts.get_task_count()];
	for (unsigned int i = 0; i < ts.get_task_count(); i++)
		ptr[i] = idiff + i;

	get_max_test_points(ts, m_minus_u, max_test_point);

	mpz_class ilen;
	bool point_in_range = true;
	bool schedulable = true;

	AllDBFPointsOfChange *all_pts;

	// one step-point iterator per analyzed task
	all_pts = new AllDBFPointsOfChange[ts.get_task_count()];
	for (unsigned int k = 0; k < ts.get_task_count() && schedulable; k++)
		all_pts[k].init(ts, k, max_test_point + k);

	// for every task for which point <= max_ak
	// (round-robin over tasks until every iterator is exhausted or
	// a condition fails)
	while (point_in_range && schedulable)
	{
		point_in_range = false;
		for (unsigned int k = 0; k < ts.get_task_count() && schedulable; k++)
			if (all_pts[k].get_next(ilen))
			{
				schedulable = is_task_schedulable(k, ts, ilen, i1, sum,
								 idiff, ptr);
				point_in_range = true;
			}
	}


	delete[] all_pts;
	delete[] max_test_point;
	delete[] idiff;
	delete[] ptr;

	return schedulable;
}
315
diff --git a/native/src/edf/bcl.cpp b/native/src/edf/bcl.cpp
new file mode 100644
index 0000000..8cc25d9
--- /dev/null
+++ b/native/src/edf/bcl.cpp
@@ -0,0 +1,82 @@
1#include <algorithm> // for min
2
3#include "tasks.h"
4#include "schedulability.h"
5
6#include "edf/bcl.h"
7
8using namespace std;
9
10unsigned long BCLGedf::max_jobs_contained(const Task &t_i, const Task &t_k)
11{
12 if (t_i.get_deadline() > t_k.get_deadline())
13 return 0;
14 else
15 return 1 + (t_k.get_deadline() - t_i.get_deadline()) / t_i.get_period();
16}
17
// BCL interference term:
//   beta_i = (n * c_i + min(c_i, max(0, D_k - n * p_i))) / D_k
// where n is the number of fully-contained jobs of t_i in t_k's
// scheduling window.
void BCLGedf::beta(const Task &t_i, const Task &t_k, mpq_class &beta_i)
{
	unsigned long n = max_jobs_contained(t_i, t_k);

	mpz_class c_i, tmp;

	c_i = t_i.get_wcet();
	// tmp = max(0, D_k - n * p_i), computed in mpz to dodge overflow
	tmp = t_i.get_period();
	tmp *= n;
	if (tmp < t_k.get_deadline())
		// no risk of overflow
		tmp = t_k.get_deadline() - n * t_i.get_period();
	else
		// test says zero is lower limit
		tmp = 0;

	beta_i = n * c_i;
	beta_i += min(c_i, tmp);
	beta_i /= t_k.get_deadline();
}
38
// BCL per-task condition: with lambda_term = 1 - density(t_k),
//   sum_{i != k} min(beta_i, lambda_term) < m * lambda_term,
// or equality if at least one contributing beta_i satisfies
// 0 < beta_i <= lambda_term.
bool BCLGedf::is_task_schedulable(unsigned int k, const TaskSet &ts)
{
	mpq_class beta_i, beta_sum = 0;
	mpq_class lambda_term;
	bool small_beta_exists = false;

	// lambda_term = 1 - density of task k
	ts[k].get_density(lambda_term);
	lambda_term *= -1;
	lambda_term += 1;

	for (unsigned int i = 0; i < ts.get_task_count(); i++)
	{
		if (i != k) {
			beta(ts[i], ts[k], beta_i);
			beta_sum += min(beta_i, lambda_term);
			small_beta_exists = small_beta_exists ||
				(0 < beta_i && beta_i <= lambda_term);
		}
	}

	lambda_term *= m;

	return beta_sum < lambda_term ||
		(small_beta_exists && beta_sum == lambda_term);
}
64
65bool BCLGedf::is_schedulable(const TaskSet &ts,
66 bool check_preconditions)
67{
68 if (check_preconditions)
69 {
70 if (!(ts.has_only_feasible_tasks() &&
71 ts.is_not_overutilized(m) &&
72 ts.has_only_constrained_deadlines()))
73 return false;
74 }
75
76 for (unsigned int k = 0; k < ts.get_task_count(); k++)
77 if (!is_task_schedulable(k, ts))
78 return false;
79
80 return true;
81}
82
diff --git a/native/src/edf/bcl_iterative.cpp b/native/src/edf/bcl_iterative.cpp
new file mode 100644
index 0000000..e70c0c4
--- /dev/null
+++ b/native/src/edf/bcl_iterative.cpp
@@ -0,0 +1,105 @@
1#include <algorithm>
2#include <assert.h>
3
4#include "tasks.h"
5#include "schedulability.h"
6
7#include "edf/bcl_iterative.h"
8
9using namespace std;
10
// Workload of t_i that can interfere with one job of t_k, given t_i's
// currently-known slack: njobs complete jobs plus a partial carry-in
// job limited by how much of t_i's WCET still fits in the window.
static void interfering_workload(const Task &t_i,
				 const Task &t_k,
				 unsigned long slack,
				 mpz_class &inf)
{
	unsigned long njobs = t_k.get_deadline() / t_i.get_period();

	// contribution of the fully-contained jobs
	inf = njobs;
	inf *= t_i.get_wcet();

	// start of the carry-in job, shifted right by t_i's slack
	unsigned long tmp = slack + njobs * t_i.get_period();

	if (t_k.get_deadline() >= tmp)
		inf += min(t_i.get_wcet(), t_k.get_deadline() - tmp);
	//else inf += min(t.get_wcet(), 0) // always null by definition.
}
27
28bool BCLIterativeGedf::slack_update(unsigned int k,
29 const TaskSet &ts,
30 unsigned long *slack,
31 bool &has_slack)
32{
33 mpz_class other_work = 0;
34 mpz_class inf;
35 mpz_class inf_bound = ts[k].get_deadline() - ts[k].get_wcet() + 1;
36
37 for (unsigned int i = 0; i < ts.get_task_count(); i++)
38 if (k != i)
39 {
40 interfering_workload(ts[i], ts[k], slack[i], inf);
41 other_work += min(inf, inf_bound);
42 }
43 other_work /= m;
44 unsigned long tmp = ts[k].get_wcet() + other_work.get_ui();
45
46 assert( other_work.fits_ulong_p() );
47 assert (tmp > other_work.get_ui() );
48
49 has_slack = tmp <= ts[k].get_deadline();
50 if (!has_slack)
51 // negative slack => no update, always assume zero
52 return false;
53 else
54 {
55 tmp = ts[k].get_deadline() - tmp;
56 if (tmp > slack[k])
57 {
58 // better slack => update
59 slack[k] = tmp;
60 return true;
61 }
62 else
63 // no improvement
64 return false;
65 }
66}
67
68bool BCLIterativeGedf::is_schedulable(const TaskSet &ts,
69 bool check_preconditions)
70{
71 if (check_preconditions)
72 {
73 if (!(ts.has_only_feasible_tasks()
74 && ts.is_not_overutilized(m)
75 && ts.has_only_constrained_deadlines()))
76 return false;
77 if (ts.get_task_count() == 0)
78 return true;
79 }
80
81 unsigned long* slack = new unsigned long[ts.get_task_count()];
82
83 for (unsigned int i = 0; i < ts.get_task_count(); i++)
84 slack[i] = 0;
85
86 unsigned long round = 0;
87 bool schedulable = false;
88 bool updated = true;
89
90 while (updated && !schedulable && (max_rounds == 0 || round < max_rounds))
91 {
92 round++;
93 schedulable = true;
94 updated = false;
95 for (unsigned int k = 0; k < ts.get_task_count(); k++)
96 {
97 bool ok;
98 if (slack_update(k, ts, slack, ok))
99 updated = true;
100 schedulable = schedulable && ok;
101 }
102 }
103
104 return schedulable;
105}
diff --git a/native/src/edf/ffdbf.cpp b/native/src/edf/ffdbf.cpp
new file mode 100644
index 0000000..a28ea38
--- /dev/null
+++ b/native/src/edf/ffdbf.cpp
@@ -0,0 +1,298 @@
1#include <algorithm> // for min
2#include <queue>
3#include <vector>
4
5#include "tasks.h"
6#include "schedulability.h"
7#include "math-helper.h"
8
9#include "edf/ffdbf.h"
10
11#include <iostream>
12#include "task_io.h"
13
14using namespace std;
15
// Decompose 'time' into q_i = floor(time / p_i) whole periods and the
// exact rational remainder r_i = time - q_i * p_i.
static void get_q_r(const Task &t_i, const mpq_class &time,
		    mpz_class &q_i, mpq_class &r_i)
{
	// compute q_i -- floor(time / period)
	//         r_i -- time % period

	r_i = time / t_i.get_period();
	q_i = r_i; // truncate, i.e. implicit floor

	r_i = time;
	r_i -= q_i * t_i.get_period();
}
28
29static void compute_q_r(const TaskSet &ts, const mpq_class &time,
30 mpz_class q[], mpq_class r[])
31{
32 for (unsigned int i = 0; i < ts.get_task_count(); i++)
33 get_q_r(ts[i], time, q[i], r[i]);
34}
35
// Accumulate task t_i's contribution to the forced-forward demand
// bound function ffdbf(t, speed) into 'demand'. q_i/r_i are the
// precomputed quotient/remainder of t w.r.t. t_i's period; 'tmp' is
// caller-provided scratch space.
static void ffdbf(const Task &t_i,
		  const mpq_class &time, const mpq_class &speed,
		  const mpz_class &q_i, const mpq_class &r_i,
		  mpq_class &demand,
		  mpq_class &tmp)
{
	/* this is the cost in all three cases */
	demand += q_i * t_i.get_wcet();

	/* check for (a) and (b) cases */
	// threshold: d_i - c_i / speed
	tmp = 0;
	tmp -= t_i.get_wcet();
	tmp /= speed;
	tmp += t_i.get_deadline();
	if (r_i >= tmp)
	{
		// add one more cost charge
		demand += t_i.get_wcet();

		if (r_i <= t_i.get_deadline())
		{
			/* (b) class */
			// partial charge: subtract speed * (d_i - r_i)
			tmp = t_i.get_deadline();
			tmp -= r_i;
			tmp *= speed;
			demand -= tmp;
		}
	}
}
65
66static void ffdbf_ts(const TaskSet &ts,
67 const mpz_class q[], const mpq_class r[],
68 const mpq_class &time, const mpq_class &speed,
69 mpq_class &demand, mpq_class &tmp)
70{
71 demand = 0;
72 for (unsigned int i = 0; i < ts.get_task_count(); i++)
73 ffdbf(ts[i], time, speed, q[i], r[i], demand, tmp);
74}
75
76
// Iterator over the ffdbf test points of one task: for each absolute
// deadline d_i + j * p_i it yields two points, the deadline itself and
// the deadline minus min(c_i / speed, d_i) (the "offset" point), in
// increasing order, starting strictly after min_time.
class TestPoints
{
private:
	mpq_class time;        // next deadline-aligned point
	mpq_class with_offset; // next offset point (precedes 'time')
	unsigned long period;
	bool first_point;      // true => offset point is up next

public:
	void init(const Task& t_i,
		  const mpq_class& speed,
		  const mpq_class& min_time)
	{
		period = t_i.get_period();
		// offset = -min(c_i / speed, d_i)
		with_offset = t_i.get_wcet() / speed;
		if (with_offset > t_i.get_deadline())
			with_offset = t_i.get_deadline();
		with_offset *= -1;

		// align 'time' to the first deadline at or below min_time
		time = min_time;
		time /= period;
		// round down, i.e., floor()
		mpq_truncate(time);
		time *= period;
		time += t_i.get_deadline();

		with_offset += time;
		first_point = true;

		// discard points at or before min_time
		while (get_cur() <= min_time)
			next();
	}

	const mpq_class& get_cur() const
	{
		if (first_point)
			return with_offset;
		else
			return time;
	}

	// alternate offset-point -> deadline-point, then advance a period
	void next()
	{
		if (first_point)
			first_point = false;
		else
		{
			time += period;
			with_offset += period;
			first_point = true;
		}
	}
};
130
131class TimeComparator {
132public:
133 bool operator() (TestPoints *a, TestPoints *b)
134 {
135 return b->get_cur() < a->get_cur();
136 }
137};
138
139typedef priority_queue<TestPoints*,
140 vector<TestPoints*>,
141 TimeComparator> TimeQueue;
142
// Merged, duplicate-free iterator over the test points of all tasks.
// Allocation happens once in the constructor; init() may be called
// repeatedly to restart at a new speed/start time.
// NOTE(review): holds a raw owning pointer with no copy control --
// copying would double-free; confirm objects are only used in place.
class AllTestPoints
{
private:
	TestPoints *pts;
	TimeQueue queue;
	mpq_class last;     // last emitted point (duplicate filter)
	TaskSet const &ts;

public:
	AllTestPoints(const TaskSet &ts)
		: ts(ts)
	{
		pts = new TestPoints[ts.get_task_count()];
	}

	// (Re)start iteration: points strictly greater than min_time.
	void init(const mpq_class &speed,
		  const mpq_class &min_time)
	{
		last = -1;
		// clean out queue
		while (!queue.empty())
			queue.pop();
		// add all iterators
		for (unsigned int i = 0; i < ts.get_task_count(); i++)
		{
			pts[i].init(ts[i], speed, min_time);
			queue.push(pts + i);
		}
	}

	~AllTestPoints()
	{
		delete[] pts;
	}

	// Store the next (strictly increasing) test point in 't'.
	// The merged stream is unbounded; the caller limits it.
	void get_next(mpq_class &t)
	{
		TestPoints* pt;
		do // avoid duplicates
		{
			pt = queue.top();
			queue.pop();
			t = pt->get_cur();
			pt->next();
			queue.push(pt);
		} while (t == last);
		last = t;
	}
};
192
// FF-DBF schedulability condition at one (time, speed) point:
//   ffdbf(time, speed) <= (m - (m - 1) * speed) * time.
bool FFDBFGedf::witness_condition(const TaskSet &ts,
				  const mpz_class q[], const mpq_class r[],
				  const mpq_class &time,
				  const mpq_class &speed)
{
	mpq_class demand, bound;

	// 'bound' doubles as scratch space here and is overwritten below
	ffdbf_ts(ts, q, r, time, speed, demand, bound);

	// bound = (m - (m - 1) * speed) * time
	bound = - ((int) (m - 1));
	bound *= speed;
	bound += m;
	bound *= time;

	return demand <= bound;
}
209
// FF-DBF test: brute-force search over speeds sigma (from the maximum
// density upward, in sigma_step increments, up to a utilization-derived
// bound) for one speed at which the witness condition holds at every
// test point of the bounded testing interval.
bool FFDBFGedf::is_schedulable(const TaskSet &ts,
			       bool check_preconditions)
{
	// the test is formulated for multiprocessors only
	if (m < 2)
		return false;

	if (check_preconditions)
	{
		if (!(ts.has_only_feasible_tasks() &&
		      ts.is_not_overutilized(m) &&
		      ts.has_only_constrained_deadlines()))
			return false;
	}

	// allocate helpers
	AllTestPoints testing_set(ts);
	mpz_class *q = new mpz_class[ts.get_task_count()];
	mpq_class *r = new mpq_class[ts.get_task_count()];

	mpq_class sigma_bound;
	mpq_class time_bound;
	// NOTE(review): tmp is initialized to 1/epsilon_denom here but
	// immediately re-assigned to the same value below -- redundant.
	mpq_class tmp(1, epsilon_denom);

	// compute sigma bound: min(1, (m - U)/(m - 1) - epsilon)
	tmp = 1;
	tmp /= epsilon_denom;
	ts.get_utilization(sigma_bound);
	sigma_bound -= m;
	sigma_bound /= - ((int) (m - 1)); // neg. to flip sign
	sigma_bound -= tmp; // epsilon
	sigma_bound = min(sigma_bound, mpq_class(1));

	// compute time bound: sum of WCETs divided by epsilon
	time_bound = 0;
	for (unsigned int i = 0; i < ts.get_task_count(); i++)
		time_bound += ts[i].get_wcet();
	time_bound /= tmp; // epsilon

	mpq_class t_cur;
	mpq_class sigma_cur, sigma_nxt;
	bool schedulable;

	t_cur = 0;
	schedulable = false;

	// Start with minimum possible sigma value, then try
	// multiples of sigma_step.
	ts.get_max_density(sigma_cur);

	// setup brute force sigma value range
	// (sigma_nxt = smallest multiple of sigma_step above sigma_cur)
	sigma_nxt = sigma_cur / sigma_step;
	mpq_truncate(sigma_nxt);
	sigma_nxt += 1;
	sigma_nxt *= sigma_step;

	while (!schedulable &&
	       sigma_cur <= sigma_bound &&
	       t_cur <= time_bound)
	{
		// walk the test points for the current speed, resuming
		// at t_cur (points already passed need not be re-checked)
		testing_set.init(sigma_cur, t_cur);
		do {
			testing_set.get_next(t_cur);
			if (t_cur <= time_bound)
			{
				compute_q_r(ts, t_cur, q, r);
				schedulable = witness_condition(ts, q, r, t_cur, sigma_cur);
			}
			else
				// exceeded testing interval
				schedulable = true;
		} while (t_cur <= time_bound && schedulable);

		if (!schedulable && t_cur <= time_bound)
		{
			// find next sigma variable
			// (skip speeds that still fail at the point that
			// just failed)
			do
			{
				sigma_cur = sigma_nxt;
				sigma_nxt += sigma_step;
			} while (sigma_cur <= sigma_bound &&
				 !witness_condition(ts, q, r, t_cur, sigma_cur));
		}
	}

	delete [] q;
	delete [] r;

	return schedulable;
}
diff --git a/native/src/edf/gedf.cpp b/native/src/edf/gedf.cpp
new file mode 100644
index 0000000..35f1847
--- /dev/null
+++ b/native/src/edf/gedf.cpp
@@ -0,0 +1,54 @@
1#include "tasks.h"
2#include "schedulability.h"
3
4#include "edf/baker.h"
5#include "edf/baruah.h"
6#include "edf/gfb.h"
7#include "edf/bcl.h"
8#include "edf/bcl_iterative.h"
9#include "edf/rta.h"
10#include "edf/ffdbf.h"
11#include "edf/load.h"
12#include "edf/gedf.h"
13
14bool GlobalEDF::is_schedulable(const TaskSet &ts,
15 bool check)
16{
17 if (check)
18 {
19 if (!(ts.has_only_feasible_tasks() && ts.is_not_overutilized(m)))
20 return false;
21
22 if (ts.get_task_count() == 0)
23 return true;
24 }
25
26 // density bound on a uniprocessor.
27 if (m == 1)
28 {
29 mpq_class density;
30 ts.get_density(density);
31 if (density <= 1)
32 return true;
33 }
34
35 // Baker's test can deal with arbitrary deadlines.
36 // It's cheap, so do it first.
37 if (BakerGedf(m).is_schedulable(ts, false))
38 return true;
39
40 // Baruah's test and the BCL and GFB tests assume constrained deadlines.
41 if (ts.has_only_constrained_deadlines())
42 if (GFBGedf(m).is_schedulable(ts, false)
43 || (want_rta && RTAGedf(m, rta_step).is_schedulable(ts, false))
44 // The RTA test generalizes the BCL and BCLIterative tests.
45 || (want_baruah && BaruahGedf(m).is_schedulable(ts, false))
46 || (want_ffdbf && FFDBFGedf(m).is_schedulable(ts, false)))
47 return true;
48
49 // Load-based test can handle arbitrary deadlines.
50 if (want_load && LoadGedf(m).is_schedulable(ts, false))
51 return true;
52
53 return false;
54}
diff --git a/native/src/edf/gfb.cpp b/native/src/edf/gfb.cpp
new file mode 100644
index 0000000..0aa90b8
--- /dev/null
+++ b/native/src/edf/gfb.cpp
@@ -0,0 +1,24 @@
1#include "tasks.h"
2#include "schedulability.h"
3
4#include "edf/gfb.h"
5
6bool GFBGedf::is_schedulable(const TaskSet &ts, bool check_preconditions)
7{
8 if (check_preconditions)
9 {
10 if (!(ts.has_only_feasible_tasks()
11 && ts.is_not_overutilized(m)
12 && ts.has_only_constrained_deadlines()))
13 return false;
14 }
15
16 mpq_class total_density, max_density, bound;
17
18 ts.get_density(total_density);
19 ts.get_max_density(max_density);
20
21 bound = m - (m - 1) * max_density;
22
23 return total_density <= bound;
24}
diff --git a/native/src/edf/load.cpp b/native/src/edf/load.cpp
new file mode 100644
index 0000000..38d3c55
--- /dev/null
+++ b/native/src/edf/load.cpp
@@ -0,0 +1,48 @@
1#include "tasks.h"
2#include "schedulability.h"
3
4#include "edf/load.h"
5
6#include <iostream>
7#include <algorithm>
8
9/* This implements the LOAD test presented in:
10 *
11 * Baker & Baruah (2009), An analysis of global EDF schedulability for
12 * arbitrary-deadline sporadic task systems, Real-Time Systems, volume 43,
13 * pages 3-24.
14 */
15
16bool LoadGedf::is_schedulable(const TaskSet &ts, bool check_preconditions)
17{
18 if (check_preconditions)
19 {
20 if (!(ts.has_only_feasible_tasks()
21 && ts.is_not_overutilized(m)))
22 return false;
23 }
24
25 mpq_class load, max_density, mu, bound, cond1, cond2;
26 mpz_class mu_ceil;
27
28 // get the load of the task set
29 ts.approx_load(load, epsilon);
30
31 // compute bound (corollary 2)
32 ts.get_max_density(max_density);
33
34 mu = m - (m - 1) * max_density;
35
36 mu_ceil = mu.get_num();
37 // divide with ceiling
38 mpz_cdiv_q(mu_ceil.get_mpz_t(),
39 mu.get_num().get_mpz_t(),
40 mu.get_den().get_mpz_t());
41
42 cond1 = mu - (mu_ceil - 1) * max_density;
43 cond2 = (mu_ceil - 1) - (mu_ceil - 2) * max_density;
44
45 bound = std::max(cond1, cond2);
46
47 return load <= bound;
48}
diff --git a/native/src/edf/rta.cpp b/native/src/edf/rta.cpp
new file mode 100644
index 0000000..68eafa6
--- /dev/null
+++ b/native/src/edf/rta.cpp
@@ -0,0 +1,156 @@
1#include <algorithm>
2#include <assert.h>
3
4#include "tasks.h"
5#include "schedulability.h"
6
7#include "edf/rta.h"
8
9#include <iostream>
10#include "task_io.h"
11
12using namespace std;
13
14
15static void rta_interfering_workload(const Task &t_i,
16 unsigned long response_time,
17 unsigned long slack_i,
18 mpz_class &inf,
19 mpz_class &interval)
20{
21 interval = response_time;
22 interval += t_i.get_deadline() - t_i.get_wcet();
23 interval -= slack_i;
24
25 inf = t_i.get_wcet();
26 inf *= interval / t_i.get_period();
27
28 interval %= t_i.get_period();
29 if (interval > t_i.get_wcet())
30 inf += t_i.get_wcet();
31 else
32 inf += interval;
33}
34
35
36static void edf_interfering_workload(const Task &t_i,
37 const Task &t_k,
38 unsigned long slack_i,
39 mpz_class &inf)
40{
41 /* implicit floor in integer division */
42 unsigned long njobs = t_k.get_deadline() / t_i.get_period();
43
44 inf = njobs;
45 inf *= t_i.get_wcet();
46
47 unsigned long tmp = t_k.get_deadline() % t_i.get_period();
48 if (tmp > slack_i)
49 /* if tmp <= slack_i, then zero would be added */
50 inf += min(t_i.get_wcet(), tmp - slack_i);
51}
52
53bool RTAGedf::response_estimate(unsigned int k,
54 const TaskSet &ts,
55 unsigned long const *slack,
56 unsigned long response,
57 unsigned long &new_response)
58{
59 mpz_class other_work = 0;
60 mpz_class inf_edf;
61 mpz_class inf_rta;
62 mpz_class inf_bound = response - ts[k].get_wcet() + 1;
63 mpz_class tmp;
64
65 for (unsigned int i = 0; i < ts.get_task_count(); i++)
66 if (k != i)
67 {
68 edf_interfering_workload(ts[i], ts[k], slack[i], inf_edf);
69 rta_interfering_workload(ts[i], response, slack[i], inf_rta, tmp);
70 other_work += min(min(inf_edf, inf_rta), inf_bound);
71 }
72 /* implicit floor */
73 other_work /= m;
74 other_work += ts[k].get_wcet();
75 if (other_work.fits_ulong_p())
76 {
77 new_response = other_work.get_ui();
78 return true;
79 }
80 else
81 {
82 /* overflowed => reponse time > deadline */
83 return false;
84 }
85}
86
87bool RTAGedf::rta_fixpoint(unsigned int k,
88 const TaskSet &ts,
89 unsigned long const *slack,
90 unsigned long &response)
91{
92 unsigned long last;
93 bool ok;
94
95 last = ts[k].get_wcet();
96 ok = response_estimate(k, ts, slack, last, response);
97
98 while (ok && last != response && response <= ts[k].get_deadline())
99 {
100 if (last < response && response - last < min_delta)
101 last = min(last + min_delta, ts[k].get_deadline());
102 else
103 last = response;
104 ok = response_estimate(k, ts, slack, last, response);
105 }
106
107 return ok && response <= ts[k].get_deadline();
108}
109
110bool RTAGedf::is_schedulable(const TaskSet &ts, bool check_preconditions)
111{
112 if (check_preconditions)
113 {
114 if (!(ts.has_only_feasible_tasks()
115 && ts.is_not_overutilized(m)
116 && ts.has_only_constrained_deadlines()))
117 return false;
118 if (ts.get_task_count() == 0)
119 return true;
120 }
121
122 unsigned long* slack = new unsigned long[ts.get_task_count()];
123
124 for (unsigned int i = 0; i < ts.get_task_count(); i++)
125 slack[i] = 0;
126
127 unsigned long round = 0;
128 bool schedulable = false;
129 bool updated = true;
130
131 while (updated && !schedulable && (max_rounds == 0 || round < max_rounds))
132 {
133 round++;
134 schedulable = true;
135 updated = false;
136 for (unsigned int k = 0; k < ts.get_task_count(); k++)
137 {
138 unsigned long response, new_slack;
139 if (rta_fixpoint(k, ts, slack, response))
140 {
141 new_slack = ts[k].get_deadline() - response;
142 if (new_slack != slack[k])
143 {
144 slack[k] = new_slack;
145 updated = true;
146 }
147 }
148 else
149 {
150 schedulable = false;
151 }
152 }
153 }
154
155 return schedulable;
156}
diff --git a/native/src/edf/sim.cpp b/native/src/edf/sim.cpp
new file mode 100644
index 0000000..1510d05
--- /dev/null
+++ b/native/src/edf/sim.cpp
@@ -0,0 +1,103 @@
1#include "tasks.h"
2#include "edf/sim.h"
3
4#include "schedule_sim.h"
5
6#include <algorithm>
7
8typedef GlobalScheduler<EarliestDeadlineFirst> GedfSim;
9
10class DeadlineMissSearch : public GedfSim
11{
12 private:
13 bool dmissed;
14
15 public:
16 simtime_t when_missed;
17 simtime_t when_completed;
18
19 DeadlineMissSearch(int m) : GedfSim(m), dmissed(false) {};
20
21 virtual void job_completed(int proc, Job *job)
22 {
23 if (this->get_current_time() > job->get_deadline())
24 {
25 dmissed = true;
26 when_missed = job->get_deadline();
27 when_completed = this->get_current_time();
28 abort();
29 }
30 };
31
32 bool deadline_was_missed()
33 {
34 return dmissed;
35 }
36};
37
38class Tardiness : public GedfSim
39{
40 public:
41 Stats stats;
42
43 Tardiness(int m) : GedfSim(m)
44 {
45 stats.num_tardy_jobs = 0;
46 stats.num_ok_jobs = 0;
47 stats.total_tardiness = 0;
48 stats.max_tardiness = 0;
49 stats.first_miss = 0;
50 };
51
52 virtual void job_completed(int proc, Job *job)
53 {
54 if (this->get_current_time() > job->get_deadline())
55 {
56 simtime_t tardiness;
57 tardiness = this->get_current_time() - job->get_deadline();
58 stats.num_tardy_jobs++;
59 stats.total_tardiness += tardiness;
60 stats.max_tardiness = std::max(tardiness, stats.max_tardiness);
61 if (!stats.first_miss)
62 stats.first_miss = job->get_deadline();
63 }
64 else
65 stats.num_ok_jobs++;
66 };
67};
68
69unsigned long edf_first_violation(unsigned int num_procs,
70 TaskSet &ts,
71 unsigned long end_of_simulation)
72{
73 DeadlineMissSearch sim(num_procs);
74
75 run_periodic_simulation(sim, ts, end_of_simulation);
76 if (sim.deadline_was_missed())
77 return sim.when_missed;
78 else
79 return 0;
80}
81
82bool edf_misses_deadline(unsigned int num_procs,
83 TaskSet &ts,
84 unsigned long end_of_simulation)
85{
86 DeadlineMissSearch sim(num_procs);
87
88 run_periodic_simulation(sim, ts, end_of_simulation);
89 return sim.deadline_was_missed();
90}
91
92
93Stats edf_observe_tardiness(unsigned int num_procs,
94 TaskSet &ts,
95 unsigned long end_of_simulation)
96{
97 Tardiness sim(num_procs);
98
99 run_periodic_simulation(sim, ts, end_of_simulation);
100
101 return sim.stats;
102}
103
diff --git a/native/src/schedule_sim.cpp b/native/src/schedule_sim.cpp
new file mode 100644
index 0000000..a4be9e6
--- /dev/null
+++ b/native/src/schedule_sim.cpp
@@ -0,0 +1,67 @@
1
2#include "tasks.h"
3#include "schedule_sim.h"
4
5Job::Job(const Task &tsk,
6 unsigned long relt,
7 unsigned long sequence_no,
8 unsigned long cst)
9 : task(tsk), release(relt), allocation(0), seqno(sequence_no)
10{
11 if (!cst)
12 cost = task.get_wcet();
13 else
14 cost = cst;
15}
16
17void Job::init_next(simtime_t cost,
18 simtime_t inter_arrival_time)
19{
20 allocation = 0;
21 /* if cost == 0, then we keep the last cost */
22 if (cost != 0)
23 this->cost = cost;
24 release += task.get_period() + inter_arrival_time;
25 seqno++;
26}
27
28void PeriodicJobSequence::completed(simtime_t when, int proc)
29{
30 init_next();
31 get_sim()->add_release(this);
32}
33
34
35void run_periodic_simulation(ScheduleSimulation& sim,
36 TaskSet& ts,
37 simtime_t end_of_simulation)
38{
39 PeriodicJobSequence** jobs;
40
41 jobs = new PeriodicJobSequence*[ts.get_task_count()];
42 for (unsigned int i = 0; i < ts.get_task_count(); i++)
43 {
44 jobs[i] = new PeriodicJobSequence(ts[i]);
45 jobs[i]->set_simulation(&sim);
46 sim.add_release(jobs[i]);
47 }
48
49 sim.simulate_until(end_of_simulation);
50
51 for (unsigned int i = 0; i < ts.get_task_count(); i++)
52 delete jobs[i];
53 delete [] jobs;
54}
55
56
57
58
59
60
61
62
63
64
65
66
67
diff --git a/native/src/sharedres.cpp b/native/src/sharedres.cpp
new file mode 100644
index 0000000..7f11ae7
--- /dev/null
+++ b/native/src/sharedres.cpp
@@ -0,0 +1,2007 @@
1#include <algorithm> // for greater, sort
2#include <numeric>
3#include <functional>
4#include <limits.h>
5#include <iostream>
6
7#include "sharedres.h"
8#include "res_io.h"
9
10#include <gmpxx.h>
11#include "math-helper.h"
12
13#include "stl-helper.h"
14
15#ifdef CONFIG_USE_0X
16#include <unordered_map>
17#define hashmap std::unordered_map
18#else
19#include <ext/hash_map>
20#define hashmap __gnu_cxx::hash_map
21#endif
22
23static const unsigned int UNLIMITED = UINT_MAX;
24
25std::ostream& operator<<(std::ostream &os, const TaskInfo &ti)
26{
27 os << "TaskInfo[";
28 if (ti.get_priority() != UINT_MAX)
29 os << "priority="
30 << ti.get_priority() << ", ";
31 os << "period="
32 << ti.get_period() << ", response="
33 << ti.get_response() << ", cluster="
34 << ti.get_cluster() << ", requests=<";
35
36 foreach(ti.get_requests(), it)
37 {
38 if (it != ti.get_requests().begin())
39 os << " ";
40 os << (*it);
41 }
42
43 os << ">]";
44 return os;
45}
46
47std::ostream& operator<<(std::ostream &os, const RequestBound &rb)
48{
49 os << "(res-id="
50 << rb.get_resource_id() << ", num="
51 << rb.get_num_requests() << ", len="
52 << rb.get_request_length() << ")";
53 return os;
54}
55
56std::ostream& operator<<(std::ostream &os, const ResourceSharingInfo &rsi)
57{
58 foreach(rsi.get_tasks(), it)
59 {
60 const TaskInfo& tsk = *it;
61 os << "\t" << tsk << std::endl;
62 }
63 return os;
64}
65
66unsigned int RequestBound::get_max_num_requests(unsigned long interval) const
67{
68 unsigned long num_jobs;
69
70 num_jobs = divide_with_ceil(interval + task->get_response(),
71 task->get_period());
72
73 return (unsigned int) (num_jobs * num_requests);
74}
75
76
77// ****** non-exported helpers *******
78
79typedef std::vector<const TaskInfo*> Cluster;
80typedef std::vector<Cluster> Clusters;
81
82static void split_by_cluster(const ResourceSharingInfo& info, Clusters& clusters)
83{
84 foreach(info.get_tasks(), it)
85 {
86 const TaskInfo& tsk = *it;
87 unsigned int cluster = tsk.get_cluster();
88
89 while (cluster >= clusters.size())
90 clusters.push_back(Cluster());
91
92 clusters[cluster].push_back(&tsk);
93 }
94}
95
96
97bool has_higher_priority(const TaskInfo* a, const TaskInfo* b)
98{
99 return a->get_priority() < b->get_priority();
100}
101
102void sort_by_priority(Clusters& clusters)
103{
104 foreach(clusters, it)
105 {
106 Cluster& cluster = *it;
107 std::sort(cluster.begin(), cluster.end(), has_higher_priority);
108 }
109}
110
111typedef std::vector<const RequestBound*> ContentionSet;
112typedef std::vector<ContentionSet> Resources;
113typedef std::vector<Resources> ClusterResources;
114
115typedef std::vector<ContentionSet> AllPerCluster;
116
117static void split_by_resource(const ResourceSharingInfo& info, Resources& resources)
118{
119 foreach(info.get_tasks(), it)
120 {
121 const TaskInfo& tsk = *it;
122
123 foreach(tsk.get_requests(), jt)
124 {
125 const RequestBound& req = *jt;
126 unsigned int res = req.get_resource_id();
127
128 while (res >= resources.size())
129 resources.push_back(ContentionSet());
130
131 resources[res].push_back(&req);
132 }
133 }
134}
135
136static void all_from_cluster(const Cluster& cluster, ContentionSet& cs)
137{
138 foreach(cluster, it)
139 {
140 const TaskInfo* tsk = *it;
141
142 foreach(tsk->get_requests(), jt)
143 {
144 const RequestBound& req = *jt;
145 cs.push_back(&req);
146 }
147 }
148}
149
150static void all_per_cluster(const Clusters& clusters,
151 AllPerCluster& all)
152{
153 foreach(clusters, it)
154 {
155 all.push_back(ContentionSet());
156 all_from_cluster(*it, all.back());
157 }
158}
159
160
161static void split_by_resource(const Cluster& cluster, Resources& resources)
162{
163
164 foreach(cluster, it)
165 {
166 const TaskInfo* tsk = *it;
167
168 foreach(tsk->get_requests(), jt)
169 {
170 const RequestBound& req = *jt;
171 unsigned int res = req.get_resource_id();
172
173 while (res >= resources.size())
174 resources.push_back(ContentionSet());
175
176 resources[res].push_back(&req);
177 }
178 }
179}
180
181static void split_by_resource(const Clusters& clusters,
182 ClusterResources& resources)
183{
184 foreach(clusters, it)
185 {
186 resources.push_back(Resources());
187 split_by_resource(*it, resources.back());
188 }
189}
190
191static void split_by_type(const ContentionSet& requests,
192 ContentionSet& reads,
193 ContentionSet& writes)
194{
195 foreach(requests, it)
196 {
197 const RequestBound *req = *it;
198
199 if (req->get_request_type() == READ)
200 reads.push_back(req);
201 else
202 writes.push_back(req);
203 }
204}
205
206static void split_by_type(const Resources& resources,
207 Resources &reads,
208 Resources &writes)
209{
210 reads.reserve(resources.size());
211 writes.reserve(resources.size());
212 foreach(resources, it)
213 {
214 reads.push_back(ContentionSet());
215 writes.push_back(ContentionSet());
216 split_by_type(*it, reads.back(), writes.back());
217 }
218}
219
220static void split_by_type(const ClusterResources& per_cluster,
221 ClusterResources &reads,
222 ClusterResources &writes)
223{
224 reads.reserve(per_cluster.size());
225 writes.reserve(per_cluster.size());
226 foreach(per_cluster, it)
227 {
228 reads.push_back(Resources());
229 writes.push_back(Resources());
230 split_by_type(*it, reads.back(), writes.back());
231 }
232}
233
234static bool has_longer_request_length(const RequestBound* a,
235 const RequestBound* b)
236{
237 return a->get_request_length() > b->get_request_length();
238}
239
240static void sort_by_request_length(ContentionSet& cs)
241{
242 std::sort(cs.begin(), cs.end(), has_longer_request_length);
243}
244
245static void sort_by_request_length(Resources& resources)
246{
247 apply_foreach(resources, sort_by_request_length);
248}
249
250static void sort_by_request_length(ClusterResources& resources)
251{
252 apply_foreach(resources, sort_by_request_length);
253}
254
255typedef std::vector<ContentionSet> TaskContention;
256typedef std::vector<TaskContention> ClusterContention;
257
258// have one contention set per task
259static void derive_task_contention(const Cluster& cluster,
260 TaskContention& requests)
261{
262 requests.reserve(cluster.size());
263
264 foreach(cluster, it)
265 {
266 const TaskInfo* tsk = *it;
267
268 requests.push_back(ContentionSet());
269
270 foreach(tsk->get_requests(), jt)
271 {
272 const RequestBound& req = *jt;
273
274 requests.back().push_back(&req);
275 }
276 }
277}
278
279static void derive_task_contention(const Clusters& clusters,
280 ClusterContention& contention)
281{
282 map_ref(clusters, contention, TaskContention, derive_task_contention);
283}
284
285static Interference bound_blocking(const ContentionSet& cont,
286 unsigned long interval,
287 unsigned int max_total_requests,
288 unsigned int max_requests_per_source,
289 const TaskInfo* exclude_tsk,
290 // Note: the following parameter excludes
291 // *high-priority* tasks. Used to exclude local higher-priority tasks.
292 // Default: all tasks can block (suitable for remote blocking).
293 unsigned int min_priority = 0)
294{
295 Interference inter;
296 unsigned int remaining;
297
298 remaining = max_total_requests;
299
300 foreach(cont, it)
301 {
302 const RequestBound* req = *it;
303
304 if (!remaining)
305 break;
306
307 // only use this source if it is not excluded
308 if (req->get_task() != exclude_tsk &&
309 req->get_task()->get_priority() >= min_priority)
310 {
311 unsigned int num;
312 // This makes the assumption that there is only one
313 // request object per task. This makes sense if the
314 // contention set has been split by resource. This may
315 // be pessimistic for contention sets that contain
316 // request objects for multiple resources. The
317 // assumption also works out if max_total_requests ==
318 // max_requests_per_source.
319 num = std::min(req->get_max_num_requests(interval),
320 max_requests_per_source);
321 num = std::min(num, remaining);
322
323 inter.total_length += num * req->get_request_length();
324 inter.count += num;
325 remaining -= num;
326 }
327 }
328
329 return inter;
330}
331
332static Interference bound_blocking(const ContentionSet& cont,
333 unsigned long interval,
334 unsigned int max_total_requests,
335 unsigned int max_requests_per_source,
336 bool exclude_whole_cluster,
337 const TaskInfo* exclude_tsk)
338{
339 Interference inter;
340 unsigned int remaining;
341
342 remaining = max_total_requests;
343
344 foreach(cont, it)
345 {
346 const RequestBound* req = *it;
347
348 if (!remaining)
349 break;
350
351 // only use this source if it is not excluded
352 if (req->get_task() != exclude_tsk &&
353 (!exclude_whole_cluster ||
354 req->get_task()->get_cluster() != exclude_tsk->get_cluster()))
355 {
356 unsigned int num;
357 num = std::min(req->get_max_num_requests(interval),
358 max_requests_per_source);
359 num = std::min(num, remaining);
360
361 inter.total_length += num * req->get_request_length();
362 inter.count += num;
363 remaining -= num;
364 }
365 }
366
367 return inter;
368}
369
// Per-cluster request budget for the blocking bound: a cap on the total
// number of charged requests, plus a cap per individual source task.
struct ClusterLimit
{
	unsigned int max_total_requests;
	unsigned int max_requests_per_source;

	ClusterLimit(unsigned int total, unsigned int src) :
		max_total_requests(total), max_requests_per_source(src) {}
};

typedef std::vector<ClusterLimit> ClusterLimits;
380
381static Interference bound_blocking_all_clusters(
382 const ClusterResources& clusters,
383 const ClusterLimits& limits,
384 unsigned int res_id,
385 unsigned long interval,
386 const TaskInfo* exclude_tsk)
387{
388 Interference inter;
389 unsigned int i;
390
391 // add interference from each non-excluded cluster
392 enumerate(clusters, it, i)
393 {
394 const Resources& resources = *it;
395 const ClusterLimit& limit = limits[i];
396
397 if (resources.size() > res_id)
398 inter += bound_blocking(resources[res_id],
399 interval,
400 limit.max_total_requests,
401 limit.max_requests_per_source,
402 exclude_tsk);
403 }
404
405 return inter;
406}
407
408
409static Interference max_local_request_span(const TaskInfo &tsk,
410 const TaskInfos &tasks,
411 const BlockingBounds& bounds)
412{
413 Interference span;
414 unsigned int i = 0;
415
416 enumerate(tasks, it, i)
417 {
418 const TaskInfo& t = *it;
419
420 if (&t != &tsk)
421 {
422 // only consider local, lower-priority tasks
423 if (t.get_cluster() == tsk.get_cluster() &&
424 t.get_priority() >= tsk.get_priority())
425 {
426 Interference b = bounds.get_max_request_span(i);
427 span = std::max(span, bounds.get_max_request_span(i));
428 }
429 }
430 }
431
432 return span;
433}
434
435static void charge_arrival_blocking(const ResourceSharingInfo& info,
436 BlockingBounds& bounds)
437{
438 unsigned int i = 0;
439 const TaskInfos& tasks = info.get_tasks();
440
441 enumerate(tasks, it, i)
442 {
443 Interference inf = max_local_request_span(*it, tasks, bounds);
444 bounds[i] += inf; // charge to total
445 bounds.set_arrival_blocking(i, inf);
446 }
447}
448
449
450// **** blocking term analysis ****
451
452BlockingBounds* global_omlp_bounds(const ResourceSharingInfo& info,
453 unsigned int num_procs)
454{
455 // split every thing by resources, sort, and then start counting.
456 Resources resources;
457
458 split_by_resource(info, resources);
459 sort_by_request_length(resources);
460
461 unsigned int i;
462 BlockingBounds* _results = new BlockingBounds(info);
463 BlockingBounds& results = *_results;
464
465 for (i = 0; i < info.get_tasks().size(); i++)
466 {
467 const TaskInfo& tsk = info.get_tasks()[i];
468 Interference bterm;
469
470 foreach(tsk.get_requests(), jt)
471 {
472 const RequestBound& req = *jt;
473 const ContentionSet& cs =
474 resources[req.get_resource_id()];
475
476 unsigned int num_sources = cs.size();
477 unsigned long interval = tsk.get_response();
478 unsigned long issued = req.get_num_requests();
479
480
481 unsigned int total_limit = (2 * num_procs - 1) * issued;
482 // Derived in the dissertation: at most twice per request.
483 unsigned int per_src_limit = 2 * issued;
484
485 if (num_sources <= num_procs + 1) {
486 // FIFO case: no job is ever skipped in the
487 // priority queue (since at most one job is in
488 // PQ at any time).
489 // Lemma 15 in RTSS'10: at most one blocking
490 // request per source per issued request.
491 per_src_limit = issued;
492 total_limit = (num_sources - 1) * issued;
493 }
494
495 bterm += bound_blocking(cs,
496 interval,
497 total_limit,
498 per_src_limit,
499 &tsk);
500 }
501
502 results[i] = bterm;
503 }
504
505 return _results;
506}
507
508
509BlockingBounds* global_fmlp_bounds(const ResourceSharingInfo& info)
510{
511 // split every thing by resources, sort, and then start counting.
512 Resources resources;
513
514 split_by_resource(info, resources);
515 sort_by_request_length(resources);
516
517
518 unsigned int i;
519 BlockingBounds* _results = new BlockingBounds(info);
520 BlockingBounds& results = *_results;
521
522 unsigned int num_tasks = info.get_tasks().size();
523
524 for (i = 0; i < info.get_tasks().size(); i++)
525 {
526 const TaskInfo& tsk = info.get_tasks()[i];
527 Interference bterm;
528
529
530 foreach(tsk.get_requests(), jt)
531 {
532 const RequestBound& req = *jt;
533 const ContentionSet& cs =
534 resources[req.get_resource_id()];
535
536 unsigned long interval = tsk.get_response();
537 unsigned long issued = req.get_num_requests();
538
539 // every other task may block once per request
540 unsigned int total_limit = (num_tasks - 1) * issued;
541 unsigned int per_src_limit = issued;
542
543 bterm += bound_blocking(cs,
544 interval,
545 total_limit,
546 per_src_limit,
547 &tsk);
548 }
549
550 results[i] = bterm;
551 }
552
553 return _results;
554}
555
556static Interference np_fifo_per_resource(
557 const TaskInfo& tsk, const ClusterResources& clusters,
558 unsigned int procs_per_cluster,
559 unsigned int res_id, unsigned int issued,
560 int dedicated_irq = NO_CPU)
561{
562 const unsigned long interval = tsk.get_response();
563 // At most one blocking request per remote task per
564 // request.
565 const unsigned int per_src_limit = issued;
566
567 ClusterLimits limits;
568 int idx;
569 limits.reserve(clusters.size());
570 enumerate(clusters, ct, idx)
571 {
572 unsigned int total, parallelism = procs_per_cluster;
573
574 if (idx == dedicated_irq)
575 parallelism--;
576
577 if (parallelism && (int) tsk.get_cluster() == idx)
578 parallelism--;
579
580 // At most one blocking request per remote CPU in
581 // cluster per request.
582 total = issued * parallelism;
583 limits.push_back(ClusterLimit(total, per_src_limit));
584 }
585
586 Interference blocking;
587 blocking = bound_blocking_all_clusters(clusters,
588 limits,
589 res_id,
590 interval,
591 &tsk);
592 return blocking;
593}
594
595BlockingBounds* part_omlp_bounds(const ResourceSharingInfo& info)
596{
597 // split everything by partition
598 Clusters clusters;
599
600 split_by_cluster(info, clusters);
601
602 // split each partition by resource
603 ClusterResources resources;
604
605 split_by_resource(clusters, resources);
606
607 // sort each contention set by request length
608 sort_by_request_length(resources);
609
610 // We need for each task the maximum request span. We also need the
611 // maximum direct blocking from remote partitions for each request. We
612 // can determine both in one pass.
613
614 unsigned int i;
615
616 // direct blocking results
617 BlockingBounds* _results = new BlockingBounds(info);
618 BlockingBounds& results = *_results;
619
620 for (i = 0; i < info.get_tasks().size(); i++)
621 {
622 const TaskInfo& tsk = info.get_tasks()[i];
623 Interference bterm;
624
625 foreach(tsk.get_requests(), jt)
626 {
627 const RequestBound& req = *jt;
628
629 Interference blocking;
630
631 blocking = np_fifo_per_resource(
632 tsk, resources, 1,
633 req.get_resource_id(), req.get_num_requests());
634
635 // add in blocking term
636 bterm += blocking;
637
638 // Keep track of maximum request span.
639 // Is this already a single-issue request?
640 if (req.get_num_requests() != 1)
641 // nope, need to recompute
642 blocking = np_fifo_per_resource(
643 tsk, resources, 1,
644 req.get_resource_id(), 1);
645
646 // The span includes our own request.
647 blocking.total_length += req.get_request_length();
648 blocking.count += 1;
649
650 // Update max. request span.
651 results.raise_request_span(i, blocking);
652 }
653
654 results[i] = bterm;
655 }
656
657 charge_arrival_blocking(info, results);
658
659 return _results;
660}
661
662
663BlockingBounds* clustered_omlp_bounds(const ResourceSharingInfo& info,
664 unsigned int procs_per_cluster,
665 int dedicated_irq)
666{
667 // split everything by partition
668 Clusters clusters;
669
670 split_by_cluster(info, clusters);
671
672 // split each partition by resource
673 ClusterResources resources;
674
675 split_by_resource(clusters, resources);
676
677 // sort each contention set by request length
678 sort_by_request_length(resources);
679
680 // We need for each task the maximum request span. We also need the
681 // maximum direct blocking from remote partitions for each request. We
682 // can determine both in one pass.
683
684 unsigned int i;
685
686 // direct blocking results
687 BlockingBounds* _results = new BlockingBounds(info);
688 BlockingBounds& results = *_results;
689
690 for (i = 0; i < info.get_tasks().size(); i++)
691 {
692 const TaskInfo& tsk = info.get_tasks()[i];
693
694 Interference bterm;
695
696 foreach(tsk.get_requests(), jt)
697 {
698 const RequestBound& req = *jt;
699 Interference blocking;
700
701 blocking = np_fifo_per_resource(
702 tsk, resources, procs_per_cluster,
703 req.get_resource_id(),
704 req.get_num_requests(),
705 dedicated_irq);
706
707 // add in blocking term
708 bterm += blocking;
709
710 // Keep track of maximum request span.
711 // Is this already a single-issue request?
712 if (req.get_num_requests() != 1)
713 blocking = np_fifo_per_resource(
714 tsk, resources, procs_per_cluster,
715 req.get_resource_id(), 1);
716
717 // The span includes our own request.
718 blocking.total_length += req.get_request_length();
719 blocking.count += 1;
720 // Update max. request span.
721 results.raise_request_span(i, blocking);
722 }
723
724 results[i] = bterm;
725 }
726
727 // This is the initial delay due to priority donation.
728 charge_arrival_blocking(info, results);
729
730 return _results;
731}
732
// Aggregated per-resource read/write request counts and lengths.
struct RWCount {
	unsigned int res_id;     // resource this entry describes
	unsigned int num_reads;  // total number of read requests
	unsigned int num_writes; // total number of write requests
	unsigned int rlength;    // read critical-section length
	unsigned int wlength;    // write critical-section length

	RWCount(unsigned int id) : res_id(id),
				   num_reads(0),
				   num_writes(0),
				   rlength(0),
				   wlength(0)
	{}
};

typedef std::vector<RWCount> RWCounts;
749
750static void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
751{
752 foreach(tsk.get_requests(), req)
753 {
754 unsigned int res_id = req->get_resource_id();
755
756 while (counts.size() <= res_id)
757 counts.push_back(RWCount(counts.size()));
758
759 if (req->is_read())
760 {
761 counts[res_id].num_reads += req->get_num_requests();
762 counts[res_id].rlength = req->get_request_length();
763 }
764 else
765 {
766 counts[res_id].num_writes += req->get_num_requests();
767 counts[res_id].wlength = req->get_request_length();
768 }
769 }
770}
771
772
773static Interference pf_writer_fifo(
774 const TaskInfo& tsk, const ClusterResources& writes,
775 const unsigned int num_writes,
776 const unsigned int num_reads,
777 const unsigned int res_id,
778 const unsigned int procs_per_cluster,
779 const int dedicated_irq)
780{
781 const unsigned int per_src_wlimit = num_reads + num_writes;
782 const unsigned long interval = tsk.get_response();
783 ClusterLimits limits;
784 int idx;
785
786 limits.reserve(writes.size());
787 enumerate(writes, ct, idx)
788 {
789 unsigned int total, parallelism = procs_per_cluster;
790
791 if (idx == dedicated_irq)
792 parallelism--;
793
794 if (parallelism && (int) tsk.get_cluster() == idx)
795 parallelism--;
796
797 // At most one blocking request per remote CPU in
798 // cluster per request.
799 if (parallelism)
800 total = num_reads + num_writes * parallelism;
801 else
802 // No interference from writers if we are hogging
803 // the only available CPU.
804 total = 0;
805
806 limits.push_back(ClusterLimit(total, per_src_wlimit));
807 }
808
809 Interference blocking;
810 blocking = bound_blocking_all_clusters(writes,
811 limits,
812 res_id,
813 interval,
814 &tsk);
815 return blocking;
816
817}
818
819static Interference pf_reader_all(
820 const TaskInfo& tsk,
821 const Resources& all_reads,
822 const unsigned int num_writes,
823 const unsigned int num_wblock,
824 const unsigned int num_reads,
825 const unsigned int res_id,
826 const unsigned int procs_per_cluster,
827 const unsigned int num_procs)
828{
829 const unsigned long interval = tsk.get_response();
830 Interference blocking;
831 unsigned int rlimit = std::min(num_wblock + num_writes,
832 num_reads + num_writes * (num_procs - 1));
833 blocking = bound_blocking(all_reads[res_id],
834 interval,
835 rlimit,
836 rlimit,
837 // exclude all if c == 1
838 procs_per_cluster == 1,
839 &tsk);
840 return blocking;
841}
842
/*
 * Blocking bounds for the clustered reader-writer OMLP (phase-fair
 * readers, FIFO-ordered writers) under priority donation.
 *
 * For every task, this computes (a) the total direct blocking bound
 * (stored in results[i]) and (b) the maximum request span of any single
 * read or write request (via raise_request_span), which is what priority
 * donation charges to other tasks.
 */
BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info,
					 unsigned int procs_per_cluster,
					 int dedicated_irq)
{
	// split everything by partition
	Clusters clusters;

	split_by_cluster(info, clusters);

	// split each partition by resource
	ClusterResources resources;

	split_by_resource(clusters, resources);

	// split all by resource
	Resources all_task_reqs, all_reads, __all_writes;
	split_by_resource(info, all_task_reqs);
	split_by_type(all_task_reqs, all_reads, __all_writes);

	// sort each contention set by request length
	sort_by_request_length(resources);
	sort_by_request_length(all_reads);

	// split by type --- sorted order is maintained
	ClusterResources __reads, writes;
	split_by_type(resources, __reads, writes);


	// We need for each task the maximum request span. We also need the
	// maximum direct blocking from remote partitions for each request. We
	// can determine both in one pass.

	const unsigned int num_procs = procs_per_cluster * clusters.size();
	unsigned int i;

	// direct blocking results
	BlockingBounds* _results = new BlockingBounds(info);
	BlockingBounds& results = *_results;

	for (i = 0; i < info.get_tasks().size(); i++)
	{
		const TaskInfo& tsk = info.get_tasks()[i];
		RWCounts rwcounts;
		Interference bterm;

		merge_rw_requests(tsk, rwcounts);

		foreach(rwcounts, jt)
		{
			const RWCount& rw = *jt;

			// skip placeholders
			if (!rw.num_reads && !rw.num_writes)
				continue;

			// Blocking across all of tsk's requests for rw.res_id:
			// writer-phase delay (FIFO) + reader-phase delay.
			Interference wblocking, rblocking;

			wblocking = pf_writer_fifo(tsk, writes, rw.num_writes,
						   rw.num_reads, rw.res_id,
						   procs_per_cluster,
						   dedicated_irq);

			rblocking = pf_reader_all(tsk, all_reads, rw.num_writes,
						  wblocking.count, rw.num_reads,
						  rw.res_id, procs_per_cluster,
						  num_procs);

			//**** SINGLE WRITE
			Interference rblocking_w1, wblocking_w1;

			// Keep track of maximum request span.
			// Is this already a single-issue request?
			if (rw.num_writes &&
			    (rw.num_writes != 1 || rw.num_reads != 0))
			{
				// no: recompute the bound for a single write
				wblocking_w1 = pf_writer_fifo(tsk, writes, 1, 0,
							      rw.res_id, procs_per_cluster,
							      dedicated_irq);

				rblocking_w1 = pf_reader_all(
					tsk, all_reads, 1,
					wblocking_w1.count, 0,
					rw.res_id, procs_per_cluster,
					num_procs);
			}
			else if (rw.num_writes)
			{
				// yes: the totals already describe one write
				wblocking_w1 = wblocking;
				rblocking_w1 = rblocking;
			}
			// else: zero, nothing to do

			//**** SINGLE READ

			Interference rblocking_r1, wblocking_r1;


			if (rw.num_reads &&
			    (rw.num_reads != 1 || rw.num_writes != 0))
			{
				// recompute the bound for a single read
				wblocking_r1 = pf_writer_fifo(tsk, writes, 0, 1,
							      rw.res_id, procs_per_cluster,
							      dedicated_irq);

				rblocking_r1 = pf_reader_all(
					tsk, all_reads, 0,
					wblocking_r1.count, 1,
					rw.res_id, procs_per_cluster,
					num_procs);
			}
			else if (rw.num_reads)
			{
				wblocking_r1 = wblocking;
				rblocking_r1 = rblocking;
			}

			// else: zero, nothing to do

			// The span includes our own request.
			if (rw.num_writes)
			{
				wblocking_w1.total_length += rw.wlength;
				wblocking_w1.count += 1;
			}
			if (rw.num_reads)
			{
				// NOTE(review): the length is added to
				// rblocking_r1 but the count to wblocking_r1;
				// since the two are summed just below, the
				// combined single-read span is unaffected.
				rblocking_r1.total_length += rw.rlength;
				wblocking_r1.count += 1;
			}

			// combine write-phase and read-phase contributions
			wblocking_w1 += rblocking_w1;
			wblocking_r1 += rblocking_r1;
			wblocking += rblocking;

			results.raise_request_span(i, wblocking_w1);
			results.raise_request_span(i, wblocking_r1);
			bterm += wblocking;
		}
		results[i] = bterm;
	}

	// This is the initial delay due to priority donation.
	charge_arrival_blocking(info, results);

	return _results;
}
990
991
992BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info,
993 unsigned int procs_per_cluster,
994 int dedicated_irq)
995{
996 // These are structurally equivalent. Therefore, no need to reimplement
997 // everything from scratch.
998 return clustered_omlp_bounds(info, procs_per_cluster, dedicated_irq);
999}
1000
1001
1002BlockingBounds* phase_fair_rw_bounds(const ResourceSharingInfo& info,
1003 unsigned int procs_per_cluster,
1004 int dedicated_irq)
1005{
1006 // These are structurally equivalent. Therefore, no need to reimplement
1007 // everything from scratch.
1008 return clustered_rw_omlp_bounds(info, procs_per_cluster, dedicated_irq);
1009}
1010
1011
/*
 * Greedily pick the longest requests from all_reqs (which must be sorted
 * by decreasing request length) subject to four simultaneous limits:
 * per remote cluster (max_remote_requests), the local cluster
 * (max_local_requests), per task (max_requests), and overall (max_total).
 * Requests issued by tsk itself never count.
 *
 * Counters are lazily initialized on first sight of a task/cluster and
 * decremented as requests are charged; the sorted order guarantees the
 * bound is maximized.
 */
static Interference bound_blocking_all(
	const TaskInfo* tsk,
	const ContentionSet& all_reqs, // presumed sorted, for all clusters/tasks
	const unsigned int max_remote_requests, // per cluster
	const unsigned int max_local_requests, // local cluster
	const unsigned int max_requests, // per task
	unsigned int max_total) // stop after counting max_total
{
	unsigned long interval = tsk->get_response();
	hashmap<unsigned long, unsigned int> task_counter(512);
	hashmap<unsigned long, unsigned int>::iterator tctr;
	hashmap<unsigned int, unsigned int> cluster_counter(64);
	hashmap<unsigned int, unsigned int>::iterator cctr;
	Interference inter;

	// pre-seed the local cluster with its (tighter) budget
	cluster_counter[tsk->get_cluster()] = max_local_requests;

	foreach(all_reqs, it)
	{
		const RequestBound* req = *it;
		const TaskInfo* t = req->get_task();
		// the task's address serves as its hash key
		unsigned long key = (unsigned long) t;
		unsigned int cluster = t->get_cluster();

		if (!max_total)
			// we are done
			break;

		if (t == tsk)
			// doesn't block itself
			continue;

		// make sure we have seen this task
		tctr = task_counter.find(key);
		if (tctr == task_counter.end())
		{
			// first sighting: give it the full per-task budget
			task_counter[key] = max_requests;
			tctr = task_counter.find(key);
		}

		if (!tctr->second)
			continue;

		cctr = cluster_counter.find(cluster);
		if (cctr == cluster_counter.end())
		{
			// first sighting: give it the full per-cluster budget
			cluster_counter[cluster] = max_remote_requests;
			cctr = cluster_counter.find(cluster);
		}

		if (!cctr->second)
			continue;

		// charge as many of this request as the budgets allow
		unsigned int remaining;
		remaining = std::min(tctr->second, cctr->second);
		remaining = std::min(remaining, max_total);
		unsigned int num = std::min(req->get_max_num_requests(interval), remaining);

		inter.total_length += num * req->get_request_length();
		inter.count += num;
		cctr->second -= num;
		tctr->second -= num;
		max_total -= num;
	}

	return inter;
}
1079
1080
1081static Interference tf_reader_all(
1082 const TaskInfo& tsk,
1083 const Resources& all_reads,
1084 const unsigned int num_writes,
1085 const unsigned int num_wblock,
1086 const unsigned int num_reads,
1087 const unsigned int res_id,
1088 const unsigned int procs_per_cluster)
1089{
1090 Interference blocking;
1091 unsigned int num_reqs = num_reads + num_writes;
1092 unsigned int max_reader_phases = num_wblock + num_writes;
1093 unsigned int task_limit = std::min(max_reader_phases, num_reqs);
1094
1095 return bound_blocking_all(
1096 &tsk, all_reads[res_id],
1097 num_reqs * procs_per_cluster,
1098 num_reqs * (procs_per_cluster - 1),
1099 task_limit,
1100 max_reader_phases);
1101}
1102
1103
/*
 * Blocking bounds for task-fair reader-writer spin locks.
 *
 * info     - the task system with read/write request semantics
 * info_mtx - the same task system with every request treated as a write
 *            (mutex view); used as a baseline since a task-fair RW lock
 *            never performs worse than a task-fair mutex
 *
 * For every request, both the RW analysis and the mutex analysis are
 * evaluated and the smaller bound is used (std::min on Interference).
 */
BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info,
				    const ResourceSharingInfo& info_mtx,
				    unsigned int procs_per_cluster,
				    int dedicated_irq)
{
	// split everything by partition
	Clusters clusters, clusters_mtx;

	split_by_cluster(info, clusters);
	split_by_cluster(info_mtx, clusters_mtx);

	// split each partition by resource
	ClusterResources resources, resources_mtx;

	split_by_resource(clusters, resources);
	split_by_resource(clusters_mtx, resources_mtx);

	// split all by resource
	Resources all_task_reqs, all_reads, __all_writes;
	split_by_resource(info, all_task_reqs);
	split_by_type(all_task_reqs, all_reads, __all_writes);

	// sort each contention set by request length
	sort_by_request_length(resources);
	sort_by_request_length(resources_mtx);
	sort_by_request_length(all_reads);

	// split by type --- sorted order is maintained
	ClusterResources __reads, writes;
	split_by_type(resources, __reads, writes);


	// We need for each task the maximum request span. We also need the
	// maximum direct blocking from remote partitions for each request. We
	// can determine both in one pass.

	unsigned int i;

	// direct blocking results
	BlockingBounds* _results = new BlockingBounds(info);
	BlockingBounds& results = *_results;

	for (i = 0; i < info.get_tasks().size(); i++)
	{
		const TaskInfo& tsk = info.get_tasks()[i];
		RWCounts rwcounts;

		Interference bterm;

		merge_rw_requests(tsk, rwcounts);

		foreach(rwcounts, jt)
		{
			const RWCount& rw = *jt;

			// skip placeholders
			if (!rw.num_reads && !rw.num_writes)
				continue;


			// 1) treat it as a mutex as a baseline
			Interference mtx, mtx_1;

			mtx = np_fifo_per_resource(
				tsk, resources_mtx, procs_per_cluster, rw.res_id,
				rw.num_reads + rw.num_writes,
				dedicated_irq);

			if (rw.num_reads + rw.num_writes == 1)
				// already a single-issue bound
				mtx_1 = mtx;
			else
				mtx_1 = np_fifo_per_resource(
					tsk, resources_mtx, procs_per_cluster,
					rw.res_id, 1, dedicated_irq);

			// The span includes our own request.
			mtx_1.total_length += std::max(rw.wlength, rw.rlength);
			mtx_1.count += 1;

			// 2) apply real RW analysis
			Interference wblocking, wblocking_1;
			Interference rblocking, rblocking_r1, rblocking_w1;

			// writer-phase delays for all requests / one request
			wblocking = np_fifo_per_resource(
				tsk, writes, procs_per_cluster, rw.res_id,
				rw.num_reads + rw.num_writes,
				dedicated_irq);
			wblocking_1 = np_fifo_per_resource(
				tsk, writes, procs_per_cluster, rw.res_id, 1,
				dedicated_irq);

			// reader-phase delays for all requests
			rblocking = tf_reader_all(
				tsk, all_reads, rw.num_writes, wblocking.count,
				rw.num_reads, rw.res_id, procs_per_cluster);

			if (rw.num_writes)
			{
				// single write
				rblocking_w1 = tf_reader_all(
					tsk, all_reads, 1, wblocking.count,
					0, rw.res_id, procs_per_cluster);
				// The span includes our own request.
				rblocking_w1.total_length += rw.wlength;
				rblocking_w1.count += 1;
			}
			if (rw.num_reads)
			{
				// single read
				rblocking_r1 = tf_reader_all(
					tsk, all_reads, 0, wblocking.count,
					1, rw.res_id, procs_per_cluster);
				// The span includes our own request.
				rblocking_r1.total_length += rw.rlength;
				rblocking_r1.count += 1;
			}

			// combine: total RW bound and worst single-request span
			wblocking += rblocking;
			wblocking_1 += std::max(rblocking_w1, rblocking_r1);

			// take the better of the RW and the mutex bound
			bterm += std::min(wblocking, mtx);
			results.raise_request_span(i, std::min(wblocking_1, mtx_1));
		}
		results[i] = bterm;
	}

	// This is the initial delay due to priority donation.
	charge_arrival_blocking(info, results);

	return _results;
}
1235
1236
1237/* this analysis corresponds to the FMLP+ in the dissertation */
1238
1239static void pfmlp_count_direct_blocking(const TaskInfo* tsk,
1240 const ClusterResources& resources,
1241 std::vector<Interference>& counts)
1242{
1243 unsigned int interval = tsk->get_response();
1244
1245
1246 // for each resource requested by tsk
1247 foreach(tsk->get_requests(), jt)
1248 {
1249 const RequestBound& req = *jt;
1250 unsigned long issued = req.get_num_requests();
1251 unsigned int res_id = req.get_resource_id();
1252
1253 unsigned int i;
1254
1255 // for each cluster
1256 for (i = 0; i < resources.size(); i++)
1257 {
1258 // count interference... direct blocking will be counted later
1259 // make sure that cluster acceses res_id at all
1260 if (resources[i].size() > res_id)
1261 // yes it does---how often can it block?
1262 counts[i] += bound_blocking(resources[i][res_id],
1263 interval,
1264 UNLIMITED, // no total limit
1265 issued, // once per request
1266 tsk);
1267 }
1268 }
1269}
1270
// Number of issued requests per resource. Indexed by resource id.
typedef std::vector<unsigned int> AccessCounts;
// One AccessCounts table per cluster. Indexed by cluster id.
typedef std::vector<AccessCounts> PerClusterAccessCounts;

// How many times does a task issue requests that can
// conflict with tasks in a remote cluster. Indexed by cluster id.
typedef std::vector<unsigned int> IssuedRequests;
// Issued requests for each task. Indexed by task id.
typedef std::vector<IssuedRequests> PerTaskIssuedCounts;
1279
1280static void derive_access_counts(const ContentionSet &cluster_contention,
1281 AccessCounts &counts)
1282{
1283 foreach(cluster_contention, it)
1284 {
1285 const RequestBound *req = *it;
1286 unsigned int res_id = req->get_resource_id();
1287
1288 while (counts.size() <= res_id)
1289 counts.push_back(0);
1290
1291 counts[res_id] += req->get_num_requests();
1292 }
1293}
1294
1295static void count_accesses_for_task(const TaskInfo& tsk,
1296 const PerClusterAccessCounts& acc_counts,
1297 IssuedRequests& ireqs)
1298{
1299 foreach(acc_counts, it)
1300 {
1301 const AccessCounts &ac = *it;
1302 unsigned int count = 0;
1303
1304 // Check for each request of the task to see
1305 // if it conflicts with the cluster.
1306 foreach(tsk.get_requests(), jt)
1307 {
1308 const RequestBound &req = *jt;
1309 unsigned int res_id = req.get_resource_id();
1310 if (ac.size() > res_id && ac[res_id] > 0)
1311 {
1312 // cluster acceses res_id as well
1313 count += req.get_num_requests();
1314 }
1315 }
1316 ireqs.push_back(count);
1317 }
1318}
1319
/*
 * For every task, determine how many of its requests can conflict with
 * each cluster (one IssuedRequests vector per task, indexed by task id).
 */
static void derive_access_counts(const AllPerCluster &per_cluster,
				 const ResourceSharingInfo &info,
				 PerTaskIssuedCounts &issued_reqs)
{
	PerClusterAccessCounts counts;

	/* which resources are accessed by each cluster? */
	map_ref(per_cluster, counts, AccessCounts, derive_access_counts);

	issued_reqs.reserve(info.get_tasks().size());

	foreach(info.get_tasks(), it)
	{
		// one entry per task, filled in by the helper
		issued_reqs.push_back(IssuedRequests());
		count_accesses_for_task(*it, counts, issued_reqs.back());
	}
}
1337
/*
 * FMLP+: bound direct and transitive (boost) blocking from remote
 * clusters.
 *
 * icounts - per-cluster count of tsk's potentially conflicting requests
 * counts  - per-cluster direct-blocking counts (from
 *           pfmlp_count_direct_blocking)
 *
 * Each remote task is charged at most once per direct-blocking event in
 * its cluster, and never more often than tsk issues conflicting
 * requests there.
 */
static Interference pfmlp_bound_remote_blocking(const TaskInfo* tsk,
						const IssuedRequests &icounts,
						const std::vector<Interference>& counts,
						const ClusterContention& contention)
{
	unsigned int i;

	unsigned long interval = tsk->get_response();
	Interference blocking;

	// for each cluster
	for (i = 0; i < contention.size(); i++)
	{
		// Each task can either directly or indirectly block tsk
		// each time that tsk is directly blocked, but no more than
		// once per request issued by tsk.
		unsigned int max_per_task = std::min(counts[i].count, icounts[i]);

		// skip local cluster and independent clusters
		if (i == tsk->get_cluster() || !max_per_task)
			continue;

		Interference b;

		// for each task in cluster
		foreach(contention[i], it)
		{

			// count longest critical sections
			b += bound_blocking(*it,
					    interval,
					    max_per_task,
					    UNLIMITED, // no limit per source
					    tsk);
		}

		blocking += b;
	}
	return blocking;
}
1378
1379static Interference pfmlp_bound_np_blocking(const TaskInfo* tsk,
1380 const std::vector<Interference>& counts,
1381 const AllPerCluster& per_cluster)
1382{
1383 unsigned int i;
1384
1385 unsigned long interval = tsk->get_response();
1386 Interference blocking;
1387
1388 // for each cluster
1389 for (i = 0; i < per_cluster.size(); i++)
1390 {
1391 // skip local cluster, this is only remote
1392 if (i == tsk->get_cluster())
1393 continue;
1394
1395 // could be the same task each time tsk is directly blocked
1396 unsigned int max_direct = counts[i].count;
1397 Interference b;
1398
1399 // count longest critical sections
1400 b += bound_blocking(per_cluster[i],
1401 interval,
1402 max_direct,
1403 max_direct,
1404 tsk);
1405 blocking += b;
1406 }
1407 return blocking;
1408}
1409
1410static Interference pfmlp_bound_local_blocking(const TaskInfo* tsk,
1411 const std::vector<Interference>& counts,
1412 const ClusterContention& contention)
1413{
1414 // Locally, we have to account two things.
1415 // 1) Direct blocking from lower-priority tasks.
1416 // 2) Boost blocking from lower-priority tasks.
1417 // (Higher-priority requests are not counted as blocking.)
1418 // Since lower-priority jobs are boosted while
1419 // they directly block, 1) is subsumed by 2).
1420 // Lower-priority tasks cannot issue requests while a higher-priority
1421 // job executes. Therefore, at most one blocking request
1422 // is issued prior to the release of the job under analysis,
1423 // and one prior to each time that the job under analysis resumes.
1424
1425 Interference blocking;
1426 Interference num_db = std::accumulate(counts.begin(), counts.end(),
1427 Interference());
1428 unsigned int num_arrivals = std::min(tsk->get_num_arrivals(),
1429 num_db.count + 1);
1430 unsigned long interval = tsk->get_response();
1431
1432 const TaskContention& cont = contention[tsk->get_cluster()];
1433
1434 // for each task in cluster
1435 foreach(cont, it)
1436 {
1437 // count longest critical sections
1438 blocking += bound_blocking(*it,
1439 interval,
1440 num_arrivals,
1441 UNLIMITED, // no limit per source
1442 tsk,
1443 tsk->get_priority());
1444 }
1445
1446 return blocking;
1447}
1448
/*
 * Blocking bounds for the partitioned FMLP+ (preemptive == true) and its
 * non-preemptive variant.
 */
BlockingBounds* part_fmlp_bounds(const ResourceSharingInfo& info, bool preemptive)
{
	// split everything by partition
	Clusters clusters;

	split_by_cluster(info, clusters);

	// split each partition by resource
	ClusterResources resources;
	split_by_resource(clusters, resources);

	// find interference on a per-task basis
	ClusterContention contention;
	derive_task_contention(clusters, contention);

	// sort each contention set by request length
	sort_by_request_length(contention);

	// find total interference on a per-cluster basis
	AllPerCluster per_cluster;
	PerTaskIssuedCounts access_counts;

	all_per_cluster(clusters, per_cluster);
	sort_by_request_length(per_cluster);

	// which of each task's requests can conflict with which cluster?
	derive_access_counts(per_cluster, info, access_counts);

	// We need to find two blocking sources. Direct blocking (i.e., jobs
	// that are enqueued prior to the job under analysis) and boost
	// blocking, which occurs when the job under analysis is delayed
	// because some other job is priority-boosted. Boost blocking can be
	// local and transitive from remote CPUs. To compute this correctly,
	// we need to count how many times some job on a remote CPU can directly
	// block the job under analysis. So we first compute direct blocking
	// and count on which CPUs a job can be blocked.

	unsigned int i;

	// direct blocking results
	BlockingBounds* _results = new BlockingBounds(info);
	BlockingBounds& results = *_results;

	for (i = 0; i < info.get_tasks().size(); i++)
	{
		const TaskInfo& tsk = info.get_tasks()[i];
		std::vector<Interference> counts(resources.size());
		Interference remote, local;

		// Determine counts.
		pfmlp_count_direct_blocking(&tsk, resources, counts);

		// Find longest remote requests.
		remote = pfmlp_bound_remote_blocking(&tsk, access_counts[i], counts,
						     contention);

		// Add in local boost blocking.
		local = pfmlp_bound_local_blocking(&tsk, counts, contention);

		if (!preemptive)
		{
			// Charge for additional delays due to remote non-preemptive
			// sections.
			remote += pfmlp_bound_np_blocking(&tsk, counts, per_cluster);
		}
		results[i] = remote + local;
		results.set_remote_blocking(i, remote);
		results.set_local_blocking(i, local);
	}

	return _results;
}
1520
1521// *************************** MPCP ******************************************
1522
1523
1524typedef std::vector<unsigned int> PriorityCeilings;
1525
1526static void determine_priority_ceilings(const Resources& resources,
1527 PriorityCeilings& ceilings)
1528{
1529 ceilings.reserve(resources.size());
1530
1531 foreach(resources, it)
1532 {
1533 unsigned int ceiling = UINT_MAX;
1534 const ContentionSet& cs = *it;
1535
1536 foreach(cs, jt)
1537 {
1538 const RequestBound* req = *jt;
1539 ceiling = std::min(ceiling, req->get_task()->get_priority());
1540 }
1541
1542 ceilings.push_back(ceiling);
1543 }
1544}
1545
1546typedef std::vector<unsigned long> ResponseTimes;
1547typedef std::vector<ResponseTimes> TaskResponseTimes;
1548typedef std::vector<TaskResponseTimes> ClusterResponseTimes;
1549
1550static unsigned long get_max_gcs_length(const TaskInfo* tsk,
1551 const PriorityCeilings& ceilings,
1552 unsigned int preempted_ceiling)
1553{
1554 unsigned long gcs_length = 0;
1555
1556 foreach(tsk->get_requests(), it)
1557 {
1558 unsigned int prio = ceilings[it->get_resource_id()];
1559 if (prio < preempted_ceiling)
1560 gcs_length = std::max(gcs_length,
1561 (unsigned long) it->get_request_length());
1562 }
1563
1564 return gcs_length;
1565}
1566
1567static void determine_gcs_response_times(const TaskInfo* tsk,
1568 const Cluster& cluster,
1569 const PriorityCeilings& ceilings,
1570 ResponseTimes& times)
1571{
1572 times.reserve(tsk->get_requests().size());
1573
1574 foreach(tsk->get_requests(), it)
1575 {
1576 unsigned long resp = it->get_request_length();
1577 unsigned int prio = ceilings[it->get_resource_id()];
1578
1579 // Equation (2) in LNR:09.
1580 // One request of each local gcs that can preempt our ceiling,
1581 // but at most one per task (since tasks are sequential).
1582
1583 foreach(cluster, jt)
1584 {
1585 const TaskInfo* t = *jt;
1586
1587 if (t != tsk)
1588 resp += get_max_gcs_length(t, ceilings, prio);
1589 }
1590
1591 times.push_back(resp);
1592 }
1593}
1594
1595static void determine_gcs_response_times(const Cluster& cluster,
1596 const PriorityCeilings& ceilings,
1597 TaskResponseTimes& times)
1598{
1599 times.reserve(cluster.size());
1600 foreach(cluster, it)
1601 {
1602 times.push_back(ResponseTimes());
1603 determine_gcs_response_times(*it, cluster, ceilings,
1604 times.back());
1605 }
1606}
1607
1608static void determine_gcs_response_times(const Clusters& clusters,
1609 const PriorityCeilings& ceilings,
1610 ClusterResponseTimes& times)
1611{
1612 times.reserve(clusters.size());
1613 foreach(clusters, it)
1614 {
1615 times.push_back(TaskResponseTimes());
1616 determine_gcs_response_times(*it, ceilings, times.back());
1617 }
1618}
1619
1620static unsigned long response_time_for(unsigned int res_id,
1621 unsigned long interval,
1622 const TaskInfo* tsk,
1623 const ResponseTimes& resp,
1624 bool multiple)
1625{
1626 const Requests& requests = tsk->get_requests();
1627 unsigned int i = 0;
1628
1629 for (i = 0; i < requests.size(); i++)
1630 if (requests[i].get_resource_id() == res_id)
1631 {
1632 if (multiple)
1633 {
1634 // Equation (3) in LNR:09.
1635 // How many jobs?
1636 unsigned long num_jobs;
1637 num_jobs = divide_with_ceil(interval, tsk->get_period());
1638 num_jobs += 1;
1639
1640 // Note: this may represent multiple gcs, so multiply.
1641 return num_jobs * resp[i] * requests[i].get_num_requests();
1642 }
1643 else
1644 // Just one request.
1645 return resp[i];
1646 }
1647 // if we get here, then the task does not access res_id
1648 return 0;
1649}
1650
1651static unsigned long mpcp_remote_blocking(unsigned int res_id,
1652 unsigned long interval,
1653 const TaskInfo* tsk,
1654 const Cluster& cluster,
1655 const TaskResponseTimes times,
1656 unsigned long& max_lower)
1657{
1658 unsigned int i;
1659 unsigned long blocking = 0;
1660
1661 // consider each task in cluster
1662 for (i = 0; i < cluster.size(); i++)
1663 {
1664 const TaskInfo* t = cluster[i];
1665 if (t != tsk)
1666 {
1667 if (t->get_priority() < tsk->get_priority())
1668 // This is a higher-priority task;
1669 // it can block multiple times.
1670 blocking += response_time_for(res_id, interval,
1671 t, times[i], true);
1672 else
1673 // This is a lower-priority task;
1674 // it can block only once.
1675 max_lower = std::max(max_lower,
1676 response_time_for(res_id, interval,
1677 t, times[i], false));
1678 }
1679 }
1680
1681 return blocking;
1682}
1683
1684static unsigned long mpcp_remote_blocking(unsigned int res_id,
1685 unsigned long interval,
1686 const TaskInfo* tsk,
1687 const Clusters& clusters,
1688 const ClusterResponseTimes times,
1689 unsigned long& max_lower)
1690{
1691 unsigned int i;
1692 unsigned long blocking;
1693
1694 max_lower = 0;
1695 blocking = 0;
1696
1697 for (i = 0; i < clusters.size(); i++)
1698 {
1699 blocking += mpcp_remote_blocking(res_id, interval,
1700 tsk, clusters[i], times[i],
1701 max_lower);
1702 }
1703 return blocking;
1704}
1705
1706static unsigned long mpcp_remote_blocking(unsigned int res_id,
1707 const TaskInfo* tsk,
1708 const Clusters& clusters,
1709 const ClusterResponseTimes times)
1710{
1711 unsigned long interval;
1712 unsigned long blocking = 1;
1713 unsigned long max_lower;
1714
1715 do
1716 {
1717 // last bound
1718 interval = blocking;
1719 // Bail out if it doesn't converge.
1720 if (interval > tsk->get_response())
1721 return UNLIMITED;
1722
1723 blocking = mpcp_remote_blocking(res_id, interval,
1724 tsk, clusters, times,
1725 max_lower);
1726
1727 // Account for the maximum lower-priority gcs
1728 // that could get in the way.
1729 blocking += max_lower;
1730
1731 // Loop until it converges.
1732 } while ( interval != blocking );
1733
1734 return blocking;
1735}
1736
1737static unsigned long mpcp_remote_blocking(const TaskInfo* tsk,
1738 const Clusters& clusters,
1739 const ClusterResponseTimes times)
1740{
1741 unsigned long blocking = 0;
1742
1743
1744 const Requests& requests = tsk->get_requests();
1745 unsigned int i = 0;
1746
1747 for (i = 0; i < requests.size(); i++)
1748 {
1749 unsigned int b;
1750 b = mpcp_remote_blocking(requests[i].get_resource_id(),
1751 tsk, clusters, times);
1752 if (b != UNLIMITED)
1753 // may represent multiple, multiply accordingly
1754 blocking += b * requests[i].get_num_requests();
1755 else
1756 // bail out if it didn't converge
1757 return b;
1758 }
1759
1760 return blocking;
1761}
1762
1763static unsigned long mpcp_arrival_blocking(const TaskInfo* tsk,
1764 const Cluster& cluster,
1765 bool virtual_spinning)
1766{
1767 unsigned int prio = tsk->get_priority();
1768 unsigned int blocking = 0;
1769 unsigned int i;
1770
1771 for (i = 0; i < cluster.size(); i++)
1772 if (cluster[i] != tsk && cluster[i]->get_priority() >= prio)
1773 blocking += cluster[i]->get_max_request_length();
1774
1775 if (virtual_spinning)
1776 // Equation (4) in LNR:09.
1777 return blocking;
1778 else
1779 // Equation (1) in LNR:09.
1780 return blocking * tsk->get_num_arrivals();
1781}
1782
/*
 * Blocking bounds for the MPCP (Multiprocessor Priority Ceiling
 * Protocol), following LNR:09, with optional virtual spinning.
 */
BlockingBounds* mpcp_bounds(const ResourceSharingInfo& info,
			    bool use_virtual_spinning)
{
	Resources resources;
	Clusters clusters;

	// 1) Split the task system by resource and by partition.
	split_by_resource(info, resources);
	split_by_cluster(info, clusters);

	// 2) Determine priority ceiling for each request.
	PriorityCeilings gc;
	determine_priority_ceilings(resources, gc);


	// 3) For each request, determine response time. This only depends on the
	//    priority ceiling for each request.
	ClusterResponseTimes responses;
	determine_gcs_response_times(clusters, gc, responses);

	unsigned int i;

	BlockingBounds* _results = new BlockingBounds(info);
	BlockingBounds& results = *_results;

	for (i = 0; i < info.get_tasks().size(); i++)
	{
		const TaskInfo& tsk = info.get_tasks()[i];

		unsigned long remote, local = 0;

		// 4) Determine remote blocking for each request. This depends on the
		//    response times for each remote request.
		remote = mpcp_remote_blocking(&tsk, clusters, responses);

		// 5) Determine arrival blocking for each task
		//    (skipped if the remote bound already diverged).
		if (remote != UNLIMITED)
			local = mpcp_arrival_blocking(&tsk, clusters[tsk.get_cluster()],
						      use_virtual_spinning);

		// 6) Sum up blocking: remote blocking + arrival blocking.
		results[i].total_length = remote + local;


		Interference inf;
		inf.total_length = remote;
		results.set_remote_blocking(i, inf);
	}

	return _results;
}
1833
1834
1835// ************************************************** DPCP **************
1836/*
1837
1838 DPCP blocking terms (Rajkumar, 1991, page 87):
1839
1840 1) Local PCP blocking => does not apply here, we only care about global
1841 resources.
1842
1843 2) A lower-priority gcs on a remote proc each time that Ji issues a request.
1844
1845 3) All requests of all higher-priority tasks on all remote processors that Ji
1846 accesses.
1847
1848 4) Global critical sections on Ji's CPU. Since gcs are not part of the job
1849 execution time in our model, it does not matter whether the local gcs's
1850 belong to lower or higher-priority tasks.
1851 */
1852
1853
1854static void split_by_locality(const ResourceSharingInfo& info,
1855 const ResourceLocality& locality,
1856 AllPerCluster& per_cluster)
1857{
1858 foreach(info.get_tasks(), it)
1859 {
1860 while (it->get_cluster() >= per_cluster.size())
1861 per_cluster.push_back(ContentionSet());
1862
1863 foreach(it->get_requests(), jt)
1864 {
1865 const RequestBound &req = *jt;
1866 int cpu = locality[req.get_resource_id()];
1867
1868 if (cpu == NO_CPU)
1869 // NO_CPU => dedicated synchronization processor
1870 continue;
1871
1872 while ((unsigned int) cpu >= per_cluster.size())
1873 per_cluster.push_back(ContentionSet());
1874
1875 per_cluster[cpu].push_back(&req);
1876 }
1877 }
1878}
1879
1880static unsigned int count_requests_to_cpu(
1881 const TaskInfo& tsk,
1882 const ResourceLocality& locality,
1883 int cpu)
1884{
1885 unsigned int count = 0;
1886
1887 foreach(tsk.get_requests(), req)
1888 if (locality[req->get_resource_id()] == cpu)
1889 count += req->get_num_requests();
1890
1891 return count;
1892}
1893
1894static Interference bound_blocking_dpcp(
1895 const TaskInfo* tsk,
1896 const ContentionSet& cont,
1897 unsigned int max_lower_prio)
1898{
1899 Interference inter;
1900 const unsigned int interval = tsk->get_response();
1901
1902 // assumption: cont is ordered by request length
1903 foreach(cont, it)
1904 {
1905 const RequestBound* req = *it;
1906
1907 // can't block itself
1908 if (req->get_task() != tsk)
1909 {
1910 unsigned int num;
1911 if (req->get_task()->get_priority() < tsk->get_priority())
1912 {
1913 // higher prio => all of them
1914 num = req->get_max_num_requests(interval);
1915 inter.count += num;
1916 inter.total_length += num * req->get_request_length();
1917 }
1918 else if (max_lower_prio)
1919 {
1920 // lower prio => only remaining
1921 num = std::min(req->get_max_num_requests(interval), max_lower_prio);
1922 inter.count += num;
1923 inter.total_length += num * req->get_request_length();
1924 max_lower_prio -= num;
1925 }
1926 }
1927 }
1928
1929 return inter;
1930}
1931
1932static Interference dpcp_remote_bound(
1933 const TaskInfo& tsk,
1934 const ResourceLocality& locality,
1935 const AllPerCluster& per_cpu)
1936{
1937 Interference blocking;
1938 unsigned int cpu = 0;
1939
1940 foreach(per_cpu, it)
1941 {
1942 // this is about remote delays
1943 if (cpu != tsk.get_cluster())
1944 {
1945 const ContentionSet &cs = *it;
1946 unsigned int reqs;
1947 reqs = count_requests_to_cpu(tsk, locality, cpu);
1948
1949 if (reqs > 0)
1950 blocking += bound_blocking_dpcp(&tsk, cs, reqs);
1951 }
1952 cpu++;
1953 }
1954
1955 return blocking;
1956}
1957
1958
1959static Interference dpcp_local_bound(
1960 const TaskInfo* tsk,
1961 const ContentionSet& local)
1962{
1963 Interference blocking;
1964 const unsigned int interval = tsk->get_response();
1965
1966 foreach(local, it)
1967 {
1968 const RequestBound* req = *it;
1969 if (req->get_task() != tsk)
1970 {
1971 unsigned int num;
1972 num = req->get_max_num_requests(interval);
1973 blocking.count += num;
1974 blocking.total_length += num * req->get_request_length();
1975 }
1976 }
1977
1978 return blocking;
1979}
1980
1981
1982BlockingBounds* dpcp_bounds(const ResourceSharingInfo& info,
1983 const ResourceLocality& locality)
1984{
1985 AllPerCluster per_cpu;
1986
1987 split_by_locality(info, locality, per_cpu);
1988 sort_by_request_length(per_cpu);
1989
1990 BlockingBounds* _results = new BlockingBounds(info);
1991 BlockingBounds& results = *_results;
1992
1993 for (unsigned int i = 0; i < info.get_tasks().size(); i++)
1994 {
1995 const TaskInfo& tsk = info.get_tasks()[i];
1996 Interference remote, local;
1997
1998 remote = dpcp_remote_bound(tsk, locality, per_cpu);
1999 local = dpcp_local_bound(&tsk, per_cpu[tsk.get_cluster()]);
2000
2001 results[i] = remote + local;
2002 results.set_remote_blocking(i, remote);
2003 results.set_local_blocking(i, local);
2004 }
2005 return _results;
2006}
2007
diff --git a/native/src/tasks.cpp b/native/src/tasks.cpp
new file mode 100644
index 0000000..9875b65
--- /dev/null
+++ b/native/src/tasks.cpp
@@ -0,0 +1,232 @@
1#include <algorithm> // for max
2#include <string.h>
3
4#include <vector>
5
6#include <iostream>
7
8#include "tasks.h"
9#include "task_io.h"
10
11void Task::init(unsigned long wcet,
12 unsigned long period,
13 unsigned long deadline)
14{
15 this->wcet = wcet;
16 this->period = period;
17 if (!deadline)
18 this->deadline = period; // implicit
19 else
20 this->deadline = deadline;
21}
22
23bool Task::has_implicit_deadline() const
24{
25 return deadline == period;
26}
27
28bool Task::has_constrained_deadline() const
29{
30 return deadline <= period;
31}
32
33bool Task::is_feasible() const
34{
35 return get_deadline() >= get_wcet()
36 && get_period() >= get_wcet()
37 && get_wcet() > 0;
38}
39
40void Task::get_utilization(mpq_class &util) const
41{
42 // assumes period != 0
43 util = get_wcet();
44 util /= get_period();
45}
46
47void Task::get_density(mpq_class &density) const
48{
49 // assumes deadline != 0
50 density = get_wcet();
51 density /= get_deadline();
52}
53
// Render a task as "Task(wcet, period[, deadline])"; the deadline is
// printed only when it is not implicit.
std::ostream& operator<<(std::ostream &os, const Task &t)
{
	os << "Task(" << t.get_wcet() << ", " << t.get_period();
	if (!t.has_implicit_deadline())
		os << ", " << t.get_deadline();
	os << ")";
	return os;
}
62
// Default constructor: empty task set.
TaskSet::TaskSet()
{
}

// Copy constructor: deep-copies the task vector.
TaskSet::TaskSet(const TaskSet &original) : tasks(original.tasks)
{
}

TaskSet::~TaskSet()
{
}
74
/* Expand to a complete function body that returns false as soon as
 * 'pred' does not hold for some task index i, and true otherwise.
 * Must be the entire body of a boolean member function of TaskSet. */
#define FORALL(i, pred) \
	for (unsigned int i = 0; i < tasks.size(); i++) \
	{ \
		if (!pred) \
			return false; \
	} \
	return true; \

// True iff every task's deadline equals its period.
bool TaskSet::has_only_implicit_deadlines() const
{
	FORALL(i, tasks[i].has_implicit_deadline());
}
87
// True iff no task's deadline exceeds its period.
bool TaskSet::has_only_constrained_deadlines() const
{
	FORALL(i, tasks[i].has_constrained_deadline());
}
92
// True iff every task is individually feasible (see Task::is_feasible).
bool TaskSet::has_only_feasible_tasks() const
{
	FORALL(i, tasks[i].is_feasible());
}
97
98void TaskSet::get_utilization(mpq_class &util) const
99{
100 mpq_class tmp;
101 util = 0;
102 for (unsigned int i = 0; i < tasks.size(); i++)
103 {
104 tasks[i].get_utilization(tmp);
105 util += tmp;
106 }
107}
108
109void TaskSet::get_density(mpq_class &density) const
110{
111 mpq_class tmp;
112 density = 0;
113 for (unsigned int i = 0; i < tasks.size(); i++)
114 {
115 tasks[i].get_density(tmp);
116 density += tmp;
117 }
118}
119
120void TaskSet::get_max_density(mpq_class &max_density) const
121{
122 mpq_class tmp;
123 max_density = 0;
124
125 for (unsigned int i = 0; i < tasks.size(); i++)
126 {
127 tasks[i].get_density(tmp);
128 max_density = std::max(max_density, tmp);
129 }
130}
131
132bool TaskSet::is_not_overutilized(unsigned int num_processors) const
133{
134 mpq_class util;
135 get_utilization(util);
136 return util <= num_processors;
137}
138
// Lemma 7 in FBB:06.
// Number of per-task test points needed so that the approximate load
// is within 'epsilon': k >= n * u_idx / epsilon - d_idx/p_idx, clamped
// at zero.  NOTE(review): the final ceil goes through a double via
// get_d(); exactness for extreme parameter values is not guaranteed.
unsigned long TaskSet::k_for_epsilon(unsigned int idx,
				     const mpq_class &epsilon) const
{
	mpq_class bound;
	// deadline-to-period ratio of the task
	mpq_class dp_ratio(tasks[idx].get_deadline(),
			   tasks[idx].get_period());

	// bound = n * u_idx / epsilon - d_idx/p_idx
	tasks[idx].get_utilization(bound);
	bound *= tasks.size();
	bound /= epsilon;
	bound -= dp_ratio;

	// a negative bound means no additional test points are needed
	return (unsigned long) ceil(std::max(0.0, bound.get_d()));
}
154
155void TaskSet::approx_load(mpq_class &load, const mpq_class &epsilon) const
156{
157 mpq_class density;
158
159 get_density(density);
160 get_utilization(load);
161
162 if (density > load)
163 {
164 // ok, actually have to do the work;
165 load += epsilon;
166
167 std::vector<unsigned long> k;
168 k.reserve(tasks.size());
169
170 unsigned long total_times = tasks.size();
171
172 for (unsigned int i = 0; i < tasks.size(); i++)
173 {
174 k[i] = k_for_epsilon(i, epsilon);
175 total_times += k[i];
176 }
177
178 std::cout << "total_times = " << total_times << std::endl;
179
180 std::vector<mpz_class> times;
181 times.reserve(total_times);
182
183 // determine all test points
184 for (unsigned int i = 0; i < tasks.size(); i++)
185 {
186 mpz_class time = tasks[i].get_deadline();
187
188 for (unsigned long j = 0; j <= k[i]; j++)
189 {
190 times.push_back(time);
191 time += tasks[i].get_period();
192 }
193 }
194
195 // sort times
196 std::sort(times.begin(), times.end());
197
198 // iterate through test points
199 mpz_class last = 0;
200
201 for (unsigned int t = 0; t < total_times; t++)
202 {
203 // avoid redundant check
204 if (times[t] > last)
205 {
206 mpq_class load_at_point = 0;
207 mpq_class tmp;
208
209 // compute approximate load at point
210 for (unsigned int i = 0; i < tasks.size(); i++)
211 {
212 tasks[i].approx_load(times[t], tmp, k[i]);
213 load_at_point += tmp;
214 }
215
216 // check if we have a new maximum
217
218 if (load_at_point > density)
219 {
220 // reached threshold, can stop iteration
221 load = density;
222 return;
223 }
224 else if (load_at_point > load)
225 load = load_at_point;
226
227 last = times[t];
228 }
229 }
230
231 }
232}
diff --git a/native/src/testmain.cpp b/native/src/testmain.cpp
new file mode 100644
index 0000000..eeeb96b
--- /dev/null
+++ b/native/src/testmain.cpp
@@ -0,0 +1,912 @@
1#include <iostream>
2
3#include "tasks.h"
4#include "task_io.h"
5#include "schedulability.h"
6
7#include "sharedres.h"
8#include "res_io.h"
9
10#include "edf/baker.h"
11#include "edf/baruah.h"
12#include "edf/gfb.h"
13#include "edf/bcl.h"
14#include "edf/bcl_iterative.h"
15#include "edf/gedf.h"
16#include "edf/sim.h"
17
18#include "event.h"
19#include "schedule_sim.h"
20
21#include "math-helper.h"
22
23using namespace std;
24
// Smoke test: run every G-EDF schedulability test on a tiny three-task
// set on two processors.  Results are printed, not checked.
void test_baker()
{
	TaskSet ts = TaskSet();

	ts.add_task(49, 100);
	ts.add_task(49, 100);
	ts.add_task(2, 100, 50); // constrained deadline

	BakerGedf t = BakerGedf(2);
	cout << "Baker schedulable? : " << t.is_schedulable(ts) << endl;

	GFBGedf gfb = GFBGedf(2);
	cout << "GFB schedulable? : " << gfb.is_schedulable(ts) << endl;

	cout << "BCL schedulable? : " << BCLGedf(2).is_schedulable(ts) << endl;

	cout << "Baruah schedulable? : " << BaruahGedf(2).is_schedulable(ts)
	     << endl;
	cout << "BCL Iter. sched.? : " << BCLIterativeGedf(2).is_schedulable(ts)
	     << endl;

	cout << "G-EDF schedulable? : " << GlobalEDF(2).is_schedulable(ts) << endl;
}
48
49TaskSet* init_baruah()
50{
51 TaskSet* rts = new TaskSet();
52 TaskSet& ts = *rts;
53
54 ts.add_task(544, 89000);
55 ts.add_task(7038, 96000);
56 ts.add_task(8213, 91000);
57 ts.add_task(2937, 39000);
58 ts.add_task(3674, 51000);
59 ts.add_task(3758, 97000);
60 ts.add_task(91, 31000);
61 ts.add_task(4960, 55000);
62 ts.add_task(3888, 89000);
63 ts.add_task(1187, 32000);
64 ts.add_task(2393, 44000);
65 ts.add_task(1513, 17000);
66 ts.add_task(2264, 38000);
67 ts.add_task(6660, 84000);
68 ts.add_task(1183, 96000);
69 ts.add_task(4810, 95000);
70 ts.add_task(1641, 20000);
71 ts.add_task(3968, 71000);
72 ts.add_task(280, 82000);
73 ts.add_task(4259, 51000);
74 ts.add_task(1981, 70000);
75 ts.add_task(393, 34000);
76 ts.add_task(3882, 93000);
77 ts.add_task(5921, 68000);
78 ts.add_task(901, 21000);
79 ts.add_task(2166, 40000);
80 ts.add_task(1532, 17000);
81 ts.add_task(1159, 36000);
82 ts.add_task(2170, 89000);
83 ts.add_task(8770, 91000);
84 ts.add_task(1643, 48000);
85 ts.add_task(110, 69000);
86 ts.add_task(1300, 84000);
87 ts.add_task(1488, 20000);
88 ts.add_task(2031, 21000);
89 ts.add_task(7139, 95000);
90 ts.add_task(3905, 63000);
91 ts.add_task(8126, 82000);
92 ts.add_task(6309, 82000);
93 ts.add_task(7386, 80000);
94 ts.add_task(5044, 83000);
95 ts.add_task(425, 77000);
96 ts.add_task(1439, 38000);
97 ts.add_task(6332, 74000);
98 ts.add_task(1237, 62000);
99 ts.add_task(2547, 32000);
100 ts.add_task(1196, 12000);
101 ts.add_task(9996, 100000);
102 ts.add_task(2730, 31000);
103 ts.add_task(773, 48000);
104 ts.add_task(3894, 59000);
105 ts.add_task(1234, 39000);
106 ts.add_task(1585, 34000);
107 ts.add_task(1905, 67000);
108 ts.add_task(3440, 62000);
109 ts.add_task(678, 24000);
110 ts.add_task(7211, 97000);
111 ts.add_task(1453, 60000);
112 ts.add_task(6560, 84000);
113 ts.add_task(122, 73000);
114 ts.add_task(382, 42000);
115 ts.add_task(2906, 46000);
116 ts.add_task(880, 11000);
117 ts.add_task(2704, 29000);
118 ts.add_task(2387, 36000);
119 ts.add_task(3111, 46000);
120 ts.add_task(4654, 78000);
121 ts.add_task(808, 81000);
122 ts.add_task(1485, 18000);
123 ts.add_task(1865, 73000);
124 ts.add_task(2956, 64000);
125 ts.add_task(1058, 51000);
126 ts.add_task(1773, 86000);
127 ts.add_task(2610, 54000);
128 ts.add_task(6795, 86000);
129 ts.add_task(8381, 84000);
130 ts.add_task(5631, 85000);
131 ts.add_task(1567, 69000);
132 ts.add_task(303, 24000);
133 ts.add_task(2889, 44000);
134 ts.add_task(4201, 67000);
135 ts.add_task(2771, 85000);
136 ts.add_task(1287, 71000);
137 ts.add_task(4572, 67000);
138 ts.add_task(4277, 54000);
139 ts.add_task(3114, 82000);
140 ts.add_task(4527, 49000);
141 ts.add_task(2336, 60000);
142 ts.add_task(8131, 85000);
143 ts.add_task(2680, 27000);
144 ts.add_task(2598, 34000);
145 ts.add_task(888, 58000);
146 ts.add_task(1051, 14000);
147 ts.add_task(1216, 27000);
148 ts.add_task(2768, 40000);
149 ts.add_task(875, 65000);
150 ts.add_task(3762, 49000);
151 ts.add_task(5294, 56000);
152 ts.add_task(6273, 97000);
153 ts.add_task(7594, 91000);
154 ts.add_task(2948, 83000);
155 ts.add_task(1315, 16000);
156 ts.add_task(4982, 79000);
157 ts.add_task(127, 10000);
158 ts.add_task(372, 11000);
159 ts.add_task(4487, 59000);
160 ts.add_task(1388, 46000);
161 ts.add_task(3443, 40000);
162 ts.add_task(221, 32000);
163 ts.add_task(1121, 12000);
164 ts.add_task(872, 32000);
165 ts.add_task(1540, 43000);
166 ts.add_task(2794, 43000);
167 ts.add_task(4840, 68000);
168 ts.add_task(40, 21000);
169 ts.add_task(710, 44000);
170 ts.add_task(253, 37000);
171 ts.add_task(611, 14000);
172 ts.add_task(2646, 48000);
173 ts.add_task(3239, 64000);
174 ts.add_task(413, 22000);
175 ts.add_task(1451, 35000);
176 ts.add_task(444, 29000);
177 ts.add_task(119, 38000);
178 ts.add_task(873, 15000);
179 ts.add_task(688, 20000);
180 ts.add_task(5667, 88000);
181 ts.add_task(1226, 34000);
182 ts.add_task(1743, 25000);
183 ts.add_task(1732, 25000);
184 ts.add_task(359, 77000);
185 ts.add_task(8101, 86000);
186 ts.add_task(1909, 44000);
187 ts.add_task(2326, 33000);
188 ts.add_task(148, 70000);
189 ts.add_task(764, 26000);
190 ts.add_task(1951, 26000);
191 ts.add_task(430, 33000);
192 ts.add_task(430, 24000);
193 ts.add_task(3216, 69000);
194 ts.add_task(6476, 71000);
195 ts.add_task(1728, 88000);
196 ts.add_task(517, 92000);
197 ts.add_task(6755, 69000);
198 ts.add_task(737, 17000);
199 ts.add_task(1480, 68000);
200 ts.add_task(2392, 53000);
201 ts.add_task(795, 12000);
202 ts.add_task(1676, 31000);
203 ts.add_task(4412, 80000);
204 ts.add_task(2937, 53000);
205 ts.add_task(2129, 76000);
206 ts.add_task(1413, 34000);
207 ts.add_task(214, 10000);
208 ts.add_task(1844, 50000);
209 ts.add_task(2612, 31000);
210 ts.add_task(4326, 65000);
211 ts.add_task(7053, 98000);
212 ts.add_task(2952, 83000);
213 ts.add_task(507, 68000);
214 ts.add_task(1112, 51000);
215 ts.add_task(110, 89000);
216 ts.add_task(1468, 17000);
217 ts.add_task(7788, 83000);
218 ts.add_task(688, 16000);
219 ts.add_task(2195, 48000);
220 ts.add_task(1636, 61000);
221 ts.add_task(530, 19000);
222 ts.add_task(3543, 45000);
223 ts.add_task(2023, 24000);
224 ts.add_task(3818, 55000);
225 ts.add_task(2032, 65000);
226 ts.add_task(1790, 63000);
227 ts.add_task(69, 12000);
228 ts.add_task(1569, 90000);
229 ts.add_task(8860, 98000);
230 ts.add_task(2330, 64000);
231 ts.add_task(971, 35000);
232 ts.add_task(2168, 87000);
233 ts.add_task(2309, 56000);
234 ts.add_task(752, 14000);
235 ts.add_task(4573, 81000);
236 ts.add_task(1015, 99000);
237 ts.add_task(4131, 60000);
238 ts.add_task(1324, 50000);
239 ts.add_task(2354, 68000);
240 ts.add_task(4137, 86000);
241 ts.add_task(2671, 36000);
242 ts.add_task(3642, 50000);
243 ts.add_task(3017, 33000);
244 ts.add_task(567, 15000);
245 ts.add_task(3310, 45000);
246 ts.add_task(1727, 23000);
247 ts.add_task(9067, 100000);
248 ts.add_task(324, 11000);
249 ts.add_task(2299, 62000);
250 ts.add_task(645, 28000);
251 ts.add_task(7903, 91000);
252 ts.add_task(843, 22000);
253 ts.add_task(5727, 80000);
254 ts.add_task(5308, 75000);
255 ts.add_task(574, 11000);
256 ts.add_task(497, 30000);
257 ts.add_task(7536, 91000);
258 ts.add_task(540, 92000);
259 ts.add_task(233, 12000);
260 ts.add_task(2253, 29000);
261 ts.add_task(1298, 84000);
262 ts.add_task(1516, 84000);
263 ts.add_task(2292, 57000);
264 ts.add_task(2216, 25000);
265 ts.add_task(2496, 43000);
266 ts.add_task(4050, 47000);
267 ts.add_task(480, 17000);
268 ts.add_task(941, 27000);
269 ts.add_task(9024, 91000);
270 ts.add_task(1318, 29000);
271 ts.add_task(2862, 56000);
272 ts.add_task(3194, 61000);
273 ts.add_task(614, 15000);
274 ts.add_task(3039, 92000);
275 ts.add_task(4494, 58000);
276 ts.add_task(814, 11000);
277 ts.add_task(9271, 97000);
278 ts.add_task(569, 62000);
279 ts.add_task(3625, 84000);
280 ts.add_task(2095, 23000);
281 ts.add_task(3789, 95000);
282 ts.add_task(4866, 78000);
283 ts.add_task(3109, 96000);
284 ts.add_task(2659, 42000);
285 ts.add_task(1427, 44000);
286 ts.add_task(3311, 55000);
287 ts.add_task(651, 26000);
288 ts.add_task(1254, 52000);
289 ts.add_task(3250, 91000);
290 ts.add_task(2073, 92000);
291 ts.add_task(6143, 90000);
292 ts.add_task(7444, 85000);
293 ts.add_task(7359, 87000);
294 ts.add_task(350, 51000);
295 ts.add_task(5597, 70000);
296 ts.add_task(5278, 77000);
297 ts.add_task(3116, 72000);
298 ts.add_task(4043, 51000);
299 ts.add_task(4912, 59000);
300 ts.add_task(8909, 90000);
301 ts.add_task(755, 48000);
302 ts.add_task(348, 10000);
303 ts.add_task(3065, 88000);
304 ts.add_task(4136, 49000);
305 ts.add_task(8198, 82000);
306 ts.add_task(4925, 91000);
307 ts.add_task(779, 10000);
308 ts.add_task(1134, 12000);
309 ts.add_task(3999, 46000);
310 ts.add_task(1687, 38000);
311 ts.add_task(565, 22000);
312 ts.add_task(1553, 56000);
313 ts.add_task(8208, 89000);
314 ts.add_task(2237, 31000);
315 ts.add_task(6885, 90000);
316 ts.add_task(664, 16000);
317 ts.add_task(549, 17000);
318 ts.add_task(3799, 50000);
319 ts.add_task(3707, 52000);
320 ts.add_task(896, 27000);
321 ts.add_task(1897, 74000);
322 ts.add_task(1528, 25000);
323 ts.add_task(4931, 55000);
324 ts.add_task(1882, 95000);
325 ts.add_task(3642, 96000);
326 ts.add_task(2586, 57000);
327 ts.add_task(2432, 31000);
328 ts.add_task(1036, 24000);
329 ts.add_task(4127, 45000);
330 ts.add_task(7284, 84000);
331 ts.add_task(2020, 57000);
332 ts.add_task(901, 10000);
333 ts.add_task(2017, 21000);
334 ts.add_task(4991, 52000);
335 ts.add_task(3064, 63000);
336 ts.add_task(1369, 23000);
337 ts.add_task(5174, 67000);
338 ts.add_task(1023, 26000);
339 ts.add_task(629, 54000);
340 ts.add_task(1164, 22000);
341 ts.add_task(3074, 38000);
342 ts.add_task(2285, 72000);
343 ts.add_task(2190, 53000);
344 ts.add_task(681, 33000);
345 ts.add_task(3818, 66000);
346 ts.add_task(1926, 41000);
347 ts.add_task(5677, 73000);
348 ts.add_task(1132, 16000);
349 ts.add_task(930, 27000);
350 ts.add_task(2323, 63000);
351 ts.add_task(635, 13000);
352 ts.add_task(1328, 57000);
353 ts.add_task(2107, 28000);
354 ts.add_task(1174, 39000);
355 ts.add_task(190, 70000);
356 ts.add_task(1437, 15000);
357 ts.add_task(6367, 82000);
358 ts.add_task(323, 80000);
359 ts.add_task(1230, 13000);
360 ts.add_task(1603, 88000);
361 ts.add_task(367, 24000);
362 ts.add_task(3227, 48000);
363 ts.add_task(7160, 73000);
364 ts.add_task(136, 12000);
365 ts.add_task(2582, 77000);
366 ts.add_task(145, 45000);
367 ts.add_task(6384, 79000);
368 ts.add_task(1013, 63000);
369 ts.add_task(7001, 88000);
370 ts.add_task(1525, 27000);
371 ts.add_task(3928, 78000);
372 ts.add_task(734, 62000);
373 ts.add_task(953, 43000);
374 ts.add_task(3062, 77000);
375 ts.add_task(740, 15000);
376 ts.add_task(3978, 53000);
377 ts.add_task(1113, 55000);
378 ts.add_task(2475, 94000);
379 ts.add_task(3168, 34000);
380 ts.add_task(236, 40000);
381 ts.add_task(148, 39000);
382 ts.add_task(2814, 53000);
383 ts.add_task(5107, 64000);
384 ts.add_task(5425, 78000);
385 ts.add_task(320, 14000);
386 ts.add_task(6885, 99000);
387 ts.add_task(4699, 61000);
388 ts.add_task(5917, 77000);
389 ts.add_task(7350, 80000);
390 ts.add_task(2231, 29000);
391 ts.add_task(4231, 79000);
392 ts.add_task(4007, 86000);
393 ts.add_task(198, 53000);
394 ts.add_task(7140, 72000);
395 ts.add_task(217, 43000);
396 ts.add_task(309, 41000);
397 ts.add_task(212, 18000);
398 ts.add_task(1167, 24000);
399 ts.add_task(5243, 58000);
400 ts.add_task(1623, 63000);
401 ts.add_task(242, 28000);
402 ts.add_task(293, 74000);
403 ts.add_task(6670, 96000);
404 ts.add_task(2009, 41000);
405 ts.add_task(887, 24000);
406 ts.add_task(615, 16000);
407 ts.add_task(1493, 51000);
408 ts.add_task(5020, 53000);
409 ts.add_task(6192, 81000);
410 ts.add_task(4928, 63000);
411 ts.add_task(3958, 60000);
412 ts.add_task(3479, 56000);
413 ts.add_task(1470, 75000);
414 ts.add_task(1020, 17000);
415 ts.add_task(4903, 56000);
416 ts.add_task(7938, 86000);
417 ts.add_task(871, 11000);
418 ts.add_task(7242, 95000);
419 ts.add_task(845, 40000);
420 ts.add_task(2646, 33000);
421 ts.add_task(4409, 51000);
422 ts.add_task(736, 32000);
423 ts.add_task(691, 14000);
424 ts.add_task(328, 100000);
425 ts.add_task(8384, 91000);
426 ts.add_task(536, 50000);
427 ts.add_task(180, 40000);
428 ts.add_task(6117, 89000);
429 ts.add_task(913, 37000);
430 ts.add_task(4403, 70000);
431 ts.add_task(6350, 78000);
432 ts.add_task(419, 60000);
433 ts.add_task(3469, 91000);
434 ts.add_task(296, 23000);
435 ts.add_task(2256, 24000);
436 ts.add_task(1588, 90000);
437 ts.add_task(2659, 100000);
438 ts.add_task(1759, 18000);
439 ts.add_task(4062, 93000);
440 ts.add_task(1216, 14000);
441 ts.add_task(162, 32000);
442 ts.add_task(1643, 68000);
443 ts.add_task(2409, 46000);
444 ts.add_task(1522, 28000);
445 ts.add_task(840, 30000);
446 ts.add_task(2491, 41000);
447 ts.add_task(2712, 96000);
448 ts.add_task(3297, 100000);
449 ts.add_task(6269, 96000);
450 ts.add_task(2319, 93000);
451 ts.add_task(973, 55000);
452 ts.add_task(3753, 68000);
453 ts.add_task(1449, 36000);
454 ts.add_task(1293, 17000);
455 ts.add_task(1991, 37000);
456 ts.add_task(958, 13000);
457 ts.add_task(3343, 61000);
458 ts.add_task(493, 82000);
459 ts.add_task(1555, 51000);
460 ts.add_task(3194, 92000);
461 ts.add_task(1594, 18000);
462 ts.add_task(650, 33000);
463 ts.add_task(5761, 63000);
464 ts.add_task(3998, 98000);
465 ts.add_task(5874, 100000);
466 ts.add_task(2371, 47000);
467 ts.add_task(1771, 74000);
468 ts.add_task(983, 22000);
469 ts.add_task(2026, 73000);
470 ts.add_task(3573, 54000);
471 ts.add_task(939, 18000);
472 ts.add_task(3585, 60000);
473 ts.add_task(2480, 43000);
474 ts.add_task(3534, 54000);
475 ts.add_task(7482, 80000);
476 ts.add_task(57, 17000);
477 ts.add_task(1342, 86000);
478 ts.add_task(2339, 33000);
479 ts.add_task(675, 61000);
480
481 return rts;
482}
483
// Run the Baruah G-EDF test on the large fixed task set with 24 CPUs
// and print the verdict.  NOTE(review): the TaskSet is never freed;
// tolerable in a run-once test driver.
void test_baruah()
{
	TaskSet *ts = init_baruah();

	cout << "Baruah schedulable? : " << BaruahGedf(24).is_schedulable(*ts)
	     << endl;
}
491
// Alternative entry point: only exercises the Baruah test.
int bar_main(int argc, char** argv)
{
	test_baruah();
	return 0;
}
497
// Scratch-pad entry point: sanity-checks the GMP C++ wrappers
// (mpz_class/mpq_class arithmetic), basic Task/Job construction, and
// then runs the schedulability smoke tests.  Output is eyeballed only.
int xxxmain(int argc, char** argv)
{
	cout << "GMP C++ test." << endl;

	mpz_class a, b;

	// big-integer literals assigned from strings
	a = "123123123123";
	b = "456456456456";

	cout << "a : " << a << endl;
	cout << "b : " << b << endl;
	cout << "a*b*10: " << a * b * 10 << endl;

	mpq_class q = a;

	q /= b;
	cout << "a/b :" << q << endl;

	// 100! — exercises arbitrary-precision multiplication
	mpz_class fact;
	fact = 1;
	for (int n = 2; n < 101; n++) {
		fact *= n;
	}
	cout << "Factorial is " << fact << endl;
	cout << "casted: " << fact.get_ui() << endl;

	Task t = Task(10, 100);

	Job j = Job(t, 123, 12);

	cout << "wcet: " << t.get_wcet() << " period: " << t.get_period()
	     << " deadline: " << t.get_deadline() << endl;


	// rational arithmetic: GFB-style bound m * (1 - lambda) + lambda
	mpq_class lambda, bound;
	unsigned int m = 10;

	lambda = 3;
	lambda /= 10;
	bound = m * (1 - lambda) + lambda;

	cout << "lambda: " << lambda << " bound: " << bound << endl;

	test_baker();

	return 0;
}
545
546
// GlobalScheduler subclass that logs every simulator event (release,
// completion, scheduling decision) to stdout; used for eyeballing
// schedule traces during development.
template <typename JobPriority>
class DebugGlobalScheduler : public GlobalScheduler<JobPriority>
{
  public:
	DebugGlobalScheduler(int m) : GlobalScheduler<JobPriority>(m) {};

	// Prefix every log line with the current simulation time.
	void at_time()
	{
		cout << "at time " << this->get_current_time() << ": ";
	}

	virtual void job_released(Job *job)
	{
		at_time();
		cout << "released job " << job->get_seqno() << " of " << job->get_task()
		     << endl;
	};

	// Also reports tardiness when the job completed past its deadline.
	virtual void job_completed(int proc,
				   Job *job)
	{
		at_time();
		cout << "completed job " << job->get_seqno() << " of " << job->get_task();
		if (job->get_deadline() < this->get_current_time())
			cout << " TARDINESS: " << this->get_current_time() - job->get_deadline();
		cout << endl;
	};

	// Logs which job was scheduled where and whom (if anyone) it preempted.
	virtual void job_scheduled(int proc,
				   Job *preempted,
				   Job *scheduled)
	{
		at_time();
		cout << "scheduled job " << scheduled->get_seqno() << " of "
		     << scheduled->get_task() << " on CPU " << proc;
		if (preempted)
			cout << "; preempted job " << preempted->get_seqno() << " of "
			     << preempted->get_task();
		else
			cout << " [CPU was idle] ";
		cout << endl;
	};
};
590
#define NUM_TASKS 3

// Scratch-pad entry point: simulate three identical (20, 30) tasks
// under G-EDF on 2 CPUs with full event logging for 1000 time units.
// NOTE(review): the PeriodicJobSequence objects are never freed;
// tolerable in a run-once test driver.
int xmain(int argc, char** argv)
{
	DebugGlobalScheduler<EarliestDeadlineFirst> theSim(2);

	TaskSet ts = TaskSet();

	/* earlier experiment, kept for reference:
	 ts[0].init(10, 100);
	 ts[1].init(3, 9);
	 ts[2].init(11, 33);
	 ts[3].init(11, 17);
	 ts[4].init(2, 5);
	*/

	ts.add_task(20, 30);
	ts.add_task(20, 30);
	ts.add_task(20, 30);

	PeriodicJobSequence* gen[NUM_TASKS];
	for (int i = 0; i < NUM_TASKS; i++) {
		gen[i] = new PeriodicJobSequence(ts[i]);
		gen[i]->set_simulation(&theSim);
		theSim.add_release(gen[i]);
	}

	theSim.simulate_until(1000);

	return 0;
}
621
622
// Scratch-pad entry point: silently simulate the large fixed task set
// under G-EDF on 24 CPUs for 1000 simulated seconds.
// NOTE(review): ts, gen, and the generators are never freed;
// tolerable in a run-once test driver.
int xxxxmain(int argc, char** argv)
{
	GlobalScheduler<EarliestDeadlineFirst> theSim(24);

	TaskSet* ts = init_baruah();

	PeriodicJobSequence** gen;
	gen = new PeriodicJobSequence*[ts->get_task_count()];

	for (unsigned int i = 0; i < ts->get_task_count(); i++) {
		gen[i] = new PeriodicJobSequence((*ts)[i]);
		gen[i]->set_simulation(&theSim);
		theSim.add_release(gen[i]);
	}

	theSim.simulate_until(1000 * 1000 * 1000); // 1000 seconds

	return 0;
}
642
643
// Scratch-pad entry point: simulate the fixed task set under G-EDF on
// 1..29 CPUs and report for each whether any deadline is missed.
int yymain(int argc, char** argv)
{
	TaskSet* ts = init_baruah();
	// 10 minutes; the product (600,000,000) still fits in a 32-bit int
	simtime_t end = 10 * 60 * 1000 * 1000; // 10 minutes
	for (int m = 1; m < 30; m++)
		cout << "\nOn " << m << " CPUs "
		     << "deadline missed: " << edf_misses_deadline(m, *ts, end) << endl;
	return 0;
}
653
654
// Scratch-pad entry point: checks rational division and the
// mpq_truncate() helper (rational -> integer truncation).
int main4(int argc, char** argv)
{
	mpq_class a, b;
	mpz_class c;

	a = 20;
	a /= 3;
	cout << a << endl;
	// b = a % 3;  (mpq_class has no modulo)
	b = a / 3;
	cout << b << endl;

	c = b; // truncate

	cout << c << endl;

	mpq_truncate(b);
	cout << b << endl;

	return 0;
}
676
677
678int main5(int argc, char** argv)
679{
680 unsigned long a, b;
681
682 a = 133;
683 b = 10;
684
685 cout << a << " // " << b << " = " << divide_with_ceil(a, b) << endl;
686
687 a = 130;
688
689 cout << a << " // " << b << " = " << divide_with_ceil(a, b) << endl;
690
691 a = 129;
692
693 cout << a << " // " << b << " = " << divide_with_ceil(a, b) << endl;
694
695 return 0;
696}
697
698/*
699int main6(int argc, char** argv)
700{
701 RequestSourceSet rset(10);
702 unsigned int i;
703
704 for (i = 0; i < rset.get_source_count(); i++)
705 rset[i].init(1, 10 + i, 100 * (i + 1), 0, i);
706
707 rset.sort();
708
709 for (i = 0; i < rset.get_source_count(); i++)
710 cout << "pos " << i << " " << rset[i]
711 << " -> " << rset[i].get_max_num_requests(700) << endl;
712
713
714 cout << "blocking: " << rset.bound_blocking(700, 0, 4, 0) << endl;
715
716 return 0;
717}
718
719
720
721*/
722
723
724
// Scratch-pad entry point: checks TaskInfo request bookkeeping and its
// stream output operator.
int main6(int argc, char** argv)
{
	TaskInfo ti(100, 100, 0, 0);

	ti.add_request(123, 3, 3);
	ti.add_request(103, 1, 2);

	cout << "task: " << ti << endl;

	return 0;
}
736
737
738int main7(int argc, char** argv)
739{
740 ResourceSharingInfo rsi(3);
741 unsigned int i;
742
743 rsi.add_task(50, 50);
744 rsi.add_request(0, 2, 1);
745
746 rsi.add_task(30, 30);
747 rsi.add_request(0, 1, 3);
748
749 rsi.add_task(20, 20);
750 rsi.add_request(0, 1, 1);
751
752 cout << rsi << endl;
753
754 // cout << "Global OMLP: " << rset.global_omlp_bound(20, 1, 16, 2) << endl;
755
756 BlockingBounds* results;
757
758 results = global_omlp_bounds(rsi, 16);
759
760 for (i = 0; i < 3; i++)
761 cout << i << ": count=" << (*results)[i].count
762 << " total=" << (*results)[i].total_length << endl;
763
764 return 0;
765}
766
767
768int main(int argc, char** argv)
769{
770 ResourceSharingInfo rsi(100);
771 unsigned int i;
772
773 rsi.add_task(50000, 50000, 0, 2);
774 rsi.add_request(0, 2, 1);
775
776 rsi.add_task(30000, 30000, 0, 1);
777 rsi.add_request(0, 4, 3);
778
779 rsi.add_task(20000, 20000, 0, 0);
780 rsi.add_request(0, 4, 1);
781
782
783 rsi.add_task(50000, 50000, 1, 3);
784 rsi.add_request(0, 2, 1);
785
786 rsi.add_task(30000, 30000, 1, 2);
787 rsi.add_request(0, 3, 3);
788 rsi.add_request(1, 100, 100);
789
790 rsi.add_task(20000, 20000, 1, 1);
791 rsi.add_request(0, 3, 1);
792
793 rsi.add_task(50000, 50000, 2, 2);
794 rsi.add_request(0, 2, 1);
795
796 rsi.add_task(30000, 30000, 2, 1);
797 rsi.add_request(0, 5, 3);
798
799 rsi.add_task(20000, 20000, 2, 0);
800 rsi.add_request(0, 2, 1);
801
802 for (i = 0; i < 30; i++)
803 {
804 rsi.add_task(100000 + i, 100000 + i, 0, 3 + i);
805 rsi.add_request(0, 1, 1);
806 rsi.add_request(3, 1, 1);
807 }
808
809 rsi.add_task(3000, 3000, 3, 0);
810 rsi.add_request(1, 1, 1);
811
812 rsi.add_task(5000, 5000, 1, 0);
813
814 rsi.add_task(100000, 100000, 4, 100);
815 rsi.add_request(3, 3, 3);
816
817 cout << rsi << endl;
818
819 BlockingBounds* results;
820
821 results = global_omlp_bounds(rsi, 6);
822
823 cout << endl << endl << "Global OMLP" << endl;
824 for (i = 0; i < results->size(); i++)
825 cout << i << ": count=" << (*results)[i].count
826 << " total=" << (*results)[i].total_length << endl;
827
828 delete results;
829
830
831 results = global_fmlp_bounds(rsi);
832
833 cout << endl << endl << "Global FMLP" << endl;
834 for (i = 0; i < results->size(); i++)
835 cout << i << ": count=" << (*results)[i].count
836 << " total=" << (*results)[i].total_length << endl;
837
838 delete results;
839
840 results = part_omlp_bounds(rsi);
841
842 cout << endl << endl << "Partitioned OMLP" << endl;
843 for (i = 0; i < results->size(); i++)
844 cout << i
845 << ": count=" << (*results)[i].count
846 << " total=" << (*results)[i].total_length
847 << " --- request span: count=" << results->get_span_count(i)
848 << " total=" << results->get_span_term(i)
849 << endl;
850
851 delete results;
852
853 results = clustered_omlp_bounds(rsi, 1);
854
855 cout << endl << endl << "Clustered OMLP c=1" << endl;
856 for (i = 0; i < results->size(); i++)
857 cout << i
858 << ": count=" << (*results)[i].count
859 << " total=" << (*results)[i].total_length
860 << " --- request span: count=" << results->get_span_count(i)
861 << " total=" << results->get_span_term(i)
862 << endl;
863
864 delete results;
865
866 results = clustered_omlp_bounds(rsi, 3);
867
868 cout << endl << endl << "Clustered OMLP c=3" << endl;
869 for (i = 0; i < results->size(); i++)
870 cout << i
871 << ": count=" << (*results)[i].count
872 << " total=" << (*results)[i].total_length
873 << " --- request span: count=" << results->get_span_count(i)
874 << " total=" << results->get_span_term(i)
875 << endl;
876
877 delete results;
878
879 results = part_fmlp_bounds(rsi);
880
881 cout << endl << endl << "Part FMLP" << endl;
882 for (i = 0; i < results->size(); i++)
883 cout << i
884 << ": count=" << (*results)[i].count
885 << " total=" << (*results)[i].total_length
886 << endl;
887
888 delete results;
889
890 results = mpcp_bounds(rsi, false);
891
892 cout << endl << endl << "MPCP::susp" << endl;
893 for (i = 0; i < results->size(); i++)
894 cout << i
895 << ": total=" << (*results)[i].total_length
896 << " remote=" << results->get_span_term(i)
897 << endl;
898
899 delete results;
900
901 results = mpcp_bounds(rsi, true);
902
903 cout << endl << endl << "MPCP::spin" << endl;
904 for (i = 0; i < results->size(); i++)
905 cout << i
906 << ": total=" << (*results)[i].total_length
907 << " remote=" << results->get_span_term(i)
908 << endl;
909
910 delete results;
911 return 0;
912}
diff --git a/schedcat/__init__.py b/schedcat/__init__.py
new file mode 100644
index 0000000..a775c57
--- /dev/null
+++ b/schedcat/__init__.py
@@ -0,0 +1,3 @@
1"""
2SchedCAT: Schedulability test Collection And Tools
3"""
diff --git a/schedcat/generator/__init__.py b/schedcat/generator/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/schedcat/generator/__init__.py
diff --git a/schedcat/generator/tasks.py b/schedcat/generator/tasks.py
new file mode 100644
index 0000000..b12ef9f
--- /dev/null
+++ b/schedcat/generator/tasks.py
@@ -0,0 +1,119 @@
1from __future__ import division
2
3from math import trunc
4
5import random
6
7import schedcat.model.tasks as ts
8
def uniform_int(minval, maxval):
    """Return a nullary function drawing ints uniformly from
    {minval, ..., maxval} (both endpoints included)."""
    return lambda: random.randint(minval, maxval)
14
def uniform(minval, maxval):
    """Return a nullary function drawing floats uniformly from
    [minval, maxval]."""
    return lambda: random.uniform(minval, maxval)
20
def uniform_choice(choices):
    """Return a nullary function that picks one element of 'choices'
    uniformly at random (via a uniform index draw)."""
    pick_index = uniform_int(0, len(choices) - 1)
    return lambda: choices[pick_index()]
27
def truncate(minval, maxval):
    """Decorator factory: wrap a distribution so every drawn value is
    clamped into [minval, maxval]."""
    def _limit(fun):
        def _clamped(*args, **kargs):
            raw = fun(*args, **kargs)
            return min(maxval, max(minval, raw))
        return _clamped
    return _limit
35
def redraw(minval, maxval):
    """Decorator factory: wrap a distribution so that out-of-range
    samples are discarded and redrawn until one lands in
    [minval, maxval]."""
    def _redraw(dist):
        def _sample(*args, **kargs):
            while True:
                val = dist(*args, **kargs)
                if minval <= val <= maxval:
                    return val
        return _sample
    return _redraw
46
def exponential(minval, maxval, mean, limiter=redraw):
    """Create a function that draws floats from an exponential
    distribution with expected value 'mean'.  Out-of-range samples are
    handled by 'limiter': redrawn (limiter=redraw, the default) or
    clamped to the nearest bound (limiter=truncate)."""
    raw_draw = lambda: random.expovariate(1.0 / mean)
    return limiter(minval, maxval)(raw_draw)
56
def multimodal(weighted_distributions):
    """Create a function that draws from one of several distributions,
    each chosen with probability proportional to its weight.  The
    argument is a list of (distribution, weight) pairs."""
    total_weight = sum(w for (_, w) in weighted_distributions)
    selector = uniform(0, total_weight)
    def _draw():
        x = selector()
        cumulative = 0
        for (dist, weight) in weighted_distributions:
            cumulative += weight
            if cumulative >= x:
                return dist()
        # x <= total_weight, so the loop always returns
        assert False  # should never drop off
    return _draw
72
73
74
class TaskGenerator(object):
    """Sporadic task generator.

    'period' and 'util' are nullary callables (distributions); 'deadline'
    maps (cost, period) to a relative deadline and defaults to implicit
    deadlines (deadline = period).
    """

    def __init__(self, period, util, deadline=lambda x, y: y):
        """Creates TaskGenerator based on given period and
        utilization distributions."""
        self.period = period
        self.util = util
        self.deadline = deadline

    def tasks(self, max_tasks=None, max_util=None, squeeze=False,
              time_conversion=trunc):
        """Generate a sequence of tasks until either max_tasks is reached
        or max_util is reached. If max_util would be exceeded and squeeze is
        true, then the last-generated task's utilization is scaled to exactly
        match max_util. Otherwise, the last-generated task is discarded.
        time_conversion is used to convert the generated (non-integral) values
        into integral task parameters.
        """
        count = 0
        usum = 0
        while ((max_tasks is None or count < max_tasks) and
               (max_util is None or usum < max_util)):
            period = self.period()
            util = self.util()
            cost = period * util
            deadline = self.deadline(cost, period)
            # scale as required
            period = max(1, int(time_conversion(period)))
            cost = max(1, int(time_conversion(cost)))
            deadline = max(1, int(time_conversion(deadline)))
            # recompute: rounding may have changed the utilization
            util = cost / period
            count += 1
            usum += util
            if max_util and usum > max_util:
                if squeeze:
                    # make last task fit exactly
                    util -= (usum - max_util)
                    cost = trunc(period * util)
                else:
                    # discard the over-budget task
                    break
            yield ts.SporadicTask(cost, period, deadline)

    def make_task_set(self, *extra, **kextra):
        # Convenience wrapper: materialize the generator into a TaskSystem.
        return ts.TaskSystem(self.tasks(*extra, **kextra))
diff --git a/schedcat/generator/tasksets.py b/schedcat/generator/tasksets.py
new file mode 100644
index 0000000..87b502b
--- /dev/null
+++ b/schedcat/generator/tasksets.py
@@ -0,0 +1,91 @@
1"""
2Generate random task sets for schedulability experiments.
3"""
4
5
6import re
7import random
8from functools import partial
9
10import schedcat.generator.tasks as gen
11
def decode_params(name):
    # Placeholder: intended to decode a distribution-parameter string,
    # e.g. name formats like:
    # uni-UMIN-UMAX-PMIN-PMAX
    # bimo-
    # exp-UMIN-UMAX-MEAN-PMIN-PMAX
    # Not implemented yet; currently a no-op that returns None.

    pass
18
NAMED_PERIODS = {
# Named period distributions used in several UNC papers, in milliseconds.
    'uni-short'     : gen.uniform_int( 3,  33),
    'uni-moderate'  : gen.uniform_int(10, 100),
    'uni-long'      : gen.uniform_int(50, 250),
    }

NAMED_UTILIZATIONS = {
# Named utilization distributions used in several UNC papers.
# (Utilizations are dimensionless fractions of a processor, not milliseconds.)
    'uni-light'     : gen.uniform(0.001, 0.1),
    'uni-medium'    : gen.uniform(0.1  , 0.4),
    'uni-heavy'     : gen.uniform(0.5  , 0.9),

    'exp-light'     : gen.exponential(0, 1, 0.10),
    'exp-medium'    : gen.exponential(0, 1, 0.25),
    'exp-heavy'     : gen.exponential(0, 1, 0.50),

    # bimodal: mostly-light vs. mostly-heavy mixes (weights 8:1, 6:3, 4:5)
    'bimo-light'    : gen.multimodal([(gen.uniform(0.001, 0.5), 8),
                                      (gen.uniform(0.5  , 0.9), 1)]),
    'bimo-medium'   : gen.multimodal([(gen.uniform(0.001, 0.5), 6),
                                      (gen.uniform(0.5  , 0.9), 3)]),
    'bimo-heavy'    : gen.multimodal([(gen.uniform(0.001, 0.5), 4),
                                      (gen.uniform(0.5  , 0.9), 5)]),
}
43
def uniform_slack(min_slack_ratio, max_slack_ratio):
    """Return a deadline chooser that draws relative deadlines uniformly
    at random from the interval
    [cost + min_slack_ratio * (period - cost),
     cost + max_slack_ratio * (period - cost)].

    With max_slack_ratio <= 1 every generated deadline is constrained
    (deadline <= period); larger ratios permit arbitrary deadlines.
    """
    def choose_deadline(cost, period):
        slack = period - cost
        lo = min_slack_ratio * slack
        hi = max_slack_ratio * slack
        return cost + random.uniform(lo, hi)
    return choose_deadline
57
NAMED_DEADLINES = {
    # None => implicit deadlines (the TaskGenerator default: deadline == period)
    'implicit'        : None,
    'uni-constrained' : uniform_slack(0, 1),
    # max ratio 2 => deadlines may exceed the period (arbitrary deadlines)
    'uni-arbitrary'   : uniform_slack(0, 2),
}
63
def mkgen(utils, periods, deadlines=None):
    """Build a zero-argument task-set factory from the given utilization,
    period, and (optional) deadline distributions."""
    if deadlines is None:
        args = (periods, utils)
    else:
        args = (periods, utils, deadlines)
    generator = gen.TaskGenerator(*args)
    return partial(generator.make_task_set)
70
def make_standard_dists(dl='implicit'):
    """Build the standard table of task-set generators for the given
    deadline kind, keyed first by period name, then by utilization name."""
    return dict(
        (p, dict((u, mkgen(NAMED_UTILIZATIONS[u],
                           NAMED_PERIODS[p],
                           NAMED_DEADLINES[dl]))
                 for u in NAMED_UTILIZATIONS))
        for p in NAMED_PERIODS)
81
# keyed by deadline type, then by period, then by utilization
DIST_BY_KEY = {}
for dl in NAMED_DEADLINES:
    DIST_BY_KEY[dl] = make_standard_dists(dl)

# flat index of the same generators, keyed 'utilization:period:deadline'
ALL_DISTS = {}
for dl in NAMED_DEADLINES:
    for p in NAMED_PERIODS:
        for u in NAMED_UTILIZATIONS:
            ALL_DISTS[':'.join([u, p, dl])] = DIST_BY_KEY[dl][p][u]
diff --git a/schedcat/locking/__init__.py b/schedcat/locking/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/schedcat/locking/__init__.py
diff --git a/schedcat/locking/bounds.py b/schedcat/locking/bounds.py
new file mode 100644
index 0000000..3d16cc1
--- /dev/null
+++ b/schedcat/locking/bounds.py
@@ -0,0 +1,158 @@
1import schedcat.locking.native as cpp
2
# assumes mutex constraints
def get_cpp_model(all_tasks, use_task_period=False):
    """Translate a task set into a cpp.ResourceSharingInfo object,
    treating every request as a write (mutex semantics).  The analysis
    interval is the response time unless use_task_period is set."""
    rsi = cpp.ResourceSharingInfo(len(all_tasks))
    for t in all_tasks:
        interval = t.period if use_task_period else t.response_time
        rsi.add_task(t.period, interval, t.partition, t.locking_prio)
        for req in t.resmodel.values():
            rsi.add_request_rw(req.res_id, req.max_requests,
                               req.max_length, cpp.WRITE)
    return rsi
15
def get_cpp_model_rw(all_tasks, use_task_period=False):
    """Like get_cpp_model(), but keeps read and write requests separate."""
    rsi = cpp.ResourceSharingInfo(len(all_tasks))
    for t in all_tasks:
        interval = t.period if use_task_period else t.response_time
        rsi.add_task(t.period, interval, t.partition, t.locking_prio)
        for req in t.resmodel.values():
            if req.max_writes > 0:
                rsi.add_request_rw(req.res_id, req.max_writes,
                                   req.max_write_length, cpp.WRITE)
            if req.max_reads > 0:
                rsi.add_request_rw(req.res_id, req.max_reads,
                                   req.max_read_length, cpp.READ)
    return rsi
30
def assign_edf_locking_prios(all_tasks):
    """Assign locking priorities by relative deadline: shorter deadlines
    get numerically smaller (= higher) priorities; tasks whose deadlines
    truncate to the same integer share a priority level."""
    distinct = sorted(set([t.deadline for t in all_tasks]))
    level = dict((int(dl), idx) for (idx, dl) in enumerate(distinct))
    for t in all_tasks:
        t.locking_prio = level[int(t.deadline)]
39
def assign_fp_locking_prios(all_tasks):
    """Fixed-priority locking priorities: task i gets priority i
    (prioritized in index order)."""
    next_prio = 0
    for t in all_tasks:
        t.locking_prio = next_prio
        next_prio += 1
44
45# S-aware bounds
46
def apply_mpcp_bounds(all_tasks, use_virtual_spin=False):
    """Apply MPCP blocking bounds.  With virtual spinning, remote blocking
    is charged as extra execution demand (s-oblivious style); otherwise it
    is modeled as suspension time."""
    res = cpp.mpcp_bounds(get_cpp_model(all_tasks), use_virtual_spin)
    for i, t in enumerate(all_tasks):
        if use_virtual_spin:
            # busy-waiting => no suspension time
            t.suspended = 0
            # all blocking, including arrival blocking
            t.blocked = res.get_blocking_term(i)
            # remote blocking increases CPU demand (s-oblivious)
            t.cost += res.get_remote_blocking(i)
        else:
            # remote blocking <=> suspension time
            t.suspended = res.get_remote_blocking(i)
            # all blocking, including arrival blocking
            t.blocked = res.get_blocking_term(i)
65
def get_round_robin_resource_mapping(num_resources, num_cpus,
                                     dedicated_irq=cpp.NO_CPU):
    "Default resource assignment: just assign resources to CPUs in index order."
    loc = cpp.ResourceLocality()
    for res_id in range(num_resources):
        cpu = res_id % num_cpus
        if cpu == dedicated_irq:
            # skip the CPU reserved for interrupt handling
            cpu = (cpu + 1) % num_cpus
        loc.assign_resource(res_id, cpu)
    return loc
76
# default resource assignment: round robin
def apply_dpcp_bounds(all_tasks, resource_mapping):
    # NOTE(review): the comment below states the DPCP bounds are expressed
    # in terms of task periods, yet get_cpp_model() is invoked with the
    # default use_task_period=False (i.e., response times) — confirm which
    # is intended.
    # The DPCP bounds are expressed in terms of task periods,
    # not response time.
    model = get_cpp_model(all_tasks)
    res = cpp.dpcp_bounds(model, resource_mapping)

    for i,t in enumerate(all_tasks):
        # remote blocking <=> suspension time
        t.suspended = res.get_remote_blocking(i)
        # all blocking, including arrival blocking
        t.blocked = res.get_blocking_term(i)
89
def apply_part_fmlp_bounds(all_tasks, preemptive=True):
    """Apply partitioned FMLP blocking bounds (s-aware)."""
    res = cpp.part_fmlp_bounds(get_cpp_model(all_tasks), preemptive)
    for i, t in enumerate(all_tasks):
        # remote blocking <=> suspension time
        t.suspended = res.get_remote_blocking(i)
        # all blocking, including local blocking
        t.blocked = res.get_blocking_term(i)
        t.local_blocking_count = res.get_local_count(i)
100
101# S-oblivious bounds
102
def apply_suspension_oblivious(all_tasks, res):
    """Fold blocking into execution cost (s-oblivious accounting):
    no suspension is modeled and all blocking is charged as demand."""
    for idx, task in enumerate(all_tasks):
        # s-oblivious <=> no suspension
        task.suspended = 0
        # might be zero
        task.arrival_blocked = res.get_arrival_blocking(idx)
        # all blocking, including arrival blocking
        task.blocked = res.get_blocking_term(idx)
        # s-oblivious: charge it as execution cost
        task.cost += task.blocked
113
def apply_global_fmlp_sob_bounds(all_tasks):
    """Global FMLP, accounted for suspension-obliviously."""
    bounds = cpp.global_fmlp_bounds(get_cpp_model(all_tasks))
    apply_suspension_oblivious(all_tasks, bounds)
118
def apply_global_omlp_bounds(all_tasks, num_cpus):
    """Global OMLP, accounted for suspension-obliviously."""
    bounds = cpp.global_omlp_bounds(get_cpp_model(all_tasks), num_cpus)
    apply_suspension_oblivious(all_tasks, bounds)
123
def apply_clustered_omlp_bounds(all_tasks, procs_per_cluster,
                                dedicated_irq=cpp.NO_CPU):
    """Clustered OMLP (mutex only), accounted for suspension-obliviously."""
    bounds = cpp.clustered_omlp_bounds(get_cpp_model(all_tasks),
                                       procs_per_cluster, dedicated_irq)
    apply_suspension_oblivious(all_tasks, bounds)
129
def apply_clustered_rw_omlp_bounds(all_tasks, procs_per_cluster,
                                   dedicated_irq=cpp.NO_CPU):
    """Clustered reader/writer OMLP, accounted for suspension-obliviously."""
    bounds = cpp.clustered_rw_omlp_bounds(get_cpp_model_rw(all_tasks),
                                          procs_per_cluster, dedicated_irq)
    apply_suspension_oblivious(all_tasks, bounds)
135
136# spinlocks are charged similarly to s-oblivious analysis
137
def apply_task_fair_mutex_bounds(all_tasks, procs_per_cluster,
                                 dedicated_irq=cpp.NO_CPU):
    """Task-fair mutex spinlocks; spinning is charged like s-oblivious
    blocking."""
    bounds = cpp.task_fair_mutex_bounds(get_cpp_model(all_tasks),
                                        procs_per_cluster, dedicated_irq)
    apply_suspension_oblivious(all_tasks, bounds)
143
def apply_task_fair_rw_bounds(all_tasks, procs_per_cluster,
                              dedicated_irq=cpp.NO_CPU):
    """Task-fair reader/writer spinlocks; the analysis also requires the
    mutex-equivalent model of the task set."""
    rw_model = get_cpp_model_rw(all_tasks)
    # mutex equivalent model
    mtx_model = get_cpp_model(all_tasks)
    bounds = cpp.task_fair_rw_bounds(rw_model, mtx_model,
                                     procs_per_cluster, dedicated_irq)
    apply_suspension_oblivious(all_tasks, bounds)
151
def apply_phase_fair_rw_bounds(all_tasks, procs_per_cluster,
                               dedicated_irq=cpp.NO_CPU):
    """Phase-fair reader/writer spinlocks, charged s-obliviously."""
    bounds = cpp.phase_fair_rw_bounds(get_cpp_model_rw(all_tasks),
                                      procs_per_cluster, dedicated_irq)
    apply_suspension_oblivious(all_tasks, bounds)
157
158
diff --git a/schedcat/mapping/__init__.py b/schedcat/mapping/__init__.py
new file mode 100644
index 0000000..a775c57
--- /dev/null
+++ b/schedcat/mapping/__init__.py
@@ -0,0 +1,3 @@
1"""
2SchedCAT: Schedulability test Collection And Tools
3"""
diff --git a/schedcat/mapping/binpack.py b/schedcat/mapping/binpack.py
new file mode 100644
index 0000000..00a2bd9
--- /dev/null
+++ b/schedcat/mapping/binpack.py
@@ -0,0 +1,144 @@
1#!/usr/bin/env python
2#
3# Copyright (c) 2007,2008,2009, Bjoern B. Brandenburg <bbb [at] cs.unc.edu>
4#
5# All rights reserved.
6#
7# Redistribution and use in source and binary forms, with or without
8# modification, are permitted provided that the following conditions are met:
9# * Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# * Redistributions in binary form must reproduce the above copyright
12# notice, this list of conditions and the following disclaimer in the
13# documentation and/or other materials provided with the distribution.
14# * Neither the name of the copyright holder nor the
15# names of its contributors may be used to endorse or promote products
16# derived from this software without specific prior written permission.
17#
18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28# POSSIBILITY OF SUCH DAMAGE.
29
30"""
31Simple bin-packing heuristics in Python.
32
33Based on:
34
35 OPTIMIZATION THEORY
36 by Hubertus Th. Jongen, Klaus Meer, Eberhard Triesch
37 ISBN 1-4020-8099-9
38 KLUWER ACADEMIC PUBLISHERS
39 http://www.springerlink.com/content/h053786u87674865/
40
41and:
42
43 http://york.cuny.edu/~malk/tidbits/tidbit-bin-packing.html
44
45"""
46
# identity function; shadows the id() builtin within this module — it is
# used below as the default 'weight' function for the packing heuristics
id = lambda x: x
48
class DidNotFit(Exception):
    """Signals that an item could not be placed into any bin."""

    def __init__(self, item):
        # remember the offending item for the caller
        self.item = item

    def __str__(self):
        return '%s could not be packed' % str(self.item)
55
def ignore(_):
    """Misfit handler that silently discards the item."""
    return None
58
def report_failure(x):
    """Misfit handler that raises DidNotFit for the unplaceable item."""
    raise DidNotFit(x)
61
def value(sets, weight=id):
    """Total weight of all items across all bins."""
    total = 0
    for s in sets:
        for x in s:
            total += weight(x)
    return total
64
def next_fit(items, bins, capacity=1.0, weight=id, misfit=ignore,
             empty_bin=list):
    """Next-fit: keep filling the current bin; once an item does not fit,
    advance to the next bin and never look back.  When the last bin
    overflows, misfit() is called for the current item and packing stops
    (any remaining items are neither packed nor reported).  Returns the
    list of bins."""
    sets = [empty_bin() for _ in range(bins)]
    current = 0
    load = 0.0
    for item in items:
        w = weight(item)
        while load + w > capacity:
            # current bin is full (or the item is oversized): move on
            load = 0.0
            current += 1
            if current == bins:
                misfit(item)
                return sets
        sets[current] += [item]
        load += w
    return sets
81
def first_fit(items, bins, capacity=1.0, weight=id, misfit=ignore,
              empty_bin=list):
    """First-fit: place each item into the lowest-indexed bin with room;
    items that fit nowhere are reported via misfit().  Returns the list
    of bins."""
    sets = [empty_bin() for _ in range(bins)]
    loads = [0.0] * bins
    for item in items:
        w = weight(item)
        for idx in range(bins):
            if loads[idx] + w <= capacity:
                sets[idx] += [item]
                loads[idx] += w
                break
        else:
            # no bin had room
            misfit(item)

    return sets
97
def worst_fit(items, bins, capacity=1.0, weight=id, misfit=ignore,
              empty_bin=list):
    """Worst-fit: place each item into the bin that will retain the most
    space afterwards, i.e., the currently least-loaded bin.  Returns the
    list of bins."""
    sets = [empty_bin() for _ in range(bins)]
    loads = [0.0] * bins
    for item in items:
        w = weight(item)
        # the emptiest bin leaves the most space after placement
        emptiest = loads.index(min(loads))
        if loads[emptiest] + w <= capacity:
            sets[emptiest] += [item]
            loads[emptiest] += w
        else:
            misfit(item)
    return sets
113
def best_fit(items, bins, capacity=1.0, weight=id, misfit=ignore,
             empty_bin=list):
    """Best-fit: try bins from most-loaded to least-loaded and place the
    item into the first one that still has enough room.  Returns the list
    of bins."""
    sets = [empty_bin() for _ in range(bins)]
    loads = [0.0] * bins
    for item in items:
        w = weight(item)
        # consider fuller bins first (stable order on ties)
        by_load = sorted(range(bins), key=lambda i: loads[i], reverse=True)
        for idx in by_load:
            if loads[idx] + w <= capacity:
                sets[idx] += [item]
                loads[idx] += w
                break
        else:
            misfit(item)
    return sets
131
def decreasing(algorithm):
    """Wrap a packing heuristic so it processes items in order of
    non-increasing weight (the '-decreasing' family of heuristics)."""
    def alg_decreasing(items, bins, capacity=1.0, weight=id, *args, **kargs):
        # sort a copy so the caller's list is left untouched
        by_weight = sorted(items, key=weight, reverse=True)
        return algorithm(by_weight, bins, capacity, weight, *args, **kargs)
    return alg_decreasing
139
# '-decreasing' variants: identical heuristics, but items are first sorted
# by non-increasing weight
next_fit_decreasing = decreasing(next_fit)
first_fit_decreasing = decreasing(first_fit)
worst_fit_decreasing = decreasing(worst_fit)
best_fit_decreasing = decreasing(best_fit)
144
diff --git a/schedcat/mapping/rollback.py b/schedcat/mapping/rollback.py
new file mode 100644
index 0000000..d1fc86f
--- /dev/null
+++ b/schedcat/mapping/rollback.py
@@ -0,0 +1,380 @@
1#!/usr/bin/env python
2#
3# Copyright (c) 2010,2011,2012 Bjoern B. Brandenburg <bbb [at] cs.unc.edu>
4#
5# All rights reserved.
6#
7# Redistribution and use in source and binary forms, with or without
8# modification, are permitted provided that the following conditions are met:
9# * Redistributions of source code must retain the above copyright
10# notice, this list of conditions and the following disclaimer.
11# * Redistributions in binary form must reproduce the above copyright
12# notice, this list of conditions and the following disclaimer in the
13# documentation and/or other materials provided with the distribution.
14# * Neither the name of the copyright holder nor the
15# names of its contributors may be used to endorse or promote products
16# derived from this software without specific prior written permission.
17#
18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28# POSSIBILITY OF SUCH DAMAGE.
29
30"""
31Bin-packing-inspired assignment heuristics that support items that
32change in size (e.g., utilizations of overhead-affected tasks) and
33task-assignment with rollback (if adding a task causes a partition
34to become unschedulable, then put it elsewhere).
35
36This was originally implemented for (and discussed in):
37
38 A. Bastoni, B. Brandenburg, and J. Anderson, "Is Semi-Partitioned
39 Scheduling Practical?", Proceedings of the 23rd Euromicro Conference
40 on Real-Time Systems (ECRTS 2011), pp. 125-135. IEEE, July 2011.
41
42"""
43
from .binpack import DidNotFit, ignore
45
class BasicBin(object):
    """Minimal bin with a transactional add protocol:
    prepare() -> temporary_assign() -> validate(), followed by commit()
    on success or rollback() on failure."""

    def __init__(self, initial_items=None):
        self.items = initial_items if initial_items is not None else []

    def infeasible_to_fit(self, item):
        """Quick reject: True if item cannot possibly fit, so it is not
        even worth trying.  This base class never rejects."""
        return False

    def prepare(self):
        """Set up a 'transaction' that might have to be rolled back."""
        pass

    def temporary_assign(self, new_item):
        """Tentatively place new_item in the bin."""
        self.items.append(new_item)

    def validate(self):
        """Check whether the tentative assignment is acceptable."""
        return True

    def commit(self):
        """Finalize the last assignment (validate() returned True)."""
        pass

    def rollback(self):
        """Undo the last assignment (validate() returned False)."""
        # default: drop the tentatively appended item
        self.items.pop()

    def try_to_add(self, new_item):
        """Transactionally add new_item; returns True iff it fit."""
        self.prepare()
        self.temporary_assign(new_item)
        fits = self.validate()
        if fits:
            self.commit()
        else:
            self.rollback()
        return fits

    def try_to_add_no_commit(self, new_item):
        """Like try_to_add(), but a successful assignment is left
        uncommitted; failures are still rolled back."""
        self.prepare()
        self.temporary_assign(new_item)
        fits = self.validate()
        if not fits:
            self.rollback()
        return fits

    def add(self, new_item):
        """Add new_item; raises DidNotFit if the bin rejects it."""
        if not self.try_to_add(new_item):
            raise DidNotFit(new_item) # nope, already full
99
100
class Bin(BasicBin):
    """A bin with a numeric capacity; items are measured by a size()
    function (identity by default, i.e., items are plain numbers)."""

    def __init__(self, initial_items=None, size=lambda x: x, capacity=1):
        BasicBin.__init__(self, initial_items)
        self.max_capacity = capacity
        # how to measure an item; defaults to treating items as numbers
        self.size = size

    def capacity(self):
        return self.max_capacity

    def validate(self):
        # the tentative content must not exceed the capacity
        return self.allocated_capacity() <= self.capacity()

    def allocated_capacity(self):
        return sum([self.size(x) for x in self.items])

    def spare_capacity(self):
        return self.capacity() - self.allocated_capacity()

    def infeasible_to_fit(self, item):
        return self.size(item) > self.spare_capacity()
122
123
class GlobalConstraintBin(Bin):
    """A Bin that additionally consults an external predicate before
    accepting an item."""

    def __init__(self, global_validate=lambda: True, *args, **kargs):
        Bin.__init__(self, *args, **kargs)
        self.global_validate = global_validate

    def validate(self):
        # capacity check first; only then consult the global constraint
        if not super(GlobalConstraintBin, self).validate():
            return False
        return self.global_validate()
132
133
class DuctileItem(object):
    """A ductile item is an item that can change size based on other items
    in the bin. All "virtual methods" must be overriden."""

    def size(self):
        """returns the current size of the item"""
        # deliberately trips if a subclass forgets to override
        assert False

    def copy(self):
        """returns a copy of this item; may return self if it never changes"""
        assert False

    def update_size(self, added_item, bin):
        """change the size of the item based on an item that was added to the
        bin"""
        assert False

    def determine_size(self, bin):
        """determine the initial size of the item based on the bin in which it
        will be placed"""
        assert False
155
156
class FixedSizeItem(object):
    """Adapter giving a constant-size item the DuctileItem interface, so
    that ordinary items can be placed in ductile bins."""

    def __init__(self, item, item_size):
        self.item_size = item_size
        self.item = item

    def size(self):
        return self.item_size

    def copy(self):
        # size never changes, so sharing the instance is safe
        return self

    def update_size(self, added_item, bin):
        # fixed size: never reacts to other items
        pass

    def determine_size(self, bin):
        # fixed size: independent of the target bin
        pass
174
175
class DuctileBin(Bin):
    """Bin for DuctileItem objects, whose sizes may change when other
    items are added."""

    def __init__(self, *args, **kargs):
        Bin.__init__(self, *args, **kargs)
        # measure items by asking them for their current size
        self.size = self.item_size

    def item_size(self, item):
        return item.size()

    def prepare(self):
        # snapshot: keep the old items aside and work on copies
        self.saved_items = self.items
        self.items = [entry.copy() for entry in self.items]

    def temporary_assign(self, new_item):
        # existing items may grow or shrink in reaction to the newcomer
        for entry in self.items:
            entry.update_size(new_item, self)
        new_item.determine_size(self)
        self.items.append(new_item)

    def rollback(self):
        # restore the pre-transaction snapshot
        self.items = self.saved_items
        self.saved_items = None
199
200
class CheckedBin(Bin):
    """Debug helper: wraps another bin and asserts that the transactional
    methods are called in a legal sequence
    (prepare -> temporary_assign -> validate -> commit | rollback)."""
    STABLE = "stable"
    PREPPED = "prepped"
    ASSIGNED = "assigned"
    VALIDATED = "validated"

    def __init__(self, bin):
        self.__bin = bin
        self.__state = CheckedBin.STABLE

    def __getattr__(self, name):
        # Everything we don't define ourselves is proxied to the wrapped
        # bin.  BUG FIX: this used to index self.__bin.__dict__, which
        # (a) missed class-level attributes/methods of the wrapped bin and
        # (b) raised KeyError instead of AttributeError for missing names;
        # getattr() handles both correctly.
        return getattr(self.__bin, name)

    def prepare(self):
        assert self.__state == CheckedBin.STABLE
        self.__bin.prepare()
        self.__state = CheckedBin.PREPPED

    def temporary_assign(self, new_item):
        assert self.__state == CheckedBin.PREPPED
        self.__bin.temporary_assign(new_item)
        self.__state = CheckedBin.ASSIGNED

    def validate(self):
        assert self.__state == CheckedBin.ASSIGNED
        res = self.__bin.validate()
        self.__state = CheckedBin.VALIDATED
        return res

    def commit(self):
        assert self.__state == CheckedBin.VALIDATED
        self.__bin.commit()
        self.__state = CheckedBin.STABLE

    def rollback(self):
        assert self.__state == CheckedBin.VALIDATED
        self.__bin.rollback()
        self.__state = CheckedBin.STABLE
242
243
class Heuristic(object):
    """Base class for rollback-aware bin-packing heuristics.

    Subclasses provide select_bins_for_item() (or override
    try_to_place_item()) to define the packing order.  Items that fit
    nowhere are collected in self.misfits."""

    def __init__(self, initial_bins=None, make_bin=None):
        # bins to pack into; make_bin, if given, is a factory used to
        # grow the set of bins on demand
        self.bins = [] if initial_bins is None else initial_bins
        self.make_bin = make_bin
        self.misfits = []
        self.remaining_items = []

    def select_bins_for_item(self, item):
        """Iterator over candidate bins for item; override in subclasses."""
        return [] # overide with iterator

    def try_to_place_item(self, item):
        """Offer item to each candidate bin; True iff one accepted it."""
        for bin in self.select_bins_for_item(item):
            if bin.try_to_add(item):
                return True
        return False

    def binpack(self, items=None, report_misfit=ignore):
        """Binpack items into given finite number of bins; returns the
        number of items placed.  Unplaceable items are appended to
        self.misfits and passed to report_misfit()."""
        # BUG FIX: the default used to be the mutable literal [] shared
        # across all calls; use a None sentinel instead.
        if items is None:
            items = []
        self.remaining_items.extend(items)

        count = 0
        while self.remaining_items:
            item = self.remaining_items.pop(0)
            if self.try_to_place_item(item):
                # success
                count += 1
            else:
                # Did not fit in any of the bins that we tried.
                # See if we can add a new bin.
                made_space = False
                if self.make_bin:
                    # yes, let's try that
                    self.bins.append(self.make_bin())
                    # try to fit it in an empty bin
                    made_space = self.bins[-1].try_to_add(item)
                if not made_space:
                    # Either can't add bins or item won't fit into
                    # an empty bin by itself.
                    self.misfits.append(item)
                    report_misfit(item)
                else:
                    count += 1
        return count
289
290
class NextFit(Heuristic):
    """Next-fit: remember the bin used last and never revisit earlier
    bins."""

    def __init__(self, *args, **kargs):
        Heuristic.__init__(self, *args, **kargs)
        self.cur_index = 0

    def select_bins_for_item(self, item):
        while self.cur_index < len(self.bins):
            candidate = self.bins[self.cur_index]
            if not candidate.infeasible_to_fit(item):
                yield candidate
            # still here => the candidate rejected the item; advance
            self.cur_index += 1
303
304
class FirstFit(Heuristic):
    """First-fit: always scan all bins from the start, in index order."""

    def select_bins_for_item(self, item):
        return (b for b in self.bins if not b.infeasible_to_fit(item))
310 yield bin
311
312
class FitBased(Heuristic):
    """Heuristics that tentatively place the item in *every* feasible bin
    and then keep only the placement picked by select_remainder()
    (implemented by subclasses such as WorstFit/BestFit)."""

    def try_to_place_item(self, item):
        # assumes bins have a spare_capacity() function

        # first get rid of the hopeless
        candidates = [b for b in self.bins if not b.infeasible_to_fit(item)]

        # try to fit it where possible
        packed = []
        for b in candidates:
            b.prepare()
            b.temporary_assign(item)
            if b.validate():
                packed.append(b)
            else:
                b.rollback()

        # now we have every bin where it fits
        # find the one with the most slack
        remainders = [b.spare_capacity() for b in packed]
        # last item has most slack
        if remainders:
            # keep the assignment in the bin chosen by the subclass and
            # undo the tentative assignment everywhere else
            best = packed[remainders.index(self.select_remainder(remainders))]
            best.commit()
            for b in packed:
                if b != best:
                    b.rollback()
            return True
        else:
            return False
343
344
class WorstFit(FitBased):
    """Prefer the placement that leaves the most spare capacity."""

    def select_remainder(self, spare_capacities):
        return max(spare_capacities)
348
349
class BestFit(FitBased):
    """Prefer the placement that leaves the least spare capacity."""

    def select_remainder(self, spare_capacities):
        return min(spare_capacities)
353
354
class CapacityBased(Heuristic):
    """Try candidate bins in an order determined by their spare capacity
    (the order itself is decided by the subclass's order_bins())."""

    def select_bins_for_item(self, item):
        pairs = [(b, b.spare_capacity()) for b in self.bins]
        self.order_bins(pairs)
        for (candidate, _) in pairs:
            if not candidate.infeasible_to_fit(item):
                yield candidate
362
363
class MaxSpareCapacity(CapacityBased):
    """For item types that do not change size when adding a new item to a
    bin, this is the same as worst-fit, but faster.  For item types that
    change, it is not necessarily the same as WorstFit."""

    def order_bins(self, bins):
        # most spare capacity first
        bins.sort(key=lambda pair: pair[1], reverse=True)
371
372
class MinSpareCapacity(CapacityBased):
    """For item types that do not change size when adding a new item to a
    bin, this is the same as best-fit, but faster.  For item types that
    change, it is not necessarily the same as BestFit."""

    def order_bins(self, bins):
        # least spare capacity first
        bins.sort(key=lambda pair: pair[1], reverse=False)
380
diff --git a/schedcat/model/__init__.py b/schedcat/model/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/schedcat/model/__init__.py
diff --git a/schedcat/model/resources.py b/schedcat/model/resources.py
new file mode 100644
index 0000000..4d677df
--- /dev/null
+++ b/schedcat/model/resources.py
@@ -0,0 +1,54 @@
1
2
class ResourceRequirement(object):
    """Per-task requirement for one shared resource: how many read and
    write requests are issued, and the maximum length of each kind."""

    def __init__(self, res_id, num_writes=1, write_length=1,
                 num_reads=0, read_length=0):
        self.res_id = res_id
        self.max_writes = num_writes
        self.max_reads = num_reads
        self.max_write_length = write_length
        self.max_read_length = read_length

    @property
    def max_requests(self):
        "Number of requests of any kind."
        return self.max_writes + self.max_reads

    @property
    def max_length(self):
        "Maximum request length (of any kind)."
        return max(self.max_write_length, self.max_read_length)

    def add_request(self, length, read=False):
        "Record one more request of the given kind and length."
        if read:
            self.max_reads += 1
            if length > self.max_read_length:
                self.max_read_length = length
        else:
            self.max_writes += 1
            if length > self.max_write_length:
                self.max_write_length = length

    def add_read_request(self, length):
        self.add_request(length, read=True)

    def add_write_request(self, length):
        self.add_request(length, read=False)

    def convert_reads_to_writes(self):
        "Model all requests as writes (mutex semantics)."
        # evaluate the totals before zeroing the read-side fields
        self.max_writes, self.max_write_length = \
            self.max_requests, self.max_length
        self.max_reads = 0
        self.max_read_length = 0
43
44
class ResourceRequirements(dict):
    """Mapping res_id -> ResourceRequirement that lazily creates an
    all-zero requirement on first access."""

    def __missing__(self, key):
        fresh = ResourceRequirement(key, 0, 0, 0, 0)
        self[key] = fresh
        return fresh
49
50
def initialize_resource_model(taskset):
    """Attach an empty resource model to every task in the set."""
    for t in taskset:
        # mapping of res_id to ResourceRequirement object
        t.resmodel = ResourceRequirements()
diff --git a/schedcat/model/serialize.py b/schedcat/model/serialize.py
new file mode 100644
index 0000000..e5dd863
--- /dev/null
+++ b/schedcat/model/serialize.py
@@ -0,0 +1,191 @@
1#!/usr/bin/env python
2
3import xml.etree.ElementTree as ET
4
5from .tasks import TaskSystem, SporadicTask
6from .resources import ResourceRequirement, ResourceRequirements
7from schedcat.util.storage import storage
8
# tolerance for deciding that a float value is "really" an integer
EPSILON = 10**-7

def maybe_int(x):
    """Try to interpret x as an integer. Convert from string, if necessary.
    Floats within EPSILON of an integral value are truncated to int;
    unparsable strings are returned unchanged."""
    if type(x) == str:
        try:
            return maybe_int(float(x))
        except ValueError:
            return x
    if type(x) == float and abs(x) % 1 <= EPSILON:
        return int(x)
    return x
22
def set_attribute(tag, attr_name, obj, field_name=None):
    """Copy obj.<field_name> (only if that attribute exists on obj) into
    the XML attribute attr_name of tag, as a string; field_name defaults
    to attr_name."""
    field = attr_name if field_name is None else field_name
    if field in obj.__dict__:
        tag.set(attr_name, str(obj.__dict__[field]))
29
def subtag_for_attribute(tag, obj, field_name, tag_name=None):
    """Create a child element of tag iff obj has field_name set; returns
    the new element, or None when the attribute is absent."""
    if field_name not in obj.__dict__:
        return None
    return ET.SubElement(tag, field_name if tag_name is None else tag_name)
37
def res_requirement(r, rmodel=None):
    """Serialize a ResourceRequirement into a <requirement> element,
    attached to rmodel if given, free-standing otherwise."""
    if rmodel is None:
        tag = ET.Element('requirement')
    else:
        tag = ET.SubElement(rmodel, 'requirement')

    for field in ('res_id', 'max_reads', 'max_writes',
                  'max_read_length', 'max_write_length'):
        set_attribute(tag, field, r)

    return tag
51
def task(t):
    """Serialize a SporadicTask into an XML <task> element.
    The element and the task are cross-linked via tag.task and t.xml
    (mirroring parse_task, which sets node.task and t.xml)."""
    tag = ET.Element('task')
    if t.id is not None:
        set_attribute(tag, 'id', t)
    set_attribute(tag, 'period', t)
    set_attribute(tag, 'wcet', t, 'cost')
    # only store the deadline explicitly if it differs from the period
    if not t.implicit_deadline():
        set_attribute(tag, 'deadline', t)

    set_attribute(tag, 'partition', t)
    set_attribute(tag, 'response_time', t)
    set_attribute(tag, 'wss', t)

    rmodel = subtag_for_attribute(tag, t, 'resmodel', 'resources')
    if rmodel is not None:
        for res_id in t.resmodel:
            res_requirement(t.resmodel[res_id], rmodel)

    tag.task = t
    # BUG FIX: was `task.xml = tag`, which stored the element on this
    # *function* object instead of the task being serialized.
    t.xml = tag
    return tag
73
74
75
def parse_request(req_node):
    """Deserialize a <requirement> element into a ResourceRequirement;
    missing attributes fall back to the constructor defaults."""
    res_id = maybe_int(req_node.get('res_id', 0))
    writes = int(req_node.get('max_writes', 1))
    write_len = int(req_node.get('max_write_length', 1))
    reads = int(req_node.get('max_reads', 0))
    read_len = int(req_node.get('max_read_length', 0))
    return ResourceRequirement(res_id, writes, write_len, reads, read_len)
84
def parse_resmodel(node):
    """Deserialize the <resources> child of a task node, if present;
    returns a ResourceRequirements mapping or None."""
    resmodel = node.find('resources')
    if resmodel is None:
        return None
    reqs = ResourceRequirements()
    for n in resmodel.findall('requirement'):
        req = parse_request(n)
        reqs[req.res_id] = req
    return reqs
94
def get_attribute(node, attr_name, obj, field_name=None, convert=lambda _: _):
    """Read XML attribute attr_name from node into obj.<field_name>,
    applying convert; returns True iff the attribute was present."""
    field = attr_name if field_name is None else field_name
    raw = node.get(attr_name, None)
    if raw is None:
        return False
    obj.__dict__[field] = convert(raw)
    return True
104
def parse_task(node):
    """Deserialize a <task> element into a SporadicTask; node and task
    are cross-linked via node.task and t.xml."""
    t = SporadicTask(maybe_int(node.get('wcet')),
                     maybe_int(node.get('period')))

    for attr in ('deadline', 'id', 'partition', 'wss'):
        get_attribute(node, attr, t, convert=maybe_int)

    resmodel = parse_resmodel(node)
    if resmodel is not None:
        t.resmodel = resmodel

    t.xml = node
    node.task = t
    return t
123
def taskset(ts):
    """Serialize a TaskSystem into a <taskset> element with a summary
    <properties> child holding aggregate statistics."""
    tag = ET.Element('taskset')

    prop = ET.SubElement(tag, 'properties')
    prop.set('utilization', str(ts.utilization()))
    prop.set('utilization_q', str(ts.utilization_q()))
    prop.set('density_q', str(ts.density_q()))
    prop.set('density', str(ts.density()))
    prop.set('count', str(len(ts)))
    hp = ts.hyperperiod()
    if hp:
        prop.set('hyperperiod', str(hp))

    for each in ts:
        tag.append(task(each))
    return tag
140
def testpoint(tasksets, params):
    """Bundle several task sets plus a parameter dict into a <testpoint>
    element (parameters go into a <config> child)."""
    tag = ET.Element('testpoint')

    config = ET.SubElement(tag, 'config')
    for key in params:
        config.set(key, str(params[key]))
    for ts in tasksets:
        tag.append(taskset(ts))
    return tag
150
def parse_taskset(node):
    """Deserialize all <task> children of node into a TaskSystem."""
    return TaskSystem([parse_task(n) for n in node.findall('task')])
154
def parse_testpoint(node):
    """Deserialize a <testpoint> element; returns (params, tasksets)."""
    params = {}
    config = node.find('config')
    if config is not None:
        for key in config.keys():
            params[key] = maybe_int(config.get(key))
    tasksets = [parse_taskset(n) for n in node.findall('taskset')]
    return (params, tasksets)
163
def write_xml(xml, fname):
    """Write the given XML element to fname (a path or file-like object)."""
    ET.ElementTree(xml).write(fname)
167
def write_testpoint(tasksets, params, fname):
    """Serialize tasksets + params as a <testpoint> and write to fname."""
    write_xml(testpoint(tasksets, params), fname)
171
def write(ts, fname):
    """Serialize a task system as a <taskset> and write it to fname."""
    write_xml(taskset(ts), fname)
175
def load(file):
    """Parse an XML file and return the model object matching its root
    tag: a TaskSystem for <taskset>, a storage record for <testpoint>,
    a SporadicTask for <task>, and None otherwise."""
    tree = ET.ElementTree()
    tree.parse(file)
    root = tree.getroot()
    if root.tag == 'taskset':
        ts = parse_taskset(root)
        ts.xml = root
        return ts
    if root.tag == 'testpoint':
        params, tss = parse_testpoint(root)
        return storage(xml=root, params=params, tasksets=tss)
    if root.tag == 'task':
        return parse_task(root)
    return None
diff --git a/schedcat/model/tasks.py b/schedcat/model/tasks.py
new file mode 100644
index 0000000..a6d6d11
--- /dev/null
+++ b/schedcat/model/tasks.py
@@ -0,0 +1,147 @@
1from __future__ import division # use sane division semantics
2
3import copy
4
5from math import floor, ceil
6from schedcat.util.math import lcm
7from schedcat.util.quantor import forall
8
9from fractions import Fraction
10
class SporadicTask(object):
    """A task in the sporadic task model: worst-case execution cost,
    minimum inter-arrival separation (period), and a relative deadline.
    Only the bare minimum attributes are created here; other code (or
    subclasses) may attach more (response_time, wss, resmodel, ...).
    """

    def __init__(self, exec_cost, period, deadline=None, id=None):
        # An omitted deadline means implicit deadline (== period).
        self.period   = period
        self.cost     = exec_cost
        self.deadline = period if deadline is None else deadline
        self.id       = id

    def implicit_deadline(self):
        # True iff the relative deadline coincides with the period.
        return self.deadline == self.period

    def constrained_deadline(self):
        # True iff the deadline does not exceed the period.
        return self.deadline <= self.period

    def utilization(self):
        # Long-run processor share required, as a float.
        return self.cost / self.period

    def utilization_q(self):
        # Exact utilization as a rational number.
        return Fraction(self.cost, self.period)

    def density(self):
        # cost / min(period, deadline), as a float.
        return self.cost / min(self.period, self.deadline)

    def density_q(self):
        # Exact density as a rational number.
        return Fraction(self.cost, min(self.period, self.deadline))

    def tardiness(self):
        """Return this task's tardiness.
        Note: requires that some analysis previously established a
        response-time bound (self.response_time must be defined)!
        """
        return max(0, self.response_time - self.deadline)

    def maxjobs(self, interval_length):
        """Upper-bound the number of jobs that can execute in any
        interval of the given length.
        Note: requires that some analysis previously established a
        response-time bound (self.response_time must be defined)!
        """
        return int(ceil((interval_length + self.response_time) / self.period))

    def __repr__(self):
        idstr = ", id=%s" % self.id if self.id is not None else ""
        dstr = ", deadline=%s" % self.deadline if self.deadline != self.period else ""
        return "SporadicTask(%s, %s%s%s)" % (self.cost, self.period, dstr, idstr)
62
63
class TaskSystem(list):
    """A list of tasks with aggregate helpers (utilization, density,
    hyperperiod, ...)."""

    def __init__(self, tasks=[]):
        # Note: the shared default list is never mutated; extend() only
        # mutates self.
        self.extend(tasks)

    def __str__(self):
        return "\n".join([str(t) for t in self])

    def __repr__(self):
        return "TaskSystem([" + ", ".join([repr(t) for t in self]) + "])"

    def only_implicit_deadlines(self):
        # True iff every task has deadline == period.
        return forall(self)(lambda t: t.implicit_deadline())

    def only_constrained_deadlines(self):
        # True iff every task has deadline <= period.
        return forall(self)(lambda t: t.constrained_deadline())

    def assign_ids(self):
        # Number tasks 1..n in list order.
        for i, t in enumerate(self):
            t.id = i + 1

    def assign_ids_by_period(self):
        # Number tasks 1..n by increasing period (rate-monotonic order).
        for i, t in enumerate(sorted(self, key=lambda t: t.period)):
            t.id = i + 1

    def assign_ids_by_deadline(self):
        # Number tasks 1..n by increasing deadline (deadline-monotonic
        # order).
        # BUGFIX: this method was mistakenly also named
        # assign_ids_by_period, which silently shadowed the period-based
        # variant above.
        for i, t in enumerate(sorted(self, key=lambda t: t.deadline)):
            t.id = i + 1

    def sort_by_period(self):
        self.sort(key=lambda t: t.period)

    def sort_by_deadline(self):
        self.sort(key=lambda t: t.deadline)

    def utilization(self, heaviest=None):
        """Total utilization; if heaviest is given, only the sum of the
        heaviest tasks' utilizations."""
        u = [t.utilization() for t in self]
        if heaviest is None:
            return sum(u)
        else:
            u.sort(reverse=True)
            return sum(u[:heaviest])

    def utilization_q(self, heaviest=None):
        """Like utilization(), but exact (rational arithmetic)."""
        u = [t.utilization_q() for t in self]
        if heaviest is None:
            return sum(u)
        else:
            u.sort(reverse=True)
            return sum(u[:heaviest])

    def density(self):
        return sum([t.density() for t in self])

    def density_q(self):
        return sum([t.density_q() for t in self])

    def hyperperiod(self):
        # Least common multiple of all periods.
        return lcm(*[t.period for t in self])

    def max_utilization(self):
        return max([t.utilization() for t in self])

    def max_density(self):
        return max([t.density() for t in self])

    def max_density_q(self):
        return max([t.density_q() for t in self])

    def max_cost(self):
        return max([t.cost for t in self])

    def max_period(self):
        return max([t.period for t in self])

    def min_deadline(self):
        return min([t.deadline for t in self])

    def max_wss(self):
        "Assumes t.wss has been initialized for each task."
        return max([t.wss for t in self])

    def copy(self):
        # Shallow-copies each task into a new TaskSystem.
        ts = TaskSystem([copy.copy(t) for t in self])
        return ts
diff --git a/schedcat/overheads/__init__.py b/schedcat/overheads/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/schedcat/overheads/__init__.py
diff --git a/schedcat/overheads/fp.py b/schedcat/overheads/fp.py
new file mode 100644
index 0000000..97859bb
--- /dev/null
+++ b/schedcat/overheads/fp.py
@@ -0,0 +1,61 @@
1from __future__ import division
2
3from math import ceil, floor
4
5from schedcat.model.tasks import SporadicTask, TaskSystem
6
def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset):
    """Inflate task parameters to account for scheduling overheads under
    fixed-priority scheduling.

    Returns a new TaskSystem that additionally contains pseudo-tasks
    modeling interrupt handlers, or just a TaskSystem wrapping the input
    if there is nothing to charge.
    """
    if not oheads or not taskset:
        return TaskSystem(taskset)

    event_latency = oheads.release_latency(taskset)

    # Pseudo-task modeling the periodic tick interrupt (if it costs
    # anything at all).
    tick_cost = oheads.tick(taskset)
    if tick_cost > 0:
        tick_isr = SporadicTask(tick_cost, oheads.quantum_length)
        tick_isr.jitter = event_latency
        tick_tasks = [tick_isr]
    else:
        tick_tasks = []

    # Pseudo-tasks modeling per-task release interrupts, unless a
    # dedicated interrupt-handling processor absorbs them.
    release_cost = oheads.release(taskset)
    if not dedicated_irq and release_cost > 0:
        release_tasks = []
        for t in taskset:
            isr = SporadicTask(release_cost, t.period)
            isr.jitter = event_latency
            release_tasks.append(isr)
    else:
        release_tasks = []  # releases don't impact tasks directly

    # The worst-case initial release delay shows up as jitter for every
    # real task.
    release_delay = event_latency + oheads.release(taskset)
    if dedicated_irq:
        release_delay += oheads.ipi_latency(taskset)
    for t in taskset:
        t.jitter = release_delay

    # Each preemption costs two scheduler invocations and context
    # switches, plus one cache-related preemption/migration delay.
    preemption_cost = 2 * (oheads.schedule(taskset) + oheads.ctx_switch(taskset)) \
        + oheads.cache_affinity_loss(taskset)
    for t in taskset:
        t.cost += preemption_cost

    return TaskSystem(tick_tasks + release_tasks + taskset)
48
def quantize_params(taskset):
    """After applying overheads, use this function to make task
    parameters integral again.

    Costs and jitter are rounded up, periods and deadlines are rounded
    down (both pessimistic).  Returns False if any task becomes
    infeasible (density > 1) after rounding, else the taskset."""
    for t in taskset:
        t.cost = int(ceil(t.cost))
        t.period = int(floor(t.period))
        t.deadline = int(floor(t.deadline))
        t.jitter = int(ceil(t.jitter))
        if t.density() > 1:
            return False
    return taskset
diff --git a/schedcat/overheads/jlfp.py b/schedcat/overheads/jlfp.py
new file mode 100644
index 0000000..9a5bf68
--- /dev/null
+++ b/schedcat/overheads/jlfp.py
@@ -0,0 +1,84 @@
1from __future__ import division
2
3from math import ceil, floor
4
def charge_initial_load(oheads, taskset):
    """Increase WCET to reflect the cost of establishing a warm cache.
    Note: assumes that .wss (working set size) has been populated in
    each task.  Returns False if any task becomes infeasible."""
    if oheads:
        for ti in taskset:
            delta = oheads.initial_cache_load(ti.wss)
            assert delta >= 0  # negative overheads make no sense
            ti.cost += delta
            if ti.density() > 1:
                # infeasible
                return False
    return taskset
18
def preemption_centric_irq_costs(oheads, dedicated_irq, taskset):
    """Preemption-centric interrupt accounting.

    Returns (uscale, cpre): the fraction of processor capacity left
    after interrupt handling, and the per-preemption interrupt cost."""
    n = len(taskset)
    qlen = oheads.quantum_length
    tick_cost = oheads.tick(n)
    event_latency = oheads.release_latency(n)

    # Utilization consumed by the periodic tick interrupt.
    utick = tick_cost / qlen

    # Utilization consumed by release interrupts; zero if a dedicated
    # processor handles all releases.
    urel = 0.0
    if not dedicated_irq:
        release_cost = oheads.release(n)
        for ti in taskset:
            urel += (release_cost / ti.period)

    # Per-preemption cost contribution of interrupts.
    cpre = tick_cost + event_latency * utick
    if not dedicated_irq:
        cpre += n * release_cost + event_latency * urel

    return (1.0 - utick - urel, cpre)
40
def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset):
    """Inflate task parameters for scheduling overheads under JLFP
    scheduling, using preemption-centric interrupt accounting.
    Returns False on interrupt overload or if a task becomes
    infeasible."""
    if not oheads:
        return taskset

    uscale, cpre = preemption_centric_irq_costs(oheads, dedicated_irq, taskset)
    if uscale <= 0:
        # interrupt overload: no processor capacity left for real work
        return False

    n = len(taskset)
    wss = taskset.max_wss()

    # Two scheduler invocations + context switches per preemption, plus
    # one cache-affinity loss for the largest working set.
    sched_cost = 2 * (oheads.schedule(n) + oheads.ctx_switch(n)) \
        + oheads.cache_affinity_loss(wss)

    irq_latency = oheads.release_latency(n)

    # IPIs matter whenever jobs can be released on a remote processor.
    if dedicated_irq or num_cpus > 1:
        unscaled = 2 * cpre + oheads.ipi_latency(n)
    else:
        unscaled = 2 * cpre

    for ti in taskset:
        ti.period -= irq_latency
        ti.deadline -= irq_latency
        ti.cost = (ti.cost + sched_cost) / uscale + unscaled
        if ti.density() > 1:
            return False

    return taskset
72
def quantize_params(taskset):
    """After applying overheads, use this function to make task
    parameters integral again.

    Costs round up, periods/deadlines round down (both pessimistic).
    Returns False if any task becomes infeasible after rounding."""
    for t in taskset:
        t.cost = int(ceil(t.cost))
        t.period = int(floor(t.period))
        t.deadline = int(floor(t.deadline))
        if t.density() > 1:
            return False
    return taskset
diff --git a/schedcat/overheads/locking.py b/schedcat/overheads/locking.py
new file mode 100644
index 0000000..36ed831
--- /dev/null
+++ b/schedcat/overheads/locking.py
@@ -0,0 +1,141 @@
1from __future__ import division
2
3from math import ceil
4
5# All overhead accounting in this file assumes absence of any interrupts.
6
def charge_spinlock_overheads(oheads, tasks):
    """Inflate execution costs and critical-section lengths to account
    for spinlock acquire/release and system-call overheads.
    Returns False if a task becomes infeasible (density > 1)."""
    if oheads is None or not tasks:
        return tasks

    # Per-request charges: read requests, write requests, and the
    # syscall entry/exit wrapper.
    read_charge = oheads.read_lock(tasks) + oheads.read_unlock(tasks)
    write_charge = oheads.lock(tasks) + oheads.unlock(tasks)
    syscall_charge = oheads.syscall_in(tasks) + oheads.syscall_out(tasks)

    # Inflate each request and each task's execution cost.
    for t in tasks:
        extra_wcet = 0
        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            if req.max_reads:
                # Read critical sections grow by the read-lock overhead.
                req.max_read_length = int(ceil(req.max_read_length + read_charge))
                extra_wcet += req.max_reads * read_charge
            if req.max_writes:
                req.max_write_length = int(ceil(req.max_write_length + write_charge))
                extra_wcet += req.max_writes * write_charge
            # Every request pays the syscall entry/exit cost.
            extra_wcet += req.max_requests * syscall_charge
        t.cost += int(ceil(extra_wcet))
        if t.density() > 1:
            return False
    return tasks
37
38# for shared-memory semaphore protocols such as MPCP, FMLP, OMLP, etc.
# for shared-memory semaphore protocols such as MPCP, FMLP, OMLP, etc.
def charge_semaphore_overheads(oheads, preemptive, suspension_aware, tasks):
    """Inflate costs for shared-memory semaphore protocols.

    preemptive: may lock holders be preempted?
    suspension_aware: does the analysis model self-suspensions
    explicitly (otherwise suspensions count as execution time)?
    Returns False if a task becomes infeasible."""
    if oheads is None or not tasks:
        return tasks

    lock = oheads.lock(tasks)
    unlock = oheads.unlock(tasks)
    sysin = oheads.syscall_in(tasks)
    sysout = oheads.syscall_out(tasks)
    sched = oheads.schedule(tasks) + oheads.ctx_switch(tasks)
    cpmd = oheads.cache_affinity_loss(tasks)
    ipi = oheads.ipi_latency(tasks)

    # Per-request execution cost increase (equ 7.3):
    # 3 scheduler invocations (wait + resume + yield), two syscall
    # round trips, one lock, one unlock, two cache-affinity losses.
    exec_increase = 3 * sched + 2 * sysin + 2 * sysout \
        + 1 * lock + 1 * unlock + 2 * cpmd

    if suspension_aware:
        # The delay to be woken up counts as suspension time.
        susp_increase = ipi
    else:
        # s-oblivious: count IPI as execution time
        susp_increase = 0
        exec_increase += ipi

    # Critical-section length increase.  For non-preemptive protocols,
    # this is the remote case; additional local costs are charged
    # separately.  This only affects the FMLP+, the partitioned OMLP,
    # and the clustered OMLP.
    cs_increase = ipi + sched + sysout + sysin + unlock
    if preemptive:
        # Preemptive protocols: one more scheduler invocation.
        cs_increase += sched
    else:
        # Non-preemptive semaphore: extra delay charged to local cost.
        cs_increase_local = cs_increase + sched

    # Inflate each request and each task's execution cost.
    for t in tasks:
        extra_wcet = 0
        extra_susp = 0
        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            assert req.max_reads == 0  # doesn't handle RW at the moment
            if req.max_writes:
                if not preemptive:
                    req.max_write_length_local = \
                        int(ceil(req.max_write_length + cs_increase_local))
                req.max_write_length = int(ceil(req.max_write_length + cs_increase))
                extra_wcet += req.max_writes * exec_increase
                extra_susp += req.max_writes * susp_increase
        t.cost += int(ceil(extra_wcet))
        if suspension_aware:
            t.suspended += int(ceil(extra_susp))
        if t.density() > 1:
            return False
    return tasks
104
def charge_dpcp_overheads(oheads, tasks):
    """Inflate costs for the distributed priority-ceiling protocol
    (DPCP), where requests are served on a remote synchronization
    processor.  Returns False if a task becomes infeasible."""
    if oheads is None or not tasks:
        return tasks

    lock = oheads.lock(tasks)
    unlock = oheads.unlock(tasks)
    sysin = oheads.syscall_in(tasks)
    sysout = oheads.syscall_out(tasks)
    sched = oheads.schedule(tasks) + oheads.ctx_switch(tasks)
    cpmd = oheads.cache_affinity_loss(tasks)
    ipi = oheads.ipi_latency(tasks)

    # Local per-request cost: a syscall round trip, two scheduler
    # invocations, and two cache-affinity losses.
    exec_increase = sysin + sysout + 2 * sched + 2 * cpmd
    # Remote (agent-side) cost per request.
    cs_increase = 3 * sched + sysin + sysout + lock + unlock
    # Suspension: two IPIs (to and from the agent) plus the remote cost.
    susp_increase = 2 * ipi + cs_increase

    # Inflate each request and each task's execution cost.
    for t in tasks:
        extra_wcet = 0
        extra_susp = 0
        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            assert req.max_reads == 0  # DPCP doesn't handle RW
            if req.max_writes:
                req.max_write_length = int(ceil(req.max_write_length + cs_increase))
                extra_wcet += req.max_writes * exec_increase
                extra_susp += req.max_writes * susp_increase
        t.cost += int(ceil(extra_wcet))
        t.suspended += int(ceil(extra_susp))
        if t.density() > 1:
            return False
    return tasks
diff --git a/schedcat/overheads/model.py b/schedcat/overheads/model.py
new file mode 100644
index 0000000..7d5ddd3
--- /dev/null
+++ b/schedcat/overheads/model.py
@@ -0,0 +1,112 @@
from __future__ import division

from schedcat.util.csv import load_columns as load_column_csv
from schedcat.util.math import const, monotonic_pwlin, piece_wise_linear
5
class Overheads(object):
    """Legacy overhead objects: one approximation function per overhead
    source, each mapping task count to a cost in microseconds."""

    def __init__(self):
        self.quantum_length = 1000  # microseconds
        self.zero_overheads()

    # (CSV column name, attribute name) for each overhead source
    FIELD_MAPPING = [
        # scheduling-related overheads
        ('IPI-LATENCY', 'ipi_latency'),
        ('SCHEDULE', 'schedule'),
        ('RELEASE', 'release'),
        ('CXS', 'ctx_switch'),
        ('TICK', 'tick'),
        ('RELEASE-LATENCY', 'release_latency'),

        # locking- and system-call-related overheads
        ('LOCK', 'lock'),
        ('UNLOCK', 'unlock'),
        ('READ-LOCK', 'read_lock'),
        ('READ-UNLOCK', 'read_unlock'),
        ('SYSCALL-IN', 'syscall_in'),
        ('SYSCALL-OUT', 'syscall_out'),
    ]

    def zero_overheads(self):
        """Reset every overhead to a constant-zero approximation."""
        # cache-related preemption/migration delay
        self.cache_affinity_loss = CacheDelay()
        # cost of loading working set into cache at start of execution
        self.initial_cache_load = CacheDelay()
        for (name, field) in self.FIELD_MAPPING:
            self.__dict__[field] = const(0)

    def __str__(self):
        return " ".join(["%s: %s" % (name, self.__dict__[field])
                         for (name, field) in Overheads.FIELD_MAPPING])

    def load_approximations(self, fname, non_decreasing=True):
        """Load overhead samples from a CSV file and fit a piece-wise
        linear approximation to each recognized column.

        Raises IOError if the mandatory TASK-COUNT column is missing.
        """
        data = load_column_csv(fname, convert=float)
        if not 'TASK-COUNT' in data.by_name:
            # BUGFIX: was Python-2-only `raise IOError, "..."` syntax,
            # a SyntaxError under Python 3.
            raise IOError("TASK-COUNT column is missing")

        for (name, field) in Overheads.FIELD_MAPPING:
            if name in data.by_name:
                points = zip(data.by_name['TASK-COUNT'], data.by_name[name])
                if non_decreasing:
                    self.__dict__[field] = monotonic_pwlin(points)
                else:
                    self.__dict__[field] = piece_wise_linear(points)

    @staticmethod
    def from_file(fname, non_decreasing=True):
        """Construct an Overheads object from a CSV measurement file."""
        o = Overheads()
        o.source = fname
        o.load_approximations(fname, non_decreasing)
        return o
61
class CacheDelay(object):
    """Cache-related Preemption and Migration Delay (CPMD).
    Overheads are expressed as a piece-wise linear function of working
    set size, one per level of the memory hierarchy through which a job
    may migrate.
    """

    MEM, L1, L2, L3 = 0, 1, 2, 3
    MAPPING = list(enumerate(["MEM", "L1", "L2", "L3"]))

    def __init__(self, l1=0, l2=0, l3=0, mem=0):
        # Index order is MEM, L1, L2, L3 (see the constants above).
        self.mem_hierarchy = [const(mem), const(l1), const(l2), const(l3)]
        for (i, name) in CacheDelay.MAPPING:
            self.__dict__[name] = self.mem_hierarchy[i]

    def cpmd_cost(self, shared_mem_level, working_set_size):
        """Delay for a given WSS when migrating through
        shared_mem_level."""
        return self.mem_hierarchy[shared_mem_level](working_set_size)

    def set_cpmd_cost(self, shared_mem_level, approximation):
        """Replace the approximation for one memory level (also updates
        the correspondingly named attribute)."""
        self.mem_hierarchy[shared_mem_level] = approximation
        name = CacheDelay.MAPPING[shared_mem_level][1]
        self.__dict__[name] = self.mem_hierarchy[shared_mem_level]

    def max_cost(self, working_set_size):
        """Worst-case delay across all memory levels."""
        return max([f(working_set_size) for f in self.mem_hierarchy])

    def __call__(self, wss):
        # Calling the object yields the level-oblivious worst case.
        return self.max_cost(wss)

    @staticmethod
    def get_idx_for_name(key):
        """Map a level name ("MEM", "L1", ...) to its index."""
        for (i, name) in CacheDelay.MAPPING:
            if name == key:
                return i
        assert False  # bad key

    @staticmethod
    def from_file(fname, non_decreasing=True):
        """Load per-level CPMD approximations from a CSV file.
        Raises IOError if the mandatory WSS column is missing."""
        data = load_column_csv(fname, convert=float)
        if not 'WSS' in data.by_name:
            # BUGFIX: was Python-2-only `raise IOError, '...'` syntax.
            raise IOError('WSS column is missing')

        o = CacheDelay()

        for idx, name in CacheDelay.MAPPING:
            if name in data.by_name:
                points = zip(data.by_name['WSS'], data.by_name[name])
                if non_decreasing:
                    o.mem_hierarchy[idx] = monotonic_pwlin(points)
                else:
                    o.mem_hierarchy[idx] = piece_wise_linear(points)
                o.__dict__[name] = o.mem_hierarchy[idx]
        return o
diff --git a/schedcat/overheads/pfair.py b/schedcat/overheads/pfair.py
new file mode 100644
index 0000000..48872f1
--- /dev/null
+++ b/schedcat/overheads/pfair.py
@@ -0,0 +1,58 @@
1from __future__ import division
2
3from .quanta import quantize_wcet, quantize_period, account_for_delayed_release, stagger_latency
4
def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset,
                                staggered=False, total_cpus=None,
                                aligned_periodic_releases=False):
    """Inflate task parameters for Pfair scheduling overheads.

    Overheads shrink the effective quantum; release delays and optional
    quantum staggering are charged against periods and deadlines.
    Returns False if the system becomes infeasible.
    """
    if not oheads or not taskset:
        return taskset

    qlen = oheads.quantum_length
    event_latency = oheads.release_latency(taskset)
    release_cost = oheads.release(taskset)

    # Effective quantum: the raw quantum minus all per-quantum overheads.
    effective_qlen = qlen \
        - event_latency \
        - oheads.tick(taskset) \
        - oheads.schedule(taskset) \
        - oheads.ctx_switch(taskset) \
        - oheads.cache_affinity_loss(taskset)

    if not dedicated_irq:
        # Release interrupts of all other tasks may fire in a quantum.
        effective_qlen -= (len(taskset) - 1) * release_cost

    # Is any useful time left in the quantum? With short quanta and high
    # overheads, this may not be the case (in the analyzed worst case).
    if effective_qlen <= 0:
        return False

    # Round WCETs up to full quanta, paying for the lost capacity.
    taskset = quantize_wcet(qlen, taskset, effective_qlen)
    if not taskset:
        return False

    # Account for release delay.
    if not aligned_periodic_releases:
        # Default sporadic mode: job releases are triggered sporadically,
        # but newly released jobs are not considered for scheduling until
        # the next quantum boundary.
        release_delay = qlen + event_latency + release_cost
    else:
        # "Polling" mode. Periodic job releases are triggered
        # at each quantum boundary without any delays.
        release_delay = 0

    # shortcut: we roll staggering into release delay
    if staggered:
        if total_cpus is None:
            total_cpus = num_cpus
        # BUGFIX: stagger_latency's signature is (qlen, num_cpus); the
        # arguments were previously passed transposed.
        release_delay += stagger_latency(qlen, total_cpus)

    taskset = account_for_delayed_release(release_delay, taskset)
    if not taskset:
        return False

    return quantize_period(qlen, taskset, deadline=True)
diff --git a/schedcat/overheads/quanta.py b/schedcat/overheads/quanta.py
new file mode 100644
index 0000000..cb39055
--- /dev/null
+++ b/schedcat/overheads/quanta.py
@@ -0,0 +1,77 @@
1"""Support for quantum-based scheduling.
2"""
3from __future__ import division
4
5from math import ceil, floor
6
def is_quantum_multiple(qlen, value):
    """True iff value is an integral multiple of the quantum length.
    BUGFIX: previously used `value % qlen is 0`, which tests object
    identity instead of equality and is False for float remainders
    (0.0 is not the int 0)."""
    return value % qlen == 0
9
def has_integral_period(qlen):
    """Return a predicate testing whether a task's period is an integral
    multiple of the quantum length.
    BUGFIX: previously used identity (`is 0`) instead of equality
    (`== 0`), which fails for float periods."""
    return lambda t: t.period % qlen == 0
12
def quantize_wcet(qlen, tasks, effective_qlen=None):
    """Round up execution cost to account for partially used quanta.
    Specify an effective_qlen less than the quantum length to account
    for overheads.  Returns False if any task's density reaches 1.
    """
    if effective_qlen is None:
        effective_qlen = qlen
    assert qlen > 0
    assert effective_qlen > 0
    for t in tasks:
        quanta_needed = int(ceil(t.cost / effective_qlen))
        t.cost = quanta_needed * qlen
        if t.density() >= 1:
            return False
    return tasks
28
def quantize_period(qlen, tasks, deadline=False):
    """Round down periods to account for the fact that in a quantum-based
    scheduler all periods must be multiples of the quantum length.

    Rounding down the period of a periodic task yields a sporadic task
    that has an inter-arrival delay of one quantum.  If deadline is set,
    deadlines are rounded down as well.  Returns False if any task's
    density reaches 1.

    BUGFIX: is_quantum_multiple(qlen, value) was called with its
    arguments transposed, so the multiplicity test computed
    `qlen % period` instead of `period % qlen`.
    """
    for t in tasks:
        if not is_quantum_multiple(qlen, t.period):
            t.period = int(floor(t.period / qlen)) * qlen
        if deadline and not is_quantum_multiple(qlen, t.deadline):
            t.deadline = int(floor(t.deadline / qlen)) * qlen
        if t.density() >= 1:
            return False
    return tasks
48
def account_for_delayed_release(delay, tasks):
    """A release will not be noticed until the start of the next quantum
    boundary.  Hence, the period and deadline must both be reduced by
    the worst-case release delay for hard real-time use.  Returns False
    if any task's density reaches 1."""
    for t in tasks:
        t.period -= delay
        t.deadline -= delay
        if t.density() >= 1:
            return False
    return tasks
60
def stagger_latency(qlen, num_cpus):
    """Extra latency caused by staggering quantum boundaries across
    processors: the last processor's quantum starts up to (m-1)/m of a
    quantum late."""
    return (num_cpus - 1) / num_cpus * qlen
63
def account_for_staggering(qlen, num_cpus, tasks):
    """A job may miss its deadline by up to ((m - 1) / m) of a quantum
    length due to staggering.  Hence, we need to reduce the period and
    deadline.

    This leaves non-integral task parameters, which must be quantized
    afterward with quantize_period().  Returns False if any task's
    density reaches 1."""
    delta = stagger_latency(qlen, num_cpus)
    for t in tasks:
        t.period -= delta
        t.deadline -= delta
        if t.density() >= 1:
            return False
    return tasks
diff --git a/schedcat/sched/__init__.py b/schedcat/sched/__init__.py
new file mode 100644
index 0000000..6a9aff2
--- /dev/null
+++ b/schedcat/sched/__init__.py
@@ -0,0 +1,23 @@
1
# Python model to C++ model conversion code.

try:
    # Prefer the native (C++/SWIG) task-set representation if built.
    from .native import TaskSet

    using_native = True

    def get_native_taskset(tasks):
        """Convert a Python task system into a native TaskSet."""
        ts = TaskSet()
        for t in tasks:
            if t.implicit_deadline():
                ts.add_task(t.cost, t.period)
            else:
                ts.add_task(t.cost, t.period, t.deadline)
        return ts

except ImportError:
    # C++ implementation not available; callers must fall back to the
    # pure-Python analysis.
    using_native = False

    def get_native_taskset(tasks):
        assert False  # C++ implementation not available
diff --git a/schedcat/sched/edf/__init__.py b/schedcat/sched/edf/__init__.py
new file mode 100644
index 0000000..3ae9515
--- /dev/null
+++ b/schedcat/sched/edf/__init__.py
@@ -0,0 +1,129 @@
1"""EDF hard and soft schedulability tests,
2for uni- and multiprocessors.
3"""
4from __future__ import division
5
6from .gfb import is_schedulable as is_schedulable_gfb
7
8from .gfb import is_schedulable as gfb_test
9from .bak import is_schedulable as bak_test
10from .bar import is_schedulable as bar_test
11from .bcl import is_schedulable as bcl_test
12from .bcl_iterative import is_schedulable as bcli_test
13from .rta import is_schedulable as rta_test
14from .ffdbf import is_schedulable as ffdbf_test
15
16from .da import bound_response_times as da_tardiness_bounds
17from .rta import bound_response_times as rta_response_times
18
19from schedcat.util.quantor import forall
20
# Hard real-time schedulability tests, keyed by the customary acronym.
HRT_TESTS = {
    'GFB':    gfb_test,
    'BAK':    bak_test,
    'BAR':    bar_test,
    'BCL':    bcl_test,
    'BCLI':   bcli_test,
    'RTA':    rta_test,
    'FF-DBF': ffdbf_test,
}
31
# A somewhat arbitrary heuristic to curb pseudo-polynomial runtimes...
def should_use_baruah_test(threshold, taskset, no_cpus):
    """Decide whether the (expensive) Baruah test is worth running.
    threshold may be True (always), False (never), or a numeric cap on a
    runtime-proxy score that grows with task count and processor count
    and shrinks with slack."""
    if threshold is True:
        return True
    if threshold is False:
        return False
    slack = no_cpus - taskset.utilization()
    if not slack:
        # can't apply the test for zero slack; avoid division by zero
        return False
    n = len(taskset)
    score = n * (no_cpus * no_cpus) / (slack * slack)
    return score <= threshold
46
# all (pure Python implementation)
def is_schedulable_py(no_cpus, tasks,
                      rta_min_step=1,
                      want_baruah=3000,
                      want_rta=True,
                      want_ffdbf=False,
                      want_load=False):
    """Run a battery of sufficient G-EDF hard schedulability tests,
    cheapest first; return True as soon as any test accepts."""
    if tasks.utilization() > no_cpus or \
        not forall(tasks)(lambda t: t.period >= t.cost):
        # trivially infeasible
        return False
    not_arbitrary = tasks.only_constrained_deadlines()
    if no_cpus == 1 and tasks.density() <= 1:
        # simple uniprocessor density condition
        return True
    elif no_cpus > 1:
        # Baker's test can handle arbitrary deadlines.
        if bak_test(no_cpus, tasks):
            return True
        # The other tests cannot.
        if not_arbitrary:
            # The density test is cheap, try it first.
            if gfb_test(no_cpus, tasks):
                return True
            # Ok, try the slower ones.
            if should_use_baruah_test(want_baruah, tasks, no_cpus) and \
                bar_test(no_cpus, tasks):
                return True
            if want_rta and \
                rta_test(no_cpus, tasks, min_fixpoint_step=rta_min_step):
                return True
            # FF-DBF is almost always too slow.
            if want_ffdbf and ffdbf_test(no_cpus, tasks):
                return True
    # If we get here, none of the tests passed.
    return False
85
import schedcat.sched

if schedcat.sched.using_native:
    import schedcat.sched.native as native

    def is_schedulable_cpp(no_cpus, tasks,
                           rta_min_step=1,
                           want_baruah=True,
                           want_rta=True,
                           want_ffdbf=False,
                           want_load=False):
        """C++-backed counterpart of is_schedulable_py()."""
        native_test = native.GlobalEDF(no_cpus, rta_min_step,
                                       want_baruah != False,
                                       want_rta,
                                       want_ffdbf,
                                       want_load)
        ts = schedcat.sched.get_native_taskset(tasks)
        return native_test.is_schedulable(ts)

    # Prefer the (much faster) native implementation when present.
    is_schedulable = is_schedulable_cpp
else:
    is_schedulable = is_schedulable_py
109
110
def bound_response_times(no_cpus, tasks, *args, **kargs):
    """Establish a response-time bound for every task: HRT bounds if the
    system is hard schedulable, otherwise SRT tardiness bounds.
    Returns True iff a safe bound was found for each task."""
    if is_schedulable(no_cpus, tasks, *args, **kargs):
        # HRT schedulable => no tardiness.
        # See if RTA can provide a tighter response-time bound.
        # NOTE(review): the fallback default here is 0, whereas
        # is_schedulable() defaults rta_min_step to 1 — confirm intended.
        rta_step = kargs.get('rta_min_step', 0)
        if not rta_response_times(no_cpus, tasks, min_fixpoint_step=rta_step):
            # RTA failed; fall back to the trivial HRT bound.
            for t in tasks:
                t.response_time = t.deadline
        return True
    # Not HRT schedulable: use SRT analysis.
    return bool(da_tardiness_bounds(no_cpus, tasks))
diff --git a/schedcat/sched/edf/bak.py b/schedcat/sched/edf/bak.py
new file mode 100644
index 0000000..3fc9272
--- /dev/null
+++ b/schedcat/sched/edf/bak.py
@@ -0,0 +1,30 @@
1"""Implements the BAK G-EDF schedulability test.
2"""
3
4from __future__ import division
5
6from schedcat.util.quantor import forall
7
8from fractions import Fraction
9
ONE = Fraction(1)

def beta(t_i, t_k, l):
    """Interference bound of task t_i on task t_k; l is t_k's density
    (lambda in the paper).  Assumes integral time."""
    u_i = t_i.utilization_q()
    base = u_i * (ONE + Fraction(t_i.period - t_i.deadline, t_k.deadline))
    if l < u_i:
        return base + (t_i.cost - l * t_i.period) / Fraction(t_k.deadline)
    return base
21
def task_schedulable(T, t_k, m):
    """BAK schedulability condition for one task t_k on m processors."""
    l = t_k.density_q()  # lambda
    if l > ONE:
        return False
    total = sum([min(ONE, beta(t_i, t_k, l)) for t_i in T])
    return total <= m - (m - 1) * l
28
def is_schedulable(no_cpus, tasks):
    """BAK test: the system passes iff every task passes its per-task
    condition."""
    return forall(tasks)(lambda t_k: task_schedulable(tasks, t_k, no_cpus))
diff --git a/schedcat/sched/edf/bar.py b/schedcat/sched/edf/bar.py
new file mode 100644
index 0000000..dafabd7
--- /dev/null
+++ b/schedcat/sched/edf/bar.py
@@ -0,0 +1,108 @@
1"""G-EDF hard schedulability test
2
3This module implements Sanjoy Baruah's G-EDF schedulability test as presented
4in his paper "Techniques for Multiprocessor Global Schedulability Analysis."
5
6The variable names are picked to resemble the paper and are not meant to be
7understandable without the paper as a reference.
8"""
9
10from __future__ import division
11
12from math import floor, ceil
13from itertools import izip
14from schedcat.util.quantor import forall
15from schedcat.util.math import topsum
16from schedcat.util.iter import imerge, uniq
17
def dbf(tsk, t):
    """Demand bound function for task tsk at time t."""
    if t <= 0:
        return 0
    jobs = int(floor((t - tsk.deadline) / tsk.period)) + 1
    return max(0, jobs * tsk.cost)
23
def brute_force_dbf_points_of_change(tsk, max_t, d_k):
    """Reference implementation: scan every t in [0, max_t] and yield
    each point where dbf(tsk, t + d_k) changes.  Slow; for testing only.
    BUGFIX: replaced Python-2-only xrange() with range(), which behaves
    identically here and also works on Python 3."""
    for t in range(0, max_t + 1):
        if dbf(tsk, t + d_k) != dbf(tsk, t - 1 + d_k):
            yield t
30
def dbf_points_of_change(tsk, max_t):
    """Yield every t in [0, max_t] at which dbf(tsk, t) changes."""
    yield 0
    step = tsk.deadline
    while step <= max_t:
        yield step
        step += tsk.period
38
def dbf_points_of_change_dk(tsk, max_t, dk):
    """Yield every offset t in [0, max_t] where dbf(tsk, t + dk)
    changes."""
    for pt in dbf_points_of_change(tsk, max_t + dk):
        if pt >= dk:
            yield pt - dk
45
def all_dbf_points_of_change_dk(all_tsks, max_t, dk):
    """Merged, deduplicated points of change over all tasks."""
    streams = [dbf_points_of_change_dk(t, max_t, dk) for t in all_tsks]
    return uniq(imerge(lambda x, y: x < y, *streams))
49
50# The definition of I1() and I2() diverge from the one given in the
51# RTSS'07 paper. According to S. Baruah: "The second term in the min --
52# A_k+D_k-C_k -- implicitly assumes that the job missing its deadline
53# executes for C_k time units, whereas it actually executes for strictly
54# less than C_k. Hence this second term should be --A_k+D_k(-C_k -
55# \epsilon); for task systems with integer parameters, epsilon can be
56# taken to e equal to one. [...] A similar modification may need to be
57# made for the definition of I2."
58
def I1(tsk_i, tsk_k, a_k):
    """Interference of tsk_i on tsk_k without carry-in.  The '- 1'
    implements the epsilon correction for integral time (see the module
    comment above)."""
    d_k = tsk_k.deadline
    c_k = tsk_k.cost
    demand = dbf(tsk_i, a_k + d_k)
    if tsk_i == tsk_k:
        return min(demand - c_k, a_k)
    return min(demand, a_k + d_k - (c_k - 1))
66
def dbf_(tsk, t):
    """dbf() for the carry-in scenario."""
    full_jobs = int(floor(t / tsk.period))
    return full_jobs * tsk.cost + min(tsk.cost, t % tsk.period)
70
def I2(tsk_i, tsk_k, a_k):
    """Interference of tsk_i on tsk_k in the carry-in scenario (same
    epsilon correction as I1)."""
    d_k = tsk_k.deadline
    c_k = tsk_k.cost
    demand = dbf_(tsk_i, a_k + d_k)
    if tsk_i == tsk_k:
        return min(demand - c_k, a_k)
    return min(demand, a_k + d_k - (c_k - 1))
78
def Idiff(tsk_i, tsk_k, a_k):
    """Extra interference a carry-in job of tsk_i may contribute."""
    return I2(tsk_i, tsk_k, a_k) - I1(tsk_i, tsk_k, a_k)
81
def task_schedulable_for_offset(all_tsks, tsk_k, a_k, m):
    """Tests condition 8 from the paper.
    BUGFIX: uses built-in zip() instead of Python-2-only itertools.izip
    so the function also runs on Python 3 (equivalent here).  Also
    renames the local accumulator to avoid shadowing the module-level
    Idiff() function."""
    I1s = [I1(tsk_i, tsk_k, a_k) for tsk_i in all_tsks]
    Idiffs = [I2(tsk_i, tsk_k, a_k) - i1 for (tsk_i, i1) in zip(all_tsks, I1s)]
    top_diffs = topsum(Idiffs, None, m - 1)
    return sum(I1s) + top_diffs <= m * (a_k + tsk_k.deadline - tsk_k.cost)
88
def ak_bounds(all_tsks, m):
    """Per-task upper bounds on the busy-interval offset A_k
    (Equation 9 in the paper)."""
    U = all_tsks.utilization()
    c_sigma = topsum(all_tsks, lambda t: t.cost, m - 1)
    slack_term = sum([(t.period - t.deadline) * t.utilization()
                      for t in all_tsks])
    mu = m - U
    return [(c_sigma - t.deadline * mu + slack_term + m * t.cost) / mu
            for t in all_tsks]
98
def is_schedulable(m, tasks):
    """Are the given tasks schedulable on m processors?
    BUGFIX: uses built-in zip() instead of Python-2-only itertools.izip
    so the test also runs on Python 3 (equivalent here)."""
    if tasks.utilization() >= m or \
        not forall(tasks)(lambda t: t.constrained_deadline()):
        return False
    for (tsk_k, a_k_bound) in zip(tasks, ak_bounds(tasks, m)):
        for a_k in all_dbf_points_of_change_dk(tasks, a_k_bound, tsk_k.deadline):
            if not task_schedulable_for_offset(tasks, tsk_k, a_k, m):
                return False
    return True
108
diff --git a/schedcat/sched/edf/bcl.py b/schedcat/sched/edf/bcl.py
new file mode 100644
index 0000000..93701dd
--- /dev/null
+++ b/schedcat/sched/edf/bcl.py
@@ -0,0 +1,30 @@
1from __future__ import division
2
3from math import floor
4from fractions import Fraction
5
6from schedcat.util.quantor import forall
7
8ONE = Fraction(1)
9
def N(t_k, t_i):
    """Number of jobs of t_i fully contained in t_k's scheduling window
    (assumes integral time)."""
    return int(floor((t_k.deadline - t_i.deadline) / t_i.period)) + 1
13
def beta(t_k, t_i):
    """Normalized interference of t_i over t_k's scheduling window."""
    n_jobs = N(t_k, t_i)
    carry = min(t_i.cost, max(0, t_k.deadline - n_jobs * t_i.period))
    return Fraction(n_jobs * t_i.cost + carry, t_k.deadline)
20
def task_schedulable(T, t_k, m):
    """BCL condition for t_k on m processors (strict inequality; the
    boundary case is allowed only if some interference term is
    non-zero and within the slack)."""
    l_k = t_k.density_q()
    slack = ONE - l_k
    betas = [beta(t_k, t_i) for t_i in T if t_i != t_k]
    total = sum([min(b, slack) for b in betas])
    if total < m * slack:
        return True
    return total == m * slack and any([0 < b <= slack for b in betas])
28
def is_schedulable(no_cpus, tasks):
    """BCL test: every task must satisfy the per-task condition."""
    return forall(tasks)(lambda t_k: task_schedulable(tasks, t_k, no_cpus))
diff --git a/schedcat/sched/edf/bcl_iterative.py b/schedcat/sched/edf/bcl_iterative.py
new file mode 100644
index 0000000..aecf3bc
--- /dev/null
+++ b/schedcat/sched/edf/bcl_iterative.py
@@ -0,0 +1,62 @@
1"""
2Implementation of Marko Bertogna, Michele Cirinei, and Giuseppe Lipari
3iterative schedulability test. This implementation follows the description in:
4
5 Schedulability analysis of global scheduling algorithms on
6 multiprocessor platforms by Marko Bertogna, Michele Cirinei, Giuseppe
7 Lipari to appear in Journal IEEE Transactions on Parallel and
8 Distributed Systems (2008).
9"""
10
11from __future__ import division
12
13from math import floor, ceil
14
def interfering_jobs(length, ti):
    """Number of jobs of ti that can interfere in a window of `length`.
    Equ. (15) in the paper; ti.bcl_slack is the slack bound so far."""
    window = length + ti.deadline - ti.cost - ti.bcl_slack
    return int(floor(window / ti.period))

def wk_interfering_workload(length, ti):
    """Interfering workload of ti under any work-conserving scheduler,
    Equ. (14) in the paper."""
    jobs = interfering_jobs(length, ti)
    window = length + ti.deadline - ti.cost - ti.bcl_slack
    carry_in = min(ti.cost, window - jobs * ti.period)
    return jobs * ti.cost + carry_in
24
def edf_interfering_workload(length, ti):
    """Interfering workload of ti under G-EDF, Equ. (17) in the paper."""
    full_jobs = int(floor(length / ti.period))
    remainder = length - ti.bcl_slack - full_jobs * ti.period
    return full_jobs * ti.cost + min(ti.cost, max(0, remainder))
30
def edf_slack_update(tk, tasks, no_cpus):
    """Recompute tk's slack lower bound under G-EDF, Equ. (18)."""
    # No single competitor needs to contribute more than
    # D_k - C_k + 1 units of work (the '+ 1' assumes integral time).
    cap = tk.deadline - tk.cost + 1
    other_work = sum(min(edf_interfering_workload(tk.deadline, ti), cap)
                     for ti in tasks if ti != tk)
    return tk.deadline - tk.cost - int(floor(other_work / no_cpus))

def is_schedulable(no_cpus, tasks, round_limit=None):
    """Iteratively improve each task's slack bound until either the
    system is deemed feasible, no more improvements can be found, or
    the round limit (if given) is reached."""
    for t in tasks:
        t.bcl_slack = 0.0
    improved = True
    feasible = False
    rounds = 0
    while improved and not feasible and (not round_limit or rounds < round_limit):
        rounds += 1
        feasible = True
        improved = False
        for tk in tasks:
            bound = edf_slack_update(tk, tasks, no_cpus)
            if bound < 0:
                feasible = False
            if bound > tk.bcl_slack:
                tk.bcl_slack = bound
                improved = True
    return feasible
diff --git a/schedcat/sched/edf/da.py b/schedcat/sched/edf/da.py
new file mode 100644
index 0000000..15fe974
--- /dev/null
+++ b/schedcat/sched/edf/da.py
@@ -0,0 +1,94 @@
1"""Global EDF soft real-time test and tardiness bounds, based on Devi & Anderson's work.
2"""
3
4from __future__ import division
5
6from math import ceil
7
8from schedcat.util.quantor import forall
9
def tardiness_x(no_cpus, tasks):
    """Compute the X term of Devi & Anderson's G-EDF tardiness bound
    (Corollary 4.11, page 109, of U. Devi's dissertation).

    Assumes full preemptivity. Returns None if tardiness is not
    bounded, and 0 for trivial cases (empty set, feasible uniprocessor).
    """
    if not tasks:
        return 0
    U = tasks.utilization()
    if no_cpus == 1:
        # Uniprocessor EDF: zero tardiness iff not over-utilized.
        return 0 if U <= 1 else None

    # mu (utilizations) and epsilon (costs) in Devi's notation,
    # both in non-increasing order.
    by_util = sorted((t.utilization() for t in tasks), reverse=True)
    by_cost = sorted((t.cost for t in tasks), reverse=True)

    Lambda = int(ceil(U)) - 1
    emin = by_cost[-1]

    # Sum of the (Lambda - 1) largest utilizations. Guard the slice:
    # for Lambda == 0 the original slice 'by_util[0:Lambda - 1]' was
    # [0:-1], which wrongly sums almost all utilizations.
    reduced_capacity = no_cpus - sum(by_util[0:max(0, Lambda - 1)])
    if reduced_capacity <= 0:
        # Bad: tardiness is not bounded.
        return None

    # Sum of the Lambda largest costs, less the smallest cost.
    reduced_cost = max(0, sum(by_cost[0:Lambda]) - emin)
    return int(ceil(reduced_cost / reduced_capacity))
39
def np_tardiness_x(no_cpus, tasks):
    """Compute the X term of Devi & Anderson's non-preemptive G-EDF
    tardiness bound (Corollary 4.3 of U. Devi's dissertation, page 110).

    Returns None if tardiness is not bounded, 0 for the empty set.
    """
    if not tasks:
        return 0
    U = tasks.utilization()
    # by_util is mu and by_cost is epsilon in Devi's notation,
    # both in non-increasing order.
    by_util = sorted((t.utilization() for t in tasks), reverse=True)
    by_cost = sorted((t.cost for t in tasks), reverse=True)

    Lambda = int(ceil(U)) - 1
    emin = by_cost[-1]

    # Guard the slice: for Lambda == 0 the original upper bound
    # 'Lambda - 1' is -1, which wrongly sums almost all utilizations.
    reduced_capacity = no_cpus - sum(by_util[0:max(0, Lambda - 1)])
    if reduced_capacity <= 0:
        # Bad: tardiness is not bounded.
        return None

    # Non-preemptive blocking: the (m - Lambda - 1) largest costs
    # additionally contribute to the demand.
    block_idx = no_cpus - Lambda - 1
    reduced_cost = sum(by_cost[0:Lambda]) + sum(by_cost[0:block_idx]) - emin
    return int(ceil(reduced_cost / reduced_capacity))
65
def task_tardiness_bound(no_cpus, tasks, preemptive=True):
    """Return the X term of the tardiness bound for this task system,
    or None if tardiness is not bounded."""
    # The bound formulas are only valid if tardiness is bounded at all.
    if not has_bounded_tardiness(no_cpus, tasks):
        return None
    if no_cpus <= 1:
        # Uniprocessor case: zero tardiness once feasibility holds.
        return 0
    if preemptive:
        return tardiness_x(no_cpus, tasks)
    return np_tardiness_x(no_cpus, tasks)

def has_bounded_tardiness(no_cpus, tasks):
    """Tardiness is bounded iff the system is not over-utilized and
    no task's cost exceeds its period."""
    if tasks.utilization() > no_cpus:
        return False
    return forall(tasks)(lambda t: t.period >= t.cost)

def bound_response_times(no_cpus, tasks, preemptive=True):
    """Attach response_time = deadline + cost + X to each task.
    Returns False if tardiness is not bounded."""
    # Devi & Anderson's analysis applies to implicit-deadline tasks.
    assert forall(tasks)(lambda t: t.implicit_deadline())

    x = task_tardiness_bound(no_cpus, tasks, preemptive)
    if x is None:
        return False
    for t in tasks:
        t.response_time = t.deadline + t.cost + x
    return True
diff --git a/schedcat/sched/edf/ffdbf.py b/schedcat/sched/edf/ffdbf.py
new file mode 100644
index 0000000..a159442
--- /dev/null
+++ b/schedcat/sched/edf/ffdbf.py
@@ -0,0 +1,131 @@
1"""
2Schedulability test based on:
3
4 Improved multiprocessor global schedulability analysis
5 S. Baruah and V. Bonifaci and A. Marchetti-Spaccamela and S. Stiller
6 Real-Time Systems, to appear, Springer, 2010.
7
8
9NB: this code is slow (i.e., of pseudo-polynomial complexity and not optimized),
10 and also not well tested.
11"""
12
13from __future__ import division
14
15import numbers
16from math import trunc
17from fractions import Fraction
18
19from schedcat.util.iter import imerge, uniq
20
21
def ffdbf(ti, time, speed):
    """Forced-forward demand bound function of task ti at `time` for a
    processor of the given (rational) speed."""
    carry = time % ti.period
    complete_jobs = trunc(time / ti.period)
    demand = complete_jobs * ti.cost
    if carry >= ti.deadline:
        # One more whole job fits into the window.
        demand += ti.cost
    else:
        assert isinstance(speed, Fraction)
        threshold = ti.deadline - (Fraction(ti.cost) / speed)
        if carry >= threshold:
            # Partial contribution of the job straddling the window end.
            demand += ti.cost - (ti.deadline - carry) * speed
        # otherwise: nothing to add
    return demand
33
def ts_ffdbf(ts, time, speed):
    """Total forced-forward demand of all tasks in ts."""
    return sum(ffdbf(t, time, speed) for t in ts)

def witness_condition(cpus, ts, time, speed):
    """Equ. (6): does the total demand at `time` stay within the
    capacity of a platform slowed down to `speed`?"""
    supply = (cpus - (cpus - 1) * speed) * time
    return ts_ffdbf(ts, time, speed) <= supply
45
def test_points(ti, speed, min_time):
    """Generate, in ascending order, the points greater than min_time
    at which ffdbf(ti, ., speed) changes slope (two per job of ti)."""
    assert isinstance(speed, Fraction)
    # Skip the whole periods that lie entirely before min_time.
    skipped = trunc(min_time / ti.period)
    time = skipped * ti.period + ti.deadline
    offset = min(Fraction(ti.cost) / speed, ti.deadline)

    # The first pair may straddle min_time; filter each point.
    if time - offset > min_time:
        yield time - offset
    if time > min_time:
        yield time
    time += ti.period

    # All later pairs are beyond min_time by construction.
    while True:
        yield time - offset
        yield time
        time += ti.period
64
65def testing_set(ts, speed, min_time):
66 all_points = [test_points(ti, speed, min_time) for ti in ts]
67 return uniq(imerge(lambda x,y: x < y, *all_points))
68
69def brute_force_sigma_values(ts, step=Fraction(1,100)):
70 maxd = ts.max_density_q()
71 yield maxd
72 x = (maxd - maxd % step) + step
73 while True:
74 yield x
75 x += step
76
def is_schedulable(cpus, ts,
                   epsilon=Fraction(1, 10),
                   sigma_granularity=50):
    # FF-DBF schedulability search: look for a speed sigma in
    # [max_density, sigma_bound] such that the witness condition holds
    # at every test point up to time_bound.
    if not ts:
        # protect against empty task sets
        return True

    if cpus < 2:
        # sigma bounds requires cpus >= 2
        return False

    assert isinstance(epsilon, Fraction)

    # epsilon trades precision of the sigma bound for a finite
    # pseudo-polynomial time_bound (see the paper).
    sigma_bound = (cpus - ts.utilization_q()) / Fraction(cpus - 1) - epsilon
    time_bound = Fraction(sum([ti.cost for ti in ts])) / epsilon
    max_density = ts.max_density_q()

    # NOTE(review): microsteps is never used below.
    microsteps = 0
    sigma_step = Fraction(1, sigma_granularity)

    # sigma is only defined for <= 1
    sigma_bound = min(1, sigma_bound)
    sigma_vals = iter(brute_force_sigma_values(ts, step=sigma_step))

    schedulable = False
    # NOTE(review): .next() is Python 2-only; next(sigma_vals) would
    # also work on Python 3.
    sigma_cur = sigma_vals.next()
    t_cur = 0

    while not schedulable and max_density <= sigma_cur <= sigma_bound:
        schedulable = True
        for t in testing_set(ts, sigma_cur, t_cur):
            if time_bound < t:
                # great, we made it to the end
                break
            if not witness_condition(cpus, ts, t, sigma_cur):
                # nope, sigma_cur is not a witness
                schedulable = False

                while True:
                    # search next sigma value
                    sigma_nxt = sigma_vals.next()
                    if not (max_density <= sigma_nxt <= sigma_bound):
                        # out of bounds, give up (2 > sigma_bound ends
                        # the outer loop)
                        sigma_cur = 2
                        break
                    if witness_condition(cpus, ts, t, sigma_nxt):
                        # this one works
                        sigma_cur = sigma_nxt
                        break

                # don't have to recheck times already checked
                t_cur = t
                break

    return schedulable
diff --git a/schedcat/sched/edf/gfb.py b/schedcat/sched/edf/gfb.py
new file mode 100644
index 0000000..6ee4367
--- /dev/null
+++ b/schedcat/sched/edf/gfb.py
@@ -0,0 +1,12 @@
1from __future__ import division
2
3# The G-EDF density test.
def is_schedulable(no_cpus, tasks):
    """Is the system schedulable according to the GFB test?
    Also known as the "density test": total density may not exceed
    m - (m - 1) * delta_max.
    """
    if not tasks:
        return True
    delta_max = max(t.density() for t in tasks)
    threshold = no_cpus - (no_cpus - 1) * delta_max
    return tasks.density() <= threshold
diff --git a/schedcat/sched/edf/rta.py b/schedcat/sched/edf/rta.py
new file mode 100644
index 0000000..682b593
--- /dev/null
+++ b/schedcat/sched/edf/rta.py
@@ -0,0 +1,88 @@
1"""
2Implementation of Bertogna and Cirinei's response time analysis test.
3
4 "Response-Time Analysis for Globally Scheduled Symmetric
5 Multiprocessor Platforms"
6 M. Bertogna and M. Cirinei,
7 Proceedings of the 28th IEEE International Real-Time Systems Symposium,
8 pages 149--160, 2007.
9
10"""
11
12from __future__ import division
13
14
def rta_interfering_workload(length, ti):
    """Workload of ti that can interfere in a window of `length`,
    Equ. (4) and (8) in the paper; ti.rta_slack is the slack bound
    established so far.

    Fix: the original called floor(), but `floor` was never imported in
    this module (NameError at runtime); floor division is equivalent
    here and needs no import.
    """
    interval = length + ti.deadline - ti.cost - ti.rta_slack
    jobs = int(interval // ti.period)
    return jobs * ti.cost + min(ti.cost, interval % ti.period)
20
def edf_interfering_workload(length, ti):
    """Workload of ti that can interfere under G-EDF,
    Equs. (5) and (9) in the paper.

    Fix: uses floor division instead of the original int(floor(...));
    `floor` was never imported in this module (NameError at runtime).
    """
    jobs = int(length // ti.period)
    return jobs * ti.cost + \
        min(ti.cost, max(0, length % ti.period - ti.rta_slack))
27
def response_estimate(tk, tasks, no_cpus, response_time):
    """One step of the response-time recurrence for tk, given the
    current slack bounds (ti.rta_slack) of all tasks."""
    # No single competing task needs to delay tk by more than this
    # (the '+ 1' assumes integral time).
    delay_limit = response_time - tk.cost + 1
    cumulative_work = 0
    for ti in tasks:
        if ti != tk:
            cumulative_work += min(rta_interfering_workload(response_time, ti),
                                   edf_interfering_workload(tk.deadline, ti),
                                   delay_limit)
    # Floor division replaces the original int(floor(...)); `floor`
    # was never imported in this module (NameError at runtime).
    return tk.cost + int(cumulative_work // no_cpus)
37
def rta_fixpoint(tk, tasks, no_cpus, min_delta=None):
    """Iterate the response-time recurrence for tk until a fixpoint is
    reached or the estimate exceeds tk.deadline.

    If the search converges too slowly, min_delta (if given) enforces a
    minimum step size. Fix: the original evaluated
    'resp - last < min_delta' even when min_delta was None, which
    relies on Python 2's arbitrary int/None ordering (always False)
    and raises TypeError on Python 3; the explicit None check below
    preserves the Python 2 behavior on both versions.
    """
    # Response-time iteration, starting at the task's own cost.
    last, resp = tk.cost, response_estimate(tk, tasks, no_cpus, tk.cost)

    while last != resp and resp <= tk.deadline:
        if min_delta is not None and resp > last and resp - last < min_delta:
            # Force progress, but never step beyond the deadline.
            resp = min(last + min_delta, tk.deadline)
        last, resp = resp, response_estimate(tk, tasks, no_cpus, resp)

    return resp
50
def is_schedulable(no_cpus, tasks, round_limit=25, min_fixpoint_step=0):
    """Iteratively improve the slack bound of each task until either
    the system is deemed schedulable, no more improvements can be
    found, or the round limit (if given) is reached."""
    for t in tasks:
        t.rta_slack = 0
    improved = True
    schedulable = False
    rounds = 0

    while improved and not schedulable \
        and (not round_limit or rounds < round_limit):
        rounds += 1
        schedulable = True
        improved = False
        for tk in tasks:
            # Compute a new response-time bound for tk.
            response = rta_fixpoint(tk, tasks, no_cpus,
                                    min_delta=min_fixpoint_step)
            if response > tk.deadline:
                # tk is (currently) not schedulable.
                schedulable = False
            else:
                # A valid response time; record improved slack.
                slack = tk.deadline - response
                if slack != tk.rta_slack:
                    tk.rta_slack = slack
                    improved = True
    return schedulable

def bound_response_times(no_cpus, tasks, *args, **kargs):
    """Attach response_time = deadline - slack to each task if the
    system passes the iterative RTA test."""
    if not is_schedulable(no_cpus, tasks, *args, **kargs):
        return False
    for t in tasks:
        t.response_time = t.deadline - t.rta_slack
    return True
diff --git a/schedcat/sched/fp/__init__.py b/schedcat/sched/fp/__init__.py
new file mode 100644
index 0000000..2da5e1b
--- /dev/null
+++ b/schedcat/sched/fp/__init__.py
@@ -0,0 +1,8 @@
1"""Fixed-priority schedulability tests.
2 Currently, only uniprocessor response-time analysis is implemented.
3"""
4
5from __future__ import division
6
7from .rta import bound_response_times, is_schedulable
8
diff --git a/schedcat/sched/fp/rta.py b/schedcat/sched/fp/rta.py
new file mode 100644
index 0000000..4f148af
--- /dev/null
+++ b/schedcat/sched/fp/rta.py
@@ -0,0 +1,69 @@
1from __future__ import division
2
3from math import ceil
4
5from schedcat.util.quantor import forall
6
7# task.blocked => ALL blocking, including local and remote (self-suspensions)
8# task.suspended => self-suspensions, aka only REMOTE blocking
9# task.jitter => ADDITIONAL self-suspensions (not included in task.blocked or task.suspended)
10
def check_for_suspension_parameters(taskset):
    """Compatibility shim: install zero-valued blocking / suspension /
    jitter parameters on any task that lacks them."""
    for t in taskset:
        if 'blocked' not in t.__dict__:
            t.blocked = 0        # no blocking at all
        if 'suspended' not in t.__dict__:
            t.suspended = 0      # no self-suspension time
        if 'jitter' not in t.__dict__:
            t.jitter = 0         # no arrival jitter
23
def fp_demand(task, time):
    """Upper bound on the demand of higher-priority `task` in a window
    of `time`; suspension and jitter widen the window (this accounts
    for double-hit / back-to-back execution)."""
    window = time + task.suspended + task.jitter
    return task.cost * int(ceil(window / task.period))
28
def rta_schedulable(taskset, i):
    """Uniprocessor response-time analysis for the i-th task, where
    taskset[:i] are the (higher-priority) interfering tasks. On
    success, stores the response time on the task and returns True."""
    task = taskset[i]
    higher_prio = taskset[:i]

    # Demand terms that do not depend on the window length.
    own_demand = task.blocked + task.jitter + task.cost

    # Standard fixpoint search, starting from the one-job total demand.
    time = sum(t.cost for t in higher_prio) + own_demand
    while time <= task.deadline:
        demand = own_demand + sum(fp_demand(t, time) for t in higher_prio)
        if demand == time:
            # Converged: demand is met by `time`.
            task.response_time = time
            return True
        # Not yet converged; retry with the larger estimate.
        time = demand

    # The search left the deadline window without converging.
    return False

def bound_response_times(no_cpus, taskset):
    """Assumption: taskset is sorted in order of decreasing priority."""
    # This is standard uniprocessor response-time analysis; it handles
    # neither arbitrary deadlines nor multiprocessors.
    if no_cpus != 1 or not taskset.only_constrained_deadlines():
        return False
    check_for_suspension_parameters(taskset)
    for i in xrange(len(taskset)):
        if not rta_schedulable(taskset, i):
            return False
    return True

is_schedulable = bound_response_times
68
69
diff --git a/schedcat/sched/pfair.py b/schedcat/sched/pfair.py
new file mode 100644
index 0000000..27e7bf6
--- /dev/null
+++ b/schedcat/sched/pfair.py
@@ -0,0 +1,28 @@
1from schedcat.util.quantor import forall
2
def is_schedulable(no_cpus, tasks):
    """Simple utilization-bound test: tasks.utilization() <= no_cpus.
    Assumption: all parameters are quantum multiples and deadlines are
    not constrained (deadline >= period >= cost)."""
    if tasks.utilization() > no_cpus:
        return False
    return forall(tasks)(lambda t: t.deadline >= t.period >= t.cost)

def has_bounded_tardiness(no_cpus, tasks):
    """Same utilization bound; this also holds for constrained-deadline
    tasks (period >= cost suffices)."""
    if tasks.utilization() > no_cpus:
        return False
    return forall(tasks)(lambda t: t.period >= t.cost)

def bound_response_times(no_cpus, tasks):
    """Upper-bound the response time of each task by its period.
    This assumes quantum-multiple parameters and that effects such as
    quantum staggering have already been accounted for."""
    if not has_bounded_tardiness(no_cpus, tasks):
        return False
    for t in tasks:
        t.response_time = t.period
    return True
diff --git a/schedcat/sim/__init__.py b/schedcat/sim/__init__.py
new file mode 100644
index 0000000..32443b7
--- /dev/null
+++ b/schedcat/sim/__init__.py
@@ -0,0 +1,10 @@
1from .native import TaskSet
2
def get_native_taskset(tasks):
    """Mirror a Python task set into a native (C++) TaskSet object."""
    native = TaskSet()
    for t in tasks:
        if t.implicit_deadline():
            # The native side defaults the deadline to the period.
            native.add_task(t.cost, t.period)
        else:
            native.add_task(t.cost, t.period, t.deadline)
    return native
diff --git a/schedcat/sim/edf.py b/schedcat/sim/edf.py
new file mode 100644
index 0000000..453ba4d
--- /dev/null
+++ b/schedcat/sim/edf.py
@@ -0,0 +1,17 @@
1
2import schedcat.sim as sim
3import schedcat.sim.native as cpp
4
5from schedcat.util.time import sec2us
6
7
def is_deadline_missed(no_cpus, tasks, simulation_length=60):
    """Simulate G-EDF for simulation_length seconds (native code);
    True iff some job misses a deadline."""
    native_ts = sim.get_native_taskset(tasks)
    horizon = int(sec2us(simulation_length))
    return cpp.edf_misses_deadline(no_cpus, native_ts, horizon)

def time_of_first_miss(no_cpus, tasks, simulation_length=60):
    """Time of the first deadline miss, as reported by the native
    G-EDF simulator."""
    native_ts = sim.get_native_taskset(tasks)
    horizon = int(sec2us(simulation_length))
    return cpp.edf_first_violation(no_cpus, native_ts, horizon)

def no_counter_example(*args, **kargs):
    """Convenience wrapper: True iff the simulation found no miss."""
    return not is_deadline_missed(*args, **kargs)
diff --git a/schedcat/util/__init__.py b/schedcat/util/__init__.py
new file mode 100644
index 0000000..b0b28a3
--- /dev/null
+++ b/schedcat/util/__init__.py
@@ -0,0 +1,3 @@
1"""
2schedcat.util: misc. helpers
3"""
diff --git a/schedcat/util/csv.py b/schedcat/util/csv.py
new file mode 100644
index 0000000..4482f0a
--- /dev/null
+++ b/schedcat/util/csv.py
@@ -0,0 +1,59 @@
1from __future__ import absolute_import
2
3import csv
4
5from .storage import storage
6
def load_columns(fname,
                 convert=lambda x: x,
                 expect_uniform=True):
    """Load a file of CSV data. The first row is assumed to contain
    column labels. These labels can then be used to reference
    individual columns.

        x = load_columns(...)
        x.by_name -> columns by name
        x.by_idx  -> columns by index in the file
        x.columns -> mapping from label to column index

    `convert` is applied to every cell; with expect_uniform, rows whose
    length differs from the header's raise IOError.
    """
    if isinstance(fname, str):
        f = open(fname)
        close_when_done = True
    else:
        # Assume we were given a file-like object; the caller owns it.
        f = fname
        close_when_done = False
    try:
        d = list(csv.reader(f))
    finally:
        # Close only files we opened ourselves. (The original compared
        # 'fname != f' and also leaked f if csv.reader failed.)
        if close_when_done:
            f.close()

    # Infer column labels from the header row.
    col_idx = {}
    for i, key in enumerate(d[0]):
        col_idx[key.strip()] = i

    # The original relied on the loop variable leaking out of the
    # enumerate loop; this is equivalent and robust to empty headers.
    max_idx = len(d[0]) - 1

    data = d[1:]

    if expect_uniform:
        for row in data:
            if len(row) != max_idx + 1:
                # Bad row length. (A stray debug print was removed, and
                # the Python 2-only 'raise IOError, msg' form replaced
                # with the call form.)
                msg = "expected uniform row length (%s:%d)" % \
                    (fname, data.index(row) + 1)
                raise IOError(msg)

    # Column iterator: yields converted cells, skipping blank rows.
    def col(i):
        for row in data:
            if row:
                yield convert(row[i])

    by_col_name = {}
    by_col_idx = [0] * (max_idx + 1)

    for key in col_idx:
        by_col_name[key] = list(col(col_idx[key]))
        by_col_idx[col_idx[key]] = by_col_name[key]

    return storage(name=fname, columns=col_idx,
                   by_name=by_col_name, by_idx=by_col_idx)
diff --git a/schedcat/util/iter.py b/schedcat/util/iter.py
new file mode 100644
index 0000000..7ee20f6
--- /dev/null
+++ b/schedcat/util/iter.py
@@ -0,0 +1,44 @@
1# assorted sequence helpers
2
3from heapq import heapify, heappop, heappush
4
class PrioObj(object):
    """Heap entry that orders values by a caller-supplied comparator.

    Fix: heapq in modern Python (2.6+/3.x) orders elements via __lt__,
    not __le__. The original only defined __le__, so heap order fell
    back to default object comparison on Python 2.7 (and raises
    TypeError on Python 3). __lt__ now delegates to the comparator.
    """
    def __init__(self, val, le):
        self.val = val
        self.le = le    # binary predicate: le(a, b) <=> "a before b"

    def __str__(self):
        return str(self.val)

    def __le__(self, other):
        return self.le(self.val, other.val)

    def __lt__(self, other):
        # heapq compares with '<'; use the same comparator.
        return self.le(self.val, other.val)
15
16
def imerge(le, *iters):
    # Lazily merge the given (individually sorted) iterators into one
    # sorted stream, using le(a, b) as the "a comes before b" predicate.
    nxtheap = []
    # Heap entries are (next_value, iterator) pairs; compare by value.
    _le = lambda a, b: le(a[0], b[0])
    for i in iters:
        try:
            it = iter(i)
            # NOTE(review): it.next() is Python 2-only; next(it) would
            # also work on Python 3. Empty iterators are skipped here.
            nxtheap.append(PrioObj((it.next(), it), _le))
        except StopIteration:
            pass
    # NOTE(review): heapq orders entries via '<' (__lt__) on modern
    # Python, but PrioObj defines only __le__ -- heap order may not
    # follow `le` as intended; confirm.
    heapify(nxtheap)
    while nxtheap:
        wrapper = heappop(nxtheap)
        x, it = wrapper.val
        yield x
        try:
            # Refill the wrapper with the iterator's next value and
            # push it back; exhausted iterators are dropped.
            wrapper.val = (it.next(), it)
            heappush(nxtheap, wrapper)
        except StopIteration:
            pass
36
def uniq(seq):
    """Yield the elements of seq with consecutive duplicates removed
    (like the Unix uniq utility; seq need not be sorted).

    Fixes two issues: the original used the Python 2-only it.next()
    method, and it leaked StopIteration for an empty seq, which
    PEP 479 (Python 3.7+) turns into a RuntimeError.
    """
    it = iter(seq)
    try:
        last = next(it)
    except StopIteration:
        return
    yield last
    for x in it:
        if x != last:
            last = x
            yield x
diff --git a/schedcat/util/math.py b/schedcat/util/math.py
new file mode 100644
index 0000000..ad94df4
--- /dev/null
+++ b/schedcat/util/math.py
@@ -0,0 +1,143 @@
1from __future__ import division
2
3from bisect import bisect_left as find_index
4
def is_integral(x):
    """True iff x is exactly of a built-in integer type (subclasses
    such as bool do not count). `long` is Python 2's big-int type; the
    int check short-circuits first, exactly as in the original."""
    if type(x) == int:
        return True
    return type(x) == long
7
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm.
    As in the original recursive version, gcd(0, b) returns b
    unchanged; all other results are non-negative."""
    if a == 0:
        return b
    while a:
        a, b = b % a, a
    return abs(b)
12
def lcm(*args):
    """Least common multiple of any number of integers; lcm() == 0.

    Raises Exception for non-integral arguments (LCM is only
    well-defined for integers). Fix: the original used the Python
    2-only 'raise Exception, msg' statement form; the call form below
    is valid on both Python 2 and 3.
    """
    if not args:
        return 0
    a = args[0]
    for b in args[1:]:
        if not is_integral(a) or not is_integral(b):
            raise Exception(
                "LCM is only well-defined for integers (got: %s, %s)"
                % (type(a), type(b)))
        # Classic identity: lcm(a, b) = a * b / gcd(a, b).
        a = (a // gcd(a, b)) * b
    return a
25
def topsum(lst, fun, n):
    """Return the sum of the n largest values of map(fun, lst).

    Fix: uses sorted() rather than the original map(...).sort()
    sequence, which breaks on Python 3 (map returns an iterator there)
    and is equivalent on Python 2.
    """
    return sum(sorted(map(fun, lst), reverse=True)[:n])
31
class LinearEqu(object):
    """An affine function f(n) = a + b*n, supporting addition and
    scalar multiplication (used to model overhead curves)."""

    def __init__(self, a, b):
        self.a = a  # constant term
        self.b = b  # slope

    def __str__(self):
        slope = round(self.b, 3)
        if abs(slope) >= 0.001:
            return '%.3f%+.3f n' % (self.a, slope)
        # Effectively constant: omit the slope term.
        return '%.3f' % self.a

    def __call__(self, x):
        # For a zero slope, skip the multiplication entirely (the
        # original likewise never touched x in that case).
        if self.b:
            return self.a + self.b * x
        return self.a + 0

    def __add__(self, other):
        return LinearEqu(self.a + other.a, self.b + other.b)

    def __mul__(self, scalar):
        return LinearEqu(self.a * scalar, self.b * scalar)

    def __rmul__(self, scalar):
        return self * scalar

    def is_constant(self):
        return self.b == 0
58
class PieceWiseLinearEqu(object):
    """Piece-wise linear interpolation through a list of support points
    [(x1, y1), (x2, y2), ...]; evaluation clamps below at zero."""

    def __init__(self, points):
        assert len(points) >= 2

        def slope(i):
            # Slope of the segment from points[i] to points[i+1].
            dy = points[i+1][1] - points[i][1]
            dx = points[i+1][0] - points[i][0]
            if dx != 0:
                return dy / dx
            else:
                # De-generate case; the function is not continuous.
                # This slope is used in a dummy segment and hence not
                # important.
                return 0.0

        def yintercept(i):
            x, y = points[i]
            dy = slope(i) * x
            return y - dy

        self.segments = [LinearEqu(yintercept(i), slope(i))
                         for i in xrange(len(points) - 1)]
        # Segment i covers x values up to self.lookup[i].
        self.lookup = [points[i+1][0] for i in xrange(len(points) - 1)]
        self.hi = len(self.lookup) - 1

    def __call__(self, x):
        # Find the appropriate linear segment...
        i = find_index(self.lookup, x, hi=self.hi)
        f = self.segments[i]
        # ...and extrapolate linearly from its support point.
        # Negative overheads make no sense, so clamp at zero.
        return max(0, f(x))

    def is_constant(self):
        # Bug fix: the original evaluated 'seg[1].is_constant()', but
        # self.segments holds LinearEqu objects, which are not
        # subscriptable (TypeError); 'seg' itself is the segment.
        return all([seg.is_constant() for seg in self.segments])
95
def const(value):
    """Constant function as a LinearEqu."""
    return LinearEqu(value, 0)

def lin(a, b):
    """Affine function a + b*n as a LinearEqu."""
    return LinearEqu(a, b)

def scale(alpha, fun):
    """Return the function x -> alpha * fun(x)."""
    def scaled(x):
        return fun(x) * alpha
    return scaled

def piece_wise_linear(points):
    """Piece-wise linear function through the given support points."""
    return PieceWiseLinearEqu(points)
107
def make_monotonic(points):
    """Return a copy of points whose y values never decrease, dropping
    intermediate points made redundant by a later, raised point."""
    result = points[:1]
    before_last = None
    _, last_y = result[0]
    for (x, y) in points[1:]:
        # y values must not decrease.
        y = max(last_y, y)
        if before_last is not None and before_last == y:
            # The previous point is useless: same y on both sides.
            result.pop()
        result.append((x, y))
        before_last, last_y = last_y, y

    # Also drop the final point if the function is simply constant.
    if len(result) == 2 and result[-1][1] == result[-2][1]:
        result.pop()

    return result
126
def is_monotonic(points):
    """True iff y values never decrease along the (strictly increasing)
    x values of the given support points."""
    for (x1, y1), (x2, y2) in zip(points, points[1:]):
        # x values are required to be strictly ascending.
        assert x1 < x2
        if y2 < y1:
            return False
    return True
135
def monotonic_pwlin(points):
    """Best-effort monotonic piece-wise linear fit through points."""
    ascending = make_monotonic(points)
    if len(ascending) > 1:
        return piece_wise_linear(ascending)
    if ascending:
        # A single point remains: the function is constant.
        return const(ascending[0][1])
    return const(0)
diff --git a/schedcat/util/quantor.py b/schedcat/util/quantor.py
new file mode 100644
index 0000000..8bb985b
--- /dev/null
+++ b/schedcat/util/quantor.py
@@ -0,0 +1,16 @@
1
def forall(lst):
    """Universal quantifier: forall(lst)(p) is True iff p holds for
    every element of lst (vacuously True for an empty lst)."""
    def predicate(p):
        return all(p(x) for x in lst)
    return predicate

def exists(lst):
    """Existential quantifier: exists(lst)(p) is True iff p holds for
    at least one element of lst."""
    def predicate(p):
        return any(p(x) for x in lst)
    return predicate
diff --git a/schedcat/util/storage.py b/schedcat/util/storage.py
new file mode 100644
index 0000000..f2cc007
--- /dev/null
+++ b/schedcat/util/storage.py
@@ -0,0 +1,41 @@
1
# from web.py (public domain code)
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.

        >>> o = storage(a=1)
        >>> o.a
        1
        >>> o['a']
        1
        >>> o.a = 2
        >>> o['a']
        2
        >>> del o.a
        >>> o.a
        Traceback (most recent call last):
            ...
        AttributeError: 'a'

    Fix: the original used the Python 2-only 'except KeyError, k' and
    'raise AttributeError, k' statement forms, which are syntax errors
    on Python 3; the 'as'/call forms below work on Python 2.6+ and 3.
    """
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'

# lowercase alias used throughout the code base
storage = Storage
diff --git a/schedcat/util/time.py b/schedcat/util/time.py
new file mode 100644
index 0000000..4125280
--- /dev/null
+++ b/schedcat/util/time.py
@@ -0,0 +1,23 @@
1from __future__ import division
2from __future__ import absolute_import
3
4from math import ceil, floor
5
6# various time-related helpers
7
def us2ms(us):
    # Microseconds -> milliseconds (true division; see __future__ import).
    return us / 1000

def ms2us(ms):
    # Milliseconds -> microseconds.
    return ms * 1000

def sec2us(sec):
    # Seconds -> microseconds.
    return sec * 1000000

def ms2us_ru(ms):
    "Convert and round up."
    return int(ceil(ms * 1000))

def ms2us_rd(ms):
    "Convert and round down."
    return int(floor(ms * 1000))
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/__main__.py b/tests/__main__.py
new file mode 100644
index 0000000..1ecfcdc
--- /dev/null
+++ b/tests/__main__.py
@@ -0,0 +1,36 @@
#!/usr/bin/env python

import unittest

import tests.model
import tests.util
import tests.generator
import tests.quanta
import tests.pfair
import tests.edf
import tests.fp
import tests.binpack
import tests.locking
import tests.sim
import tests.overheads

# Aggregate every schedcat test module into a single suite so the
# whole battery can be run with `python -m tests`.
suite = unittest.TestSuite(
    [unittest.defaultTestLoader.loadTestsFromModule(x) for x in
     [tests.model,
      tests.util,
      tests.generator,
      tests.quanta,
      tests.pfair,
      tests.edf,
      tests.fp,
      tests.binpack,
      tests.locking,
      tests.sim,
      tests.overheads]
     ])

def run_all_tests():
    # verbosity=2 prints one line per test case.
    unittest.TextTestRunner(verbosity=2).run(suite)

if __name__ == '__main__':
    run_all_tests()
diff --git a/tests/binpack.py b/tests/binpack.py
new file mode 100644
index 0000000..43736a7
--- /dev/null
+++ b/tests/binpack.py
@@ -0,0 +1,368 @@
1#!/usr/bin/env python
2
3from __future__ import division
4
5import unittest
6
7import schedcat.mapping.binpack as bp
8import schedcat.mapping.rollback as rb
9
class TooLarge(unittest.TestCase):
    """Every item exceeds the bin capacity, so each heuristic must
    return nothing but empty bins."""

    def setUp(self):
        self.cap = 10
        self.items = range(100, 1000)
        self.bins = 9
        self.empty = [[]] * self.bins

    def test_next_fit(self):
        self.assertEqual(bp.next_fit(self.items, self.bins, self.cap),
                         self.empty)

    def test_first_fit(self):
        self.assertEqual(bp.first_fit(self.items, self.bins, self.cap),
                         self.empty)

    def test_worst_fit(self):
        self.assertEqual(bp.worst_fit(self.items, self.bins, self.cap),
                         self.empty)

    def test_best_fit(self):
        self.assertEqual(bp.best_fit(self.items, self.bins, self.cap),
                         self.empty)

    def test_next_fit_decreasing(self):
        self.assertEqual(bp.next_fit_decreasing(self.items, self.bins, self.cap),
                         self.empty)

    def test_first_fit_decreasing(self):
        self.assertEqual(bp.first_fit_decreasing(self.items, self.bins, self.cap),
                         self.empty)

    def test_worst_fit_decreasing(self):
        self.assertEqual(bp.worst_fit_decreasing(self.items, self.bins, self.cap),
                         self.empty)

    def test_best_fit_decreasing(self):
        self.assertEqual(bp.best_fit_decreasing(self.items, self.bins, self.cap),
                         self.empty)
48
class NotLossy(unittest.TestCase):
    """With a single bin whose capacity equals the total item size,
    every heuristic must place all items (none may be dropped)."""

    def setUp(self):
        self.items = range(100, 1000)
        self.bins = 1
        self.cap = sum(self.items)
        self.expected = [self.items]

    def test_next_fit(self):
        packed = bp.next_fit(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_first_fit(self):
        packed = bp.first_fit(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_worst_fit(self):
        packed = bp.worst_fit(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_best_fit(self):
        packed = bp.best_fit(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_next_fit_decreasing(self):
        packed = bp.next_fit_decreasing(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_first_fit_decreasing(self):
        packed = bp.first_fit_decreasing(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_worst_fit_decreasing(self):
        packed = bp.worst_fit_decreasing(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)

    def test_best_fit_decreasing(self):
        packed = bp.best_fit_decreasing(self.items, self.bins, self.cap)
        packed[0].sort()
        self.assertEqual(packed, self.expected)
95
96
class KnownExample(unittest.TestCase):
    """Pin the exact partitions the heuristics produce for one small,
    hand-checked instance."""

    def setUp(self):
        self.items = [8, 5, 7, 6, 2, 4, 1]
        self.bins = 5
        self.cap = 10

    def test_next_fit(self):
        self.expected = [[8], [5], [7], [6, 2], [4, 1]]
        self.assertEqual(bp.next_fit(self.items, self.bins, self.cap),
                         self.expected)

    def test_first_fit(self):
        self.expected = [[8, 2], [5, 4, 1], [7], [6], []]
        self.assertEqual(bp.first_fit(self.items, self.bins, self.cap),
                         self.expected)

    def test_worst_fit(self):
        # This case uses only four bins.
        self.bins = 4
        self.expected = [[8], [5, 2, 1], [7], [6, 4]]
        self.assertEqual(bp.worst_fit(self.items, self.bins, self.cap),
                         self.expected)

    def test_best_fit(self):
        self.expected = [[8, 2], [5], [7, 1], [6, 4], []]
        self.assertEqual(bp.best_fit(self.items, self.bins, self.cap),
                         self.expected)
123
124
class RollbackBins(unittest.TestCase):
    """Exercise rb.Bin.try_to_add() with the default and with a custom
    size function / capacity."""

    def setUp(self):
        self.bin = rb.Bin([0.2])
        self.bin2 = rb.Bin([2], size=lambda x: x + 0.1, capacity=10)

    def test_try_to_add_float(self):
        # Additions are attempted in order; rejected items leave the
        # bin unchanged. (Capacity appears to default to 1.0, since
        # 0.2 + 0.3 + 0.5 fills the bin -- see rb.Bin.)
        for item, fits in [(0.3, True), (0.6, False),
                           (0.5, True), (0.0000001, False)]:
            if fits:
                self.assertTrue(self.bin.try_to_add(item))
            else:
                self.assertFalse(self.bin.try_to_add(item))
        self.assertEqual([0.2, 0.3, 0.5], self.bin.items)

    def test_try_to_add_int(self):
        # With size = x + 0.1 and capacity 10: 2 and 3 fit, 6 and 5
        # would overflow, 1 still fits.
        for item, fits in [(3, True), (6, False), (5, False), (1, True)]:
            if fits:
                self.assertTrue(self.bin2.try_to_add(item))
            else:
                self.assertFalse(self.bin2.try_to_add(item))
        self.assertEqual([2, 3, 1], self.bin2.items)
143
144
class Heuristics(unittest.TestCase):
    # Tests for the rollback-based bin-packing heuristics in
    # schedcat.mapping.rollback. Each heuristic is exercised four ways:
    #   *_fixed: a fixed list of five capacity-10 bins,
    #   *_make:  bins created on demand via make_bin,
    #   *_small: on-demand bins too small (capacity 7) for item 8,
    #   *_few:   a truncated fixed bin list, forcing misfits.
    # binpack() returns the number of items placed; unplaced items are
    # collected in h.misfits.
    def setUp(self):
        self.items = [8, 5, 7, 6, 2, 4, 1]
        self.bins = [rb.Bin(capacity = 10) for _ in range(5)]
        # Wrap each bin so that additions are (presumably) validated --
        # see rb.CheckedBin.
        self.bins = [rb.CheckedBin(b) for b in self.bins]
        self.make_bin = lambda x: rb.CheckedBin(rb.Bin(capacity = x))

    def test_base(self):
        # The base Heuristic places nothing: everything is a misfit.
        h = rb.Heuristic(self.bins)
        self.assertEqual(0, h.binpack(self.items))
        self.assertEqual(h.misfits, self.items)
        h.misfits = []
        # With report_failure, the first misfit (8) raises DidNotFit.
        self.assertRaises(bp.DidNotFit, h.binpack, self.items,
                          report_misfit=bp.report_failure)
        self.assertEqual([8], h.misfits)

    def test_next_fit_fixed(self):
        h = rb.NextFit(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5], [7], [6, 2], [4, 1]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_next_fit_make(self):
        h = rb.NextFit(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5], [7], [6, 2], [4, 1]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_next_fit_small(self):
        # Capacity 7: item 8 can never fit and is reported as a misfit.
        h = rb.NextFit(make_bin=lambda: self.make_bin(7))
        expected = [[5], [7], [6], [2, 4, 1]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_next_fit_few(self):
        # Only four bins: the trailing items 4 and 1 do not fit.
        h = rb.NextFit(self.bins[:4])
        expected = [[8], [5], [7], [6, 2]]
        misfits = [4, 1]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)


    def test_first_fit_fixed(self):
        h = rb.FirstFit(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5, 4, 1], [7], [6], []]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_first_fit_make(self):
        h = rb.FirstFit(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5, 4, 1], [7], [6]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_first_fit_small(self):
        h = rb.FirstFit(make_bin=lambda: self.make_bin(7))
        expected = [[5,2], [7], [6, 1], [4]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_first_fit_few(self):
        h = rb.FirstFit(self.bins[:3])
        expected = [[8, 2], [5, 4, 1], [7]]
        misfits = [6]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)


    def test_worst_fit_fixed(self):
        h = rb.WorstFit(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5, 1], [7], [6], [2, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_worst_fit_make(self):
        h = rb.WorstFit(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5, 2, 1], [7], [6, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_worst_fit_small(self):
        h = rb.WorstFit(make_bin=lambda: self.make_bin(7))
        expected = [[5, 2], [7], [6], [4, 1]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_worst_fit_few(self):
        # Four bins still suffice for worst-fit: no misfits expected.
        h = rb.WorstFit(self.bins[:4])
        expected = [[8], [5, 2, 1], [7], [6, 4]]
        misfits = []
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)


    def test_best_fit_fixed(self):
        h = rb.BestFit(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5], [7, 1], [6, 4], []]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_best_fit_make(self):
        h = rb.BestFit(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5], [7, 1], [6, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_best_fit_small(self):
        h = rb.BestFit(make_bin=lambda: self.make_bin(7))
        expected = [[5, 2], [7], [6, 1], [4]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_best_fit_few(self):
        h = rb.BestFit(self.bins[:3])
        expected = [[8, 2], [5, 4, 1], [7]]
        misfits = [6]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)


    # MaxSpareCapacity matches WorstFit's partitions on this instance.
    def test_max_spare_cap_fixed(self):
        h = rb.MaxSpareCapacity(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5, 1], [7], [6], [2, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_max_spare_cap_make(self):
        h = rb.MaxSpareCapacity(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8], [5, 2, 1], [7], [6, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_max_spare_cap_small(self):
        h = rb.MaxSpareCapacity(make_bin=lambda: self.make_bin(7))
        expected = [[5, 2], [7], [6], [4, 1]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_max_spare_cap_few(self):
        h = rb.MaxSpareCapacity(self.bins[:4])
        expected = [[8], [5, 2, 1], [7], [6, 4]]
        misfits = []
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)


    # MinSpareCapacity matches BestFit's partitions on this instance.
    def test_min_spare_cap_fixed(self):
        h = rb.MinSpareCapacity(self.bins)
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5], [7, 1], [6, 4], []]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_min_spare_cap_make(self):
        h = rb.MinSpareCapacity(make_bin=lambda: self.make_bin(10))
        self.assertEqual(len(self.items), h.binpack(self.items))
        expected = [[8, 2], [5], [7, 1], [6, 4]]
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)

    def test_min_spare_cap_small(self):
        h = rb.MinSpareCapacity(make_bin=lambda: self.make_bin(7))
        expected = [[5, 2], [7], [6, 1], [4]]
        misfits = [8]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)

    def test_min_spare_cap_few(self):
        h = rb.MinSpareCapacity(self.bins[:3])
        expected = [[8, 2], [5, 4, 1], [7]]
        misfits = [6]
        self.assertEqual(len(self.items) - len(misfits),
                         h.binpack(self.items))
        did_part = [bin.items for bin in h.bins]
        self.assertEqual(expected, did_part)
        self.assertEqual(misfits, h.misfits)
diff --git a/tests/edf.py b/tests/edf.py
new file mode 100644
index 0000000..2895f79
--- /dev/null
+++ b/tests/edf.py
@@ -0,0 +1,140 @@
1from __future__ import division
2
3import unittest
4
5from fractions import Fraction
6
7import schedcat.sched.edf.bak as bak
8import schedcat.sched.edf.bar as bar
9import schedcat.sched.edf.bcl_iterative as bcli
10import schedcat.sched.edf.bcl as bcl
11import schedcat.sched.edf.da as da
12import schedcat.sched.edf.ffdbf as ffdbf
13import schedcat.sched.edf.gfb as gfb
14import schedcat.sched.edf.rta as rta
15import schedcat.sched.edf as edf
16
17import schedcat.model.tasks as tasks
18
19from schedcat.util.math import is_integral
20
21# TODO: add unit tests for EDF schedulability tests
22
class DA(unittest.TestCase):
    """Tests for schedcat.sched.edf.da tardiness / response-time bounds."""

    def setUp(self):
        # Total utilization: 80/100 + 33/66 + 7/10 = 2.0.
        params = [(80, 100), (33, 66), (7, 10)]
        self.ts = tasks.TaskSystem([tasks.SporadicTask(c, p)
                                    for (c, p) in params])

    def test_util_bound(self):
        # Utilization 2.0: bounded tardiness on two processors, not one.
        self.assertTrue(da.has_bounded_tardiness(2, self.ts))
        self.assertFalse(da.has_bounded_tardiness(1, self.ts))

    def test_bound_is_integral(self):
        self.assertTrue(da.bound_response_times(2, self.ts))
        for t in self.ts:
            self.assertTrue(is_integral(t.response_time))

        self.assertFalse(da.bound_response_times(1, self.ts))
42
class Test_ffdbf(unittest.TestCase):
    # Tests for the ffdbf() demand-bound function and its testing-set
    # generators (schedcat.sched.edf.ffdbf).

    def setUp(self):
        # Implicit-deadline and constrained-deadline variants of the
        # same (cost=5000, period=10000) task.
        self.t1 = tasks.SporadicTask(5000, 10000)
        self.t2 = tasks.SporadicTask(5000, 10000, deadline = 7000)

    def test_ffdbf1(self):
        # Spot-check ffdbf() at speed 1 for the implicit-deadline task.
        one = Fraction(1)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 0, one),
            0)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 5000, one),
            0)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 5001, one),
            1)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 7000, one),
            2000)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 9999, one),
            4999)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 10001, one),
            5000)
        self.assertEqual(
            ffdbf.ffdbf(self.t1, 14001, one),
            5000)

    def test_ffdbf_constrained(self):
        # Same checks for the constrained-deadline task (deadline 7000).
        one = Fraction(1)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 0, one),
            0)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 1000, one),
            0)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 2001, one),
            1)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 4000, one),
            2000)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 6999, one),
            4999)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 10001, one),
            5000)
        self.assertEqual(
            ffdbf.ffdbf(self.t2, 12001, one),
            5001)

    def test_test_points(self):
        # test_points() yields the per-task testing points; note the
        # Python 2 iterator protocol (pts.next()) used throughout.
        one = Fraction(1)
        pts = ffdbf.test_points(self.t1, one, 0)
        pts = iter(pts)
        self.assertEqual(pts.next(), 5000)
        self.assertEqual(pts.next(), 10000)
        self.assertEqual(pts.next(), 15000)
        self.assertEqual(pts.next(), 20000)


        pts = ffdbf.test_points(self.t2, one, 0)
        pts = iter(pts)
        self.assertEqual(pts.next(), 2000)
        self.assertEqual(pts.next(), 7000)
        self.assertEqual(pts.next(), 12000)
        self.assertEqual(pts.next(), 17000)


        pts = ffdbf.test_points(self.t1, Fraction(1, 2), 0)
        pts = iter(pts)
        self.assertEqual(pts.next(), 10000)
        self.assertEqual(pts.next(), 10000)
        self.assertEqual(pts.next(), 20000)

        pts = ffdbf.test_points(self.t2, Fraction(8, 10), 0)
        pts = iter(pts)
        self.assertEqual(pts.next(), 750)
        self.assertEqual(pts.next(), 7000)
        self.assertEqual(pts.next(), 10750)
        self.assertEqual(pts.next(), 17000)

    def test_testing_set(self):
        # testing_set() merges the per-task testing points of the whole
        # task system into one sorted sequence.
        one = Fraction(1)
        ts = tasks.TaskSystem([self.t1, self.t2])
        pts = ffdbf.testing_set(ts, one, 0)
        # BUG FIX: this line used to read 'ts = iter(pts)', clobbering
        # the task system while leaving 'pts' un-iterated even though
        # every call below uses pts.next(). Rebind 'pts' instead,
        # mirroring test_test_points() above.
        pts = iter(pts)
        self.assertEqual(pts.next(), 2000)
        self.assertEqual(pts.next(), 5000)
        self.assertEqual(pts.next(), 7000)
        self.assertEqual(pts.next(), 10000)
        self.assertEqual(pts.next(), 12000)
        self.assertEqual(pts.next(), 15000)
        self.assertEqual(pts.next(), 17000)
        self.assertEqual(pts.next(), 20000)
diff --git a/tests/fp.py b/tests/fp.py
new file mode 100644
index 0000000..eb86546
--- /dev/null
+++ b/tests/fp.py
@@ -0,0 +1,40 @@
1from __future__ import division
2
3import unittest
4
5import schedcat.sched.fp.rta as rta
6import schedcat.sched.fp as fp
7
8import schedcat.model.tasks as tasks
9
10from schedcat.util.math import is_integral
11
class UniprocessorRTA(unittest.TestCase):
    """Fixed-priority response-time analysis on a small task set."""

    def setUp(self):
        params = [(1, 4), (1, 5), (3, 9), (3, 18)]
        self.ts = tasks.TaskSystem([tasks.SporadicTask(c, p)
                                    for (c, p) in params])

    def test_procs(self):
        # NOTE(review): m=2 is rejected -- the analysis appears to be
        # uniprocessor-only; confirm against rta.is_schedulable.
        self.assertTrue(rta.is_schedulable(1, self.ts))
        self.assertFalse(rta.is_schedulable(2, self.ts))

    def test_bound_is_integral(self):
        self.assertTrue(rta.is_schedulable(1, self.ts))
        for i in range(3):
            self.assertTrue(is_integral(self.ts[i].response_time))

    def test_times(self):
        self.assertTrue(rta.is_schedulable(1, self.ts))

        for i, expected in enumerate([1, 2, 7, 18]):
            self.assertEqual(self.ts[i].response_time, expected)
38
39
40# TODO: add tests with blocking and self-suspensions
diff --git a/tests/generator.py b/tests/generator.py
new file mode 100644
index 0000000..c43a3ac
--- /dev/null
+++ b/tests/generator.py
@@ -0,0 +1,81 @@
1import unittest
2
3from schedcat.util.time import ms2us
4
5import schedcat.generator.tasks as tg
6import schedcat.generator.tasksets as tsgen
7
class TaskGen(unittest.TestCase):
    # Tests for the random task generator (schedcat.generator.tasks):
    # drawing functions, redraw/truncate limiters, and set generation.

    def test_drawing_functions(self):
        # Each factory returns a zero-argument sampler of the stated
        # type and range.
        f = tg.uniform_int(10, 100)
        self.assertTrue(type(f()) == int)
        self.assertTrue(10 <= f() <= 100)

        f = tg.uniform(10, 100)
        self.assertTrue(type(f()) == float)
        self.assertTrue(10 <= f() <= 100)

        f = tg.uniform_choice("abcdefg")
        self.assertTrue(type(f()) == str)
        self.assertTrue('a' <= f() <= 'g')

        f = tg.exponential(0.1, 0.7, 0.4)
        self.assertTrue(type(f()) == float)
        self.assertTrue(0.1 <= f() <= 0.7)

    def test_limiters(self):
        # 'counter' tracks how many times (x10) inc() was invoked, so
        # we can distinguish clamping from redrawing.
        global counter
        counter = 0
        def inc():
            global counter
            counter += 10
            return counter

        # truncate(15, 35): inc() returned 10, which was clamped up to
        # the lower bound 15 -- inc() ran exactly once (counter == 10).
        trun = tg.truncate(15, 35)(inc)

        self.assertEqual(trun(), 15)
        self.assertEqual(counter, 10)

        # redraw(15, 35): the first draw (10) was out of range and
        # rejected; the second draw (20) was accepted -- inc() ran
        # twice (counter == 20).
        counter = 0
        lim = tg.redraw(15, 35)(inc)
        self.assertEqual(lim(), 20)
        self.assertEqual(counter, 20)


    def test_generator(self):
        periods = tg.uniform_int(10, 100)
        utils = tg.exponential(0.1, 0.9, 0.3)
        g = tg.TaskGenerator(periods, utils)

        self.assertEqual(len(list(g.tasks(max_tasks = 10))), 10)
        self.assertLessEqual(len(list(g.tasks(max_util = 10))), 100)

        # squeeze=True scales the last task so total utilization hits
        # the cap (almost) exactly; squeeze=False leaves it short.
        ts1 = g.tasks(max_util = 10, squeeze = True, time_conversion=ms2us)
        ts2 = g.tasks(max_util = 10, squeeze = False, time_conversion=ms2us)

        self.assertAlmostEqual(sum([t.utilization() for t in ts1]), 10, places=2)
        self.assertNotEqual(sum([t.utilization() for t in ts2]), 10)

    def test_task_system_creation(self):
        # Same checks as test_generator, via make_task_set().
        periods = tg.uniform_int(10, 100)
        utils = tg.exponential(0.1, 0.9, 0.3)
        g = tg.TaskGenerator(periods, utils)

        self.assertEqual(len(g.make_task_set(max_tasks = 10)), 10)
        self.assertLessEqual(len((g.make_task_set(max_util = 10))), 100)

        ts1 = g.make_task_set(max_util = 10, squeeze = True, time_conversion=ms2us)
        ts2 = g.make_task_set(max_util = 10, squeeze = False, time_conversion=ms2us)

        self.assertAlmostEqual(ts1.utilization(), 10, places=2)
        # Not strictly impossible, but very unlikely
        self.assertNotEqual(ts2.utilization(), 10)
74
class TaskSetGen(unittest.TestCase):
    """Smoke-test every predefined task-set distribution."""

    def test_feasible_tasks(self):
        # With at most four tasks, total utilization cannot exceed 4.
        for name, gen in tsgen.ALL_DISTS.items():
            ts = gen(time_conversion=ms2us, max_tasks=4)
            self.assertLessEqual(ts.utilization(), 4)
diff --git a/tests/locking.py b/tests/locking.py
new file mode 100644
index 0000000..d18f9ae
--- /dev/null
+++ b/tests/locking.py
@@ -0,0 +1,487 @@
1from __future__ import division
2
3import unittest
4import random
5
6import schedcat.locking.bounds as lb
7import schedcat.locking.native as cpp
8import schedcat.model.tasks as tasks
9import schedcat.model.resources as r
10
class Locking(unittest.TestCase):
    # Tests for locking-priority assignment and the Python->C++ model
    # bridge in schedcat.locking.bounds.
    def setUp(self):
        self.ts = tasks.TaskSystem([
            tasks.SporadicTask(1, 4),
            tasks.SporadicTask(1, 5),
            tasks.SporadicTask(3, 9),
            tasks.SporadicTask(3, 18),
        ])
        r.initialize_resource_model(self.ts)
        # All tasks on partition 0, each with one request for resource 1.
        for i, t in enumerate(self.ts):
            t.partition = 0
            t.response_time = t.period
            t.resmodel[1].add_request(1)

    def test_fp_locking_prios(self):
        # After sorting by period, locking priorities follow index order.
        self.ts.sort_by_period()
        lb.assign_fp_locking_prios(self.ts)
        self.assertEqual(self.ts[0].locking_prio, 0)
        self.assertEqual(self.ts[1].locking_prio, 1)
        self.assertEqual(self.ts[2].locking_prio, 2)
        self.assertEqual(self.ts[3].locking_prio, 3)

    def test_edf_locking_prios(self):
        # Deadlines become {5, 5, 9, 9}; tasks with equal deadlines get
        # equal locking priorities.
        self.ts[0].deadline = 5
        self.ts[3].deadline = 9
        # NOTE(review): the shuffled copy 'ts' is never used -- the
        # assignment below operates on self.ts, so the shuffle has no
        # effect. Possibly assign_edf_locking_prios(ts) was intended;
        # confirm before changing.
        ts = list(self.ts)
        random.shuffle(ts)
        lb.assign_edf_locking_prios(self.ts)
        self.assertEqual(self.ts[0].locking_prio, 0)
        self.assertEqual(self.ts[1].locking_prio, 0)
        self.assertEqual(self.ts[2].locking_prio, 1)
        self.assertEqual(self.ts[3].locking_prio, 1)


    def test_cpp_bridge(self):
        # The C++ model constructors should accept a prioritized task set.
        lb.assign_fp_locking_prios(self.ts)
        self.assertIsNotNone(lb.get_cpp_model(self.ts))
        self.assertIsNotNone(lb.get_cpp_model_rw(self.ts))
49
50
class ApplyBounds(unittest.TestCase):
# This primarily checks that the tests don't crash.
# TODO: add actual tests of the computed bounds.
    def setUp(self):
        self.ts = tasks.TaskSystem([
            tasks.SporadicTask(1, 4),
            tasks.SporadicTask(1, 5),
            tasks.SporadicTask(3, 9),
            tasks.SporadicTask(3, 18),
        ])
        # Pristine copy taken BEFORE resource-model setup; used as the
        # baseline for cost/period comparisons below.
        self.ts_ = self.ts.copy()
        r.initialize_resource_model(self.ts)
        # Tasks alternate between partitions 0 and 1; each requests
        # resources 0 and 1 once.
        for i, t in enumerate(self.ts):
            t.partition = i % 2
            t.response_time = t.period
            t.resmodel[0].add_request(1)
            t.resmodel[1].add_request(1)
        lb.assign_fp_locking_prios(self.ts)

    def saw_non_zero_blocking(self):
        # Suspension-aware ("saw", presumably) outcome: blocking shows
        # up as suspension + blocking terms, execution cost unchanged.
        for t, t_ in zip(self.ts, self.ts_):
            self.assertGreater(t.suspended, 0)
            self.assertGreater(t.blocked, 0)
            self.assertEqual(t.cost, t_.cost)
            self.assertEqual(t.period, t_.period)

    def sob_non_zero_blocking(self):
        # Suspension-oblivious ("sob", presumably) outcome: blocking is
        # charged as inflated execution cost, no suspension term.
        for t, t_ in zip(self.ts, self.ts_):
            self.assertGreater(t.blocked, 0)
            self.assertEqual(t.suspended, 0)
            self.assertGreater(t.cost, t_.cost)
            self.assertEqual(t.period, t_.period)

    def test_mpcp(self):
        lb.apply_mpcp_bounds(self.ts, use_virtual_spin=False)
        self.saw_non_zero_blocking()

    def test_mpcpvs(self):
        lb.apply_mpcp_bounds(self.ts, use_virtual_spin=True)
        self.sob_non_zero_blocking()

    def test_dpcp(self):
        rmap = lb.get_round_robin_resource_mapping(2, 2)
        lb.apply_dpcp_bounds(self.ts, rmap)
        self.saw_non_zero_blocking()

    def test_part_fmlp(self):
        lb.apply_part_fmlp_bounds(self.ts, preemptive=True)
        self.saw_non_zero_blocking()

    def test_part_fmlp_np(self):
        lb.apply_part_fmlp_bounds(self.ts, preemptive=False)
        self.saw_non_zero_blocking()

    def test_global_fmlp(self):
        lb.apply_global_fmlp_sob_bounds(self.ts)
        self.sob_non_zero_blocking()

    def test_global_omlp(self):
        lb.apply_global_omlp_bounds(self.ts, 2)
        self.sob_non_zero_blocking()

    def test_clustered_omlp(self):
        lb.apply_clustered_omlp_bounds(self.ts, 2)
        self.sob_non_zero_blocking()

    def test_clustered_rw_omlp(self):
        lb.apply_clustered_rw_omlp_bounds(self.ts, 2)
        self.sob_non_zero_blocking()

    def test_tfmtx(self):
        lb.apply_task_fair_mutex_bounds(self.ts, 2)
        self.sob_non_zero_blocking()

    def test_tfrw(self):
        lb.apply_task_fair_rw_bounds(self.ts, 2)
        self.sob_non_zero_blocking()

    def test_pfrw(self):
        lb.apply_phase_fair_rw_bounds(self.ts, 2)
        self.sob_non_zero_blocking()
132
133# lower-level tests for C++ implementation
134
class Test_bounds(unittest.TestCase):
    # Lower-level tests driving the C++ blocking-bound implementation
    # directly through the SWIG wrapper (schedcat.locking.native).

    def setUp(self):
        self.rsi1 = cpp.ResourceSharingInfo(6)

        # add_task(100, 100, 0, 0): arguments look like (period,
        # deadline/response-time, cluster, id) and add_request like
        # (resource, count, length) -- TODO confirm against the
        # ResourceSharingInfo C++ interface.
        self.rsi1.add_task(100, 100, 0, 0)
        self.rsi1.add_request(0, 1, 77)

        self.rsi1.add_task(50, 50, 0, 1)
        self.rsi1.add_request(0, 1, 6)

        self.rsi1.add_task(10, 10, 0, 2)
        self.rsi1.add_request(0, 1, 5)

        self.rsi1.add_task(10, 10, 1, 3)
        self.rsi1.add_request(0, 1, 7)

        self.rsi1.add_task(20, 20, 2, 4)
        self.rsi1.add_request(0, 4, 1)

        self.rsi1.add_task(30, 30, 2, 5)
        self.rsi1.add_request(0, 1, 7)


    # All three lock types are expected to yield the same arrival
    # blocking on this instance (sums of critical-section lengths).

    def test_arrival_blocking_mtx(self):

        c = 1

        res = cpp.task_fair_mutex_bounds(self.rsi1, c)
        self.assertEqual(6 + 7 + 7, res.get_arrival_blocking(0))
        self.assertEqual(5 + 7 + 7, res.get_arrival_blocking(1))
        self.assertEqual(0, res.get_arrival_blocking(2))
        self.assertEqual(0, res.get_arrival_blocking(3))
        self.assertEqual(7 + 7 + 77, res.get_arrival_blocking(4))
        self.assertEqual(0, res.get_arrival_blocking(5))

    def test_arrival_blocking_pf(self):
        c = 1

        res = cpp.phase_fair_rw_bounds(self.rsi1, c)
        self.assertEqual(6 + 7 + 7, res.get_arrival_blocking(0))
        self.assertEqual(5 + 7 + 7, res.get_arrival_blocking(1))
        self.assertEqual(0, res.get_arrival_blocking(2))
        self.assertEqual(0, res.get_arrival_blocking(3))
        self.assertEqual(7 + 7 + 77, res.get_arrival_blocking(4))
        self.assertEqual(0, res.get_arrival_blocking(5))

    def test_arrival_blocking_tf(self):
        c = 1

        # task_fair_rw_bounds takes the task set twice (mutex- and
        # rw-request views, presumably -- see the C++ signature).
        res = cpp.task_fair_rw_bounds(self.rsi1, self.rsi1, c)
        self.assertEqual(6 + 7 + 7, res.get_arrival_blocking(0))
        self.assertEqual(5 + 7 + 7, res.get_arrival_blocking(1))
        self.assertEqual(0, res.get_arrival_blocking(2))
        self.assertEqual(0, res.get_arrival_blocking(3))
        self.assertEqual(7 + 7 + 77, res.get_arrival_blocking(4))
        self.assertEqual(0, res.get_arrival_blocking(5))
192
193
194
class Test_dedicated_irq(unittest.TestCase):
    # Blocking bounds with and without a dedicated interrupt-handling
    # CPU (dedicated_cpu=0 vs. cpp.NO_CPU). 16 tasks in 4 clusters;
    # tasks in cluster k issue requests of length 1, 3, 5, 7
    # respectively, all for resource 0.

    def setUp(self):
        self.rsi = cpp.ResourceSharingInfo(16)

        idx = 0
        for cluster, length in zip(range(4), [1, 3, 5, 7]):
            for _ in range(4):
                self.rsi.add_task(100, 100, cluster, idx)
                self.rsi.add_request(0, 1, length)
                idx += 1

        # Same layout, but as reader/writer requests: cluster 0 writes,
        # all other clusters only read.
        self.rsi_rw = cpp.ResourceSharingInfo(16)

        idx = 0
        for cluster, length in zip(range(4), [1, 3, 5, 7]):
            for _ in range(4):
                self.rsi_rw.add_task(100, 100, cluster, idx)
                self.rsi_rw.add_request_rw(0, 1, length,
                                           cpp.READ if cluster > 0 else cpp.WRITE)
                idx += 1

    def test_global_irq_blocking(self):
        cluster_size = 2
        dedicated_cpu = cpp.NO_CPU
        res = cpp.task_fair_mutex_bounds(self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(7, res.get_blocking_count(15))
        self.assertEqual(2 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 2 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(15, res.get_blocking_count(0))
        self.assertEqual(arrival + 1 + 6 + 10 + 14, res.get_blocking_term(0))

    def test_dedicated_irq_blocking(self):
        # With CPU 0 dedicated, one length-1 contender drops out of the
        # bounds relative to test_global_irq_blocking.
        cluster_size = 2
        dedicated_cpu = 0
        res = cpp.task_fair_mutex_bounds(self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(6, res.get_blocking_count(15))
        self.assertEqual(1 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 1 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(7 + 6, res.get_blocking_count(0))
        self.assertEqual(arrival + 6 + 10 + 14, res.get_blocking_term(0))


    def test_global_irq_tfrw_blocking(self):
        cluster_size = 2
        dedicated_cpu = cpp.NO_CPU

        # Mutex-only request set: same bounds as task_fair_mutex_bounds.
        res = cpp.task_fair_rw_bounds(self.rsi, self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(7, res.get_blocking_count(15))
        self.assertEqual(2 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 2 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(15, res.get_blocking_count(0))
        self.assertEqual(arrival + 1 + 6 + 10 + 14, res.get_blocking_term(0))



        # Reader/writer request set: readers do not block each other,
        # so the bounds shrink.
        res = cpp.task_fair_rw_bounds(self.rsi_rw, self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(4, res.get_blocking_count(15))
        self.assertEqual(2 + 5 + 7, res.get_blocking_term(15))

        arrival = 2 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(7, res.get_blocking_count(0))
        # pessimism
        self.assertEqual(arrival + 1 + 14, res.get_blocking_term(0))



    def test_dedicated_irq_tfrw_blocking(self):
        cluster_size = 2
        dedicated_cpu = 0
        res = cpp.task_fair_rw_bounds(self.rsi, self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(6, res.get_blocking_count(15))
        self.assertEqual(1 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 1 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(7 + 6, res.get_blocking_count(0))
        self.assertEqual(arrival + 6 + 10 + 14, res.get_blocking_term(0))



        res = cpp.task_fair_rw_bounds(self.rsi_rw, self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(2, res.get_blocking_count(15))
        self.assertEqual(1 + 7, res.get_blocking_term(15))

        arrival = 1 + 7
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(3, res.get_blocking_count(0))
        # pessimism
        self.assertEqual(arrival + 7, res.get_blocking_term(0))



    def test_global_irq_pfrw_blocking(self):
        cluster_size = 2
        dedicated_cpu = cpp.NO_CPU
        res = cpp.phase_fair_rw_bounds(self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(7, res.get_blocking_count(15))
        self.assertEqual(2 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 2 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(15, res.get_blocking_count(0))
        self.assertEqual(arrival + 1 + 6 + 10 + 14, res.get_blocking_term(0))


        res = cpp.phase_fair_rw_bounds(self.rsi_rw, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(2, res.get_blocking_count(15))
        self.assertEqual(1 + 7, res.get_blocking_term(15))

        arrival = 2 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(7, res.get_blocking_count(0))
        # pessimism
        self.assertEqual(arrival + 1 + 14, res.get_blocking_term(0))



    def test_dedicated_irq_pfrw_blocking(self):
        cluster_size = 2
        dedicated_cpu = 0
        res = cpp.phase_fair_rw_bounds(self.rsi, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(6, res.get_blocking_count(15))
        self.assertEqual(1 + 6 + 10 + 7, res.get_blocking_term(15))

        arrival = 1 + 6 + 10 + 14
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(7 + 6, res.get_blocking_count(0))
        self.assertEqual(arrival + 6 + 10 + 14, res.get_blocking_term(0))


        res = cpp.phase_fair_rw_bounds(self.rsi_rw, cluster_size, dedicated_cpu)

        self.assertEqual(0, res.get_arrival_blocking(15))
        self.assertEqual(2, res.get_blocking_count(15))
        self.assertEqual(1 + 7, res.get_blocking_term(15))

        arrival = 1 + 7
        self.assertEqual(arrival, res.get_arrival_blocking(0))
        self.assertEqual(3, res.get_blocking_count(0))
        # pessimism
        self.assertEqual(arrival + 7, res.get_blocking_term(0))
362
363
364class Test_dpcp_terms(unittest.TestCase):
    # Tests for the DPCP (Distributed Priority Ceiling Protocol) blocking
    # bounds: local vs. remote blocking terms per task.
365
366 def setUp(self):
        # Four tasks on four CPUs; add_task(period, deadline, cpu, priority?)
        # and add_request(resource, count, length) -- argument meaning
        # inferred from usage, TODO confirm against ResourceSharingInfo.
367 self.rsi = cpp.ResourceSharingInfo(4)
368
369 self.rsi.add_task(10, 10, 2, 100)
370 self.rsi.add_request(0, 1, 3)
371
372 self.rsi.add_task(25, 25, 3, 200)
373 self.rsi.add_request(0, 1, 5)
374
375 self.rsi.add_task(50, 50, 4, 300)
376 self.rsi.add_request(0, 1, 7)
377
        # Task 3 issues no requests; it runs on the CPU hosting resource 0.
378 self.rsi.add_task(100, 100, 1, 400)
379
        # Resource 0 lives on CPU 1 (task 3's CPU) under the DPCP.
380 self.loc = cpp.ResourceLocality()
381 self.loc.assign_resource(0, 1)
382
383 def test_local_blocking(self):
384 res = cpp.dpcp_bounds(self.rsi, self.loc)
385
        # Tasks 0-2 are remote from resource 0: no local blocking.
386 self.assertEqual(0, res.get_local_count(0))
387 self.assertEqual(0, res.get_local_blocking(0))
388
389 self.assertEqual(0, res.get_local_count(1))
390 self.assertEqual(0, res.get_local_blocking(1))
391
392 self.assertEqual(0, res.get_local_count(2))
393 self.assertEqual(0, res.get_local_blocking(2))
394
        # Task 3 is local to resource 0: blocked by agents of tasks 0-2
        # (per-task request counts 11, 5, 3 with lengths 3, 5, 7).
395 self.assertEqual(11 + 5 + 3, res.get_local_count(3))
396 self.assertEqual(11 * 3 + 5 * 5 + 3 * 7, res.get_local_blocking(3))
397
398 def test_remote_blocking(self):
399 res = cpp.dpcp_bounds(self.rsi, self.loc)
400
        # Task 3 makes no requests, hence no remote blocking.
401 self.assertEqual(0, res.get_remote_count(3))
402 self.assertEqual(0, res.get_remote_blocking(3))
403
404 self.assertEqual(1, res.get_remote_count(0))
405 self.assertEqual(7, res.get_remote_blocking(0))
406
407 self.assertEqual(5, res.get_remote_count(1))
408 self.assertEqual(4 * 3 + 1 * 7, res.get_remote_blocking(1))
409
410 self.assertEqual(6 + 3, res.get_remote_count(2))
411 self.assertEqual(6 * 3 + 3 * 5, res.get_remote_blocking(2))
412
413
414class Test_mpcp_terms(unittest.TestCase):
    # Tests for the MPCP (Multiprocessor Priority Ceiling Protocol)
    # blocking bounds. Same task set as Test_dpcp_terms.
415
416 def setUp(self):
417 self.rsi = cpp.ResourceSharingInfo(4)
418
419 self.rsi.add_task(10, 10, 2, 100)
420 self.rsi.add_request(0, 1, 3)
421
422 self.rsi.add_task(25, 25, 3, 200)
423 self.rsi.add_request(0, 1, 5)
424
425 self.rsi.add_task(50, 50, 4, 300)
426 self.rsi.add_request(0, 1, 7)
427
428 self.rsi.add_task(100, 100, 1, 400)
429
        # NOTE(review): resource locality is set up but unused below --
        # mpcp_bounds() does not take a locality argument.
430 self.loc = cpp.ResourceLocality()
431 self.loc.assign_resource(0, 1)
432
433
434 def test_remote_blocking(self):
        # Second argument False: presumably "no virtual spinning" --
        # TODO confirm against mpcp_bounds().
435 res = cpp.mpcp_bounds(self.rsi, False)
436
        # Task 3 makes no requests: no blocking at all.
437 self.assertEqual(0, res.get_remote_count(3))
438 self.assertEqual(0, res.get_remote_blocking(3))
439
440 self.assertEqual(0, res.get_blocking_count(3))
441 self.assertEqual(0, res.get_blocking_term(3))
442
        # Under the MPCP all blocking is remote: remote term == total term.
443 self.assertEqual(7, res.get_remote_blocking(0))
444 self.assertEqual(7, res.get_blocking_term(0))
445
446 self.assertEqual(1 * 7 + (2 + 1) * 3, res.get_remote_blocking(1))
447 self.assertEqual(1 * 7 + (2 + 1) * 3, res.get_blocking_term(1))
448
449 self.assertEqual((2 + 1) * 3 + (1 + 1) * 5, res.get_remote_blocking(2))
450 self.assertEqual((2 + 1) * 3 + (1 + 1) * 5, res.get_blocking_term(2))
451
452
453class Test_part_fmlp_terms(unittest.TestCase):
    # Tests for the partitioned FMLP blocking bounds.
    # Same task set as Test_dpcp_terms / Test_mpcp_terms.
454
455 def setUp(self):
456 self.rsi = cpp.ResourceSharingInfo(4)
457
458 self.rsi.add_task(10, 10, 2, 100)
459 self.rsi.add_request(0, 1, 3)
460
461 self.rsi.add_task(25, 25, 3, 200)
462 self.rsi.add_request(0, 1, 5)
463
464 self.rsi.add_task(50, 50, 4, 300)
465 self.rsi.add_request(0, 1, 7)
466
467 self.rsi.add_task(100, 100, 1, 400)
468
        # NOTE(review): locality is unused by part_fmlp_bounds() below.
469 self.loc = cpp.ResourceLocality()
470 self.loc.assign_resource(0, 1)
471
472
473 def test_fmlp_remote(self):
        # Second argument True: presumably "preemptive" variant --
        # TODO confirm against part_fmlp_bounds().
474 res = cpp.part_fmlp_bounds(self.rsi, True)
475
        # Task 3 issues no requests: no blocking.
476 self.assertEqual(0, res.get_blocking_count(3))
477 self.assertEqual(0, res.get_blocking_term(3))
478
        # FIFO queueing: each task waits at most once for each of the
        # other two tasks' critical sections (lengths 3, 5, 7).
479 self.assertEqual(2, res.get_blocking_count(0))
480 self.assertEqual(5 + 7, res.get_blocking_term(0))
481
482 self.assertEqual(2, res.get_blocking_count(1))
483 self.assertEqual(3 + 7, res.get_blocking_term(1))
484
485 self.assertEqual(2, res.get_blocking_count(2))
486 self.assertEqual(3 + 5, res.get_blocking_term(2))
487
diff --git a/tests/model.py b/tests/model.py
new file mode 100644
index 0000000..60c7cb4
--- /dev/null
+++ b/tests/model.py
@@ -0,0 +1,139 @@
1from __future__ import division
2
3import unittest
4from StringIO import StringIO
5from fractions import Fraction
6
7import schedcat.model.tasks as m
8import schedcat.model.serialize as s
9import schedcat.model.resources as r
10
class Tasks(unittest.TestCase):
    """Unit tests for the schedcat.model.tasks.SporadicTask model."""

    def setUp(self):
        # implicit deadline (deadline == period)
        self.t1 = m.SporadicTask(10, 100)
        # constrained deadline (deadline 15 < period 19), explicit id
        self.t2 = m.SporadicTask(5, 19, 15, id=3)
        # unconstrained deadline (deadline 75 > period 50)
        self.t3 = m.SporadicTask(25, 50, id=5, deadline=75)

    def test_deadline_type(self):
        self.assertTrue(self.t1.implicit_deadline())
        self.assertFalse(self.t2.implicit_deadline())
        self.assertFalse(self.t3.implicit_deadline())

        self.assertTrue(self.t1.constrained_deadline())
        self.assertTrue(self.t2.constrained_deadline())
        # Bug fix: this previously re-checked t3.implicit_deadline()
        # (a copy-paste duplicate of the assertion above); the
        # constrained-deadline group must verify that t3, whose deadline
        # exceeds its period, is NOT constrained.
        self.assertFalse(self.t3.constrained_deadline())

    def test_utilization(self):
        # utilization = cost / period
        self.assertEqual(self.t1.utilization_q(), Fraction(10, 100))
        self.assertEqual(self.t1.utilization(), 0.1)

    def test_density(self):
        # density: t2 -> 5/15, t3 -> 25/50 (cost over the smaller of
        # period and deadline, judging by these values -- TODO confirm)
        self.assertEqual(self.t2.density_q(), Fraction(1, 3))
        self.assertEqual(self.t2.density(), 1 / 3)

        self.assertEqual(self.t3.density_q(), Fraction(1, 2))
        self.assertEqual(self.t3.density(), 0.5)

    def test_repr(self):
        # repr() must round-trip through eval() for every flavor of task
        # (implicit deadline, explicit id, keyword deadline).
        for t in (self.t1, self.t2, self.t3):
            clone = eval("m." + repr(t))
            self.assertEqual(clone.cost, t.cost)
            self.assertEqual(clone.period, t.period)
            self.assertEqual(clone.deadline, t.deadline)
            self.assertEqual(clone.id, t.id)

    def test_maxjobs(self):
        # Number of jobs that can interfere in a 7.25-long interval,
        # given a response time of 6.6 and period 2.
        t = m.SporadicTask(None, 2)
        t.response_time = 6.6
        self.assertEqual(t.maxjobs(7.25), 7)

    def test_tardiness(self):
        # tardiness = max(0, response_time - deadline)
        t = m.SporadicTask(None, 2)
        t.response_time = 1.5
        self.assertEqual(t.tardiness(), 0)
        t.response_time = 6
        self.assertEqual(t.tardiness(), 4)
68# TODO: Write tests for TaskSystem
69
class Resources(unittest.TestCase):
    """Unit tests for schedcat.model.resources.ResourceRequirement.

    Bug fix: this class was also named ``Tasks``, colliding with the
    SporadicTask test case defined earlier in this module; the duplicate
    name meant one of the two classes was silently shadowed and never
    executed by the test runner. Renamed to ``Resources``.
    """

    def test_property(self):
        # ResourceRequirement(res_id, 2 writes of length 10,
        #                     13 reads of length 4) -- argument order
        # inferred from the assertions below; TODO confirm.
        req = r.ResourceRequirement(1, 2, 10, 13, 4)
        self.assertEqual(req.max_reads, 13)
        self.assertEqual(req.max_writes, 2)
        # total requests = reads + writes
        self.assertEqual(req.max_requests, 15)
        self.assertEqual(req.max_write_length, 10)
        self.assertEqual(req.max_read_length, 4)
        # max_length is the longer of read and write lengths
        self.assertEqual(req.max_length, 10)
84class Serialization(unittest.TestCase):
    # Round-trip tests for schedcat.model.serialize: tasks and task
    # systems are written as XML into an in-memory StringIO buffer and
    # re-loaded, then compared field by field.
85 def setUp(self):
86 self.t1 = m.SporadicTask(10, 100)
87 self.t2 = m.SporadicTask(5, 19, 15, id=3)
88 self.t3 = m.SporadicTask(25, 50, id=5, deadline=75)
89 self.ts = m.TaskSystem([self.t1, self.t2, self.t3])
        # shared in-memory buffer; rewound and truncated between tasks
90 self.f = StringIO()
91
92 def test_serialize_task(self):
93 for t in self.ts:
94 s.write_xml(s.task(t), self.f)
            # rewind before reading back what was just written
95 self.f.seek(0)
96 x = s.load(self.f)
97 self.assertIsInstance(x, m.SporadicTask)
98 self.assertEqual(x.cost, t.cost)
99 self.assertEqual(x.deadline, t.deadline)
100 self.assertEqual(x.period, t.period)
101 self.assertEqual(x.id, t.id)
            # reset the buffer for the next iteration
102 self.f.seek(0)
103 self.f.truncate()
104
105 def test_serialize_taskset(self):
106 s.write(self.ts, self.f)
107 self.f.seek(0)
108 xs = s.load(self.f)
109 self.assertIsInstance(xs, m.TaskSystem)
110 self.assertEqual(len(xs), len(self.ts))
111 for x,t in zip(xs, self.ts):
112 self.assertEqual(x.cost, t.cost)
113 self.assertEqual(x.deadline, t.deadline)
114 self.assertEqual(x.period, t.period)
115 self.assertEqual(x.id, t.id)
116
117 def test_serialize_resmodel(self):
        # Attach resource requirements (int and string resource ids)
        # and check they survive the XML round trip.
118 r.initialize_resource_model(self.ts)
119 self.t1.resmodel[1].add_request(1)
120 self.t2.resmodel[1].add_read_request(2)
121 self.t2.resmodel['serial I/O'].add_request(2)
122 self.t3.resmodel['serial I/O'].add_request(3)
123
124 for t in self.ts:
125 s.write_xml(s.task(t), self.f)
126 self.f.seek(0)
127 x = s.load(self.f)
128 self.assertIsInstance(x.resmodel, r.ResourceRequirements)
129 self.assertEqual(len(x.resmodel), len(t.resmodel))
130 self.assertEqual(x.resmodel.keys(), t.resmodel.keys())
131 for res_id in x.resmodel:
132 self.assertEqual(x.resmodel[res_id].max_reads, t.resmodel[res_id].max_reads)
133 self.assertEqual(x.resmodel[res_id].max_writes, t.resmodel[res_id].max_writes)
134 self.assertEqual(x.resmodel[res_id].max_requests, t.resmodel[res_id].max_requests)
135 self.assertEqual(x.resmodel[res_id].max_read_length, t.resmodel[res_id].max_read_length)
136 self.assertEqual(x.resmodel[res_id].max_write_length, t.resmodel[res_id].max_write_length)
137 self.assertEqual(x.resmodel[res_id].max_length, t.resmodel[res_id].max_length)
138 self.f.seek(0)
139 self.f.truncate()
diff --git a/tests/overheads.py b/tests/overheads.py
new file mode 100644
index 0000000..bd33627
--- /dev/null
+++ b/tests/overheads.py
@@ -0,0 +1,863 @@
1from __future__ import division
2
3import unittest
4import StringIO
5
6import schedcat.overheads.model as m
7import schedcat.overheads.jlfp as jlfp
8import schedcat.overheads.pfair as pfair
9import schedcat.overheads.fp as fp
10import schedcat.overheads.locking as locking
11import schedcat.model.tasks as tasks
12import schedcat.model.resources as res
13
14from schedcat.util.math import const
15
16class Model(unittest.TestCase):
    # Tests for schedcat.overheads.model: parsing overhead measurement
    # tables (Overheads) and cache-related preemption/migration delay
    # tables (CacheDelay) from CSV-style files, with interpolation.
17 def setUp(self):
        # Scheduling-overhead table keyed by task count.
18 s = """TASK-COUNT, SCHEDULE
19 10 , 10
20 20 , 20
21 30 , 20
22 40 , 17
23 50 , 40
24"""
25 self.sched_file = StringIO.StringIO(s)
26
        # CPMD table keyed by working-set size (WSS).
27 s = """WSS, MEM, L3
281024, 100, 50
292048, 200, 50
304096, 400, 400
3116384, 1600, 17000
32"""
33 self.cpmd_file = StringIO.StringIO(s)
34
35 def test_init(self):
        # A fresh Overheads object charges zero for everything and
        # defaults to a 1000us quantum.
36 o = m.Overheads()
37 self.assertEqual(o.schedule(10), 0)
38 self.assertEqual(o.ctx_switch(10), 0)
39 self.assertEqual(o.quantum_length, 1000)
40
41 def test_from_file(self):
        # Values between/beyond measured task counts are interpolated
        # monotonically (note: 40 -> 20.0, i.e. the dip to 17 is
        # flattened; presumably a monotone envelope -- TODO confirm).
42 o = m.Overheads.from_file(self.sched_file)
43 self.assertIsInstance(o.schedule(10), float)
44 self.assertAlmostEqual(o.schedule(10), 10.0)
45 self.assertAlmostEqual(o.schedule(5), 5.0)
46 self.assertAlmostEqual(o.schedule(15), 15.0)
47 self.assertAlmostEqual(o.schedule(20), 20.0)
48 self.assertAlmostEqual(o.schedule(25), 20.0)
49 self.assertAlmostEqual(o.schedule(30), 20.0)
50 self.assertAlmostEqual(o.schedule(40), 20.0)
51 self.assertAlmostEqual(o.schedule(45), 30.0)
52
53 def test_cpmd_from_file(self):
        # o(wss) is the overall delay; o.MEM(wss) the memory component.
54 o = m.CacheDelay.from_file(self.cpmd_file)
55 self.assertIsInstance(o.MEM(10), float)
56 self.assertIsInstance(o(10), float)
57 self.assertAlmostEqual(o(1024), 100.0)
58 self.assertAlmostEqual(o.MEM(8192), 800.0)
59 self.assertGreater(o(8192), 800.0)
60 self.assertAlmostEqual(o(16384), 17000.0)
61
62 def test_exceptions(self):
        # Missing files and wrong-format files both surface as IOError
        # (a mismatched header is treated as an I/O failure -- NOTE:
        # intentional per these assertions).
63 self.assertRaises(IOError, m.Overheads.from_file, '/non/existant')
64 self.assertRaises(IOError, m.CacheDelay.from_file, '/non/existant')
65 self.assertRaises(IOError, m.Overheads.from_file, self.cpmd_file)
66 self.assertRaises(IOError, m.CacheDelay.from_file, self.sched_file)
67
68
69class JLFPOverheads(unittest.TestCase):
    # Overhead accounting for job-level fixed-priority (JLFP) schedulers:
    # each overhead source is enabled in isolation (via const()) and the
    # resulting inflation of cost/period/deadline is checked. The
    # charge_* functions mutate the task set in place and return it.
70 def setUp(self):
71 self.ts = tasks.TaskSystem([
72 tasks.SporadicTask(10000, 100000),
73 tasks.SporadicTask( 5000, 50000),
74 ])
75 for t in self.ts:
76 t.wss = 0
77 self.o = m.Overheads()
78
    # Assertion helpers (names deliberately do not start with "test_").
79 def unchanged_period(self):
80 self.assertEqual(self.ts[0].period, 100000)
81 self.assertEqual(self.ts[1].period, 50000)
82
83 def unchanged_deadline(self):
84 self.assertEqual(self.ts[0].deadline, 100000)
85 self.assertEqual(self.ts[1].deadline, 50000)
86
87 def unchanged_cost(self):
88 self.assertEqual(self.ts[0].cost, 10000)
89 self.assertEqual(self.ts[1].cost, 5000)
90
91 def test_none(self):
        # No overhead object at all: task set passes through untouched.
92 self.assertEqual(jlfp.charge_scheduling_overheads(None, 4, False, self.ts), self.ts)
93 self.unchanged_cost()
94 self.unchanged_period()
95 self.unchanged_deadline()
96
97 def test_initial_load(self):
        # Initial cache load of 4 is charged once per job.
98 self.o.initial_cache_load = const(4)
99 self.assertEqual(jlfp.charge_initial_load(self.o, self.ts), self.ts)
100 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
101 self.assertEqual(self.ts[0].cost, 10004)
102 self.assertEqual(self.ts[1].cost, 5004)
103 self.unchanged_period()
104 self.unchanged_deadline()
105
106 def test_sched(self):
        # Scheduler invocation charged twice per job (2 * 2 = 4).
107 self.o.schedule = const(2)
108 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
109 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
110 self.assertEqual(self.ts[0].cost, 10004)
111 self.assertEqual(self.ts[1].cost, 5004)
112 self.unchanged_period()
113 self.unchanged_deadline()
114
115 def test_ctx_switch(self):
        # Context switch charged twice per job (2 * 1 = 2).
116 self.o.ctx_switch = const(1)
117 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
118 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
119 self.assertEqual(self.ts[0].cost, 10002)
120 self.assertEqual(self.ts[1].cost, 5002)
121 self.unchanged_period()
122 self.unchanged_deadline()
123
124 def test_cache_affinity_loss(self):
        # CPMD charged once per job.
125 self.o.cache_affinity_loss = const(1)
126 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
127 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
128 self.assertEqual(self.ts[0].cost, 10001)
129 self.assertEqual(self.ts[1].cost, 5001)
130 self.unchanged_period()
131 self.unchanged_deadline()
132
133 def test_ipi_latency(self):
        # IPI latency charged once per job (non-dedicated interrupts).
134 self.o.ipi_latency = const(1)
135 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
136 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
137 self.assertEqual(self.ts[0].cost, 10001)
138 self.assertEqual(self.ts[1].cost, 5001)
139 self.unchanged_period()
140 self.unchanged_deadline()
141
142 def test_release_latency(self):
        # Release latency shrinks the effective period and deadline
        # instead of inflating cost.
143 self.o.release_latency = const(1)
144 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
145 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
146 self.unchanged_cost()
147 self.assertEqual(self.ts[0].period, 99999)
148 self.assertEqual(self.ts[1].period, 49999)
149 self.assertEqual(self.ts[0].deadline, 99999)
150 self.assertEqual(self.ts[1].deadline, 49999)
151
152 def test_tick(self):
        # Timer-tick interference depends on the number of quanta per
        # job; expected values are hand-derived -- TODO confirm.
153 self.o.tick = const(1)
154 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
155 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
156 self.assertEqual(self.ts[0].cost, 10013)
157 self.assertEqual(self.ts[1].cost, 5008)
158 self.unchanged_period()
159 self.unchanged_deadline()
160
161 def test_release(self):
        # Release-interrupt interference; hand-derived -- TODO confirm.
162 self.o.release = const(1)
163 self.assertEqual(jlfp.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
164 self.assertEqual(jlfp.quantize_params(self.ts), self.ts)
165 self.assertEqual(self.ts[0].cost, 10005)
166 self.assertEqual(self.ts[1].cost, 5005)
167 self.unchanged_period()
168 self.unchanged_deadline()
169
170
171class PfairOverheads(unittest.TestCase):
    # Overhead accounting for Pfair scheduling: costs are inflated and
    # quantized to multiples of the 500us quantum, and (unless releases
    # are quantum-aligned) periods/deadlines lose one quantum to release
    # delay.
172 def setUp(self):
        # Costs chosen just below a quantum multiple so quantization
        # rounds them up to 10000 / 5000.
173 self.ts = tasks.TaskSystem([
174 tasks.SporadicTask(9995, 100000),
175 tasks.SporadicTask( 4995, 50000),
176 ])
177 for t in self.ts:
178 t.wss = 0
179 self.o = m.Overheads()
180 self.o.quantum_length = 500
181
182 def unchanged_period(self):
183 # not strictly unchanged, but affected by a quantum release delay
184 self.assertEqual(self.ts[0].period, 99500)
185 self.assertEqual(self.ts[1].period, 49500)
186
187 def unchanged_deadline(self):
188 # not strictly unchanged, but affected by a quantum release delay
189 self.assertEqual(self.ts[0].deadline, 99500)
190 self.assertEqual(self.ts[1].deadline, 49500)
191
192 def unchanged_cost(self):
193 # not strictly unchanged, but only quantized
194 self.assertEqual(self.ts[0].cost, 10000)
195 self.assertEqual(self.ts[1].cost, 5000)
196
197 def test_none(self):
        # No overhead object: no quantization, no release delay.
198 self.assertEqual(pfair.charge_scheduling_overheads(None, 4, False, self.ts), self.ts)
199 self.assertEqual(self.ts[0].cost, 9995)
200 self.assertEqual(self.ts[1].cost, 4995)
201 self.assertEqual(self.ts[0].period, 100000)
202 self.assertEqual(self.ts[1].period, 50000)
203 self.assertEqual(self.ts[0].deadline, 100000)
204 self.assertEqual(self.ts[1].deadline, 50000)
205
206 def test_quant(self):
        # Zero overheads: costs are only rounded up to quantum multiples.
207 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
208 self.assertEqual(self.ts[0].cost, 10000)
209 self.assertEqual(self.ts[1].cost, 5000)
210 self.unchanged_period()
211 self.unchanged_deadline()
212
213 def test_periodic(self):
        # Quantum-aligned periodic releases: no release-delay penalty on
        # periods/deadlines.
214 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts,
215 aligned_periodic_releases=True), self.ts)
216 self.assertEqual(self.ts[0].cost, 10000)
217 self.assertEqual(self.ts[1].cost, 5000)
218 self.assertEqual(self.ts[0].deadline, 100000)
219 self.assertEqual(self.ts[1].deadline, 50000)
220 self.assertEqual(self.ts[0].period, 100000)
221 self.assertEqual(self.ts[1].period, 50000)
222
223 def test_sched(self):
        # Per-quantum scheduling cost shrinks the effective quantum;
        # expected values hand-derived -- TODO confirm.
224 self.o.schedule = const(50)
225 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
226 self.assertEqual(self.ts[0].cost, 11500)
227 self.assertEqual(self.ts[1].cost, 6000)
228 self.unchanged_period()
229 self.unchanged_deadline()
230
231 def test_ctx_switch(self):
232 self.o.ctx_switch = const(100)
233 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
234 self.assertEqual(self.ts[0].cost, 12500)
235 self.assertEqual(self.ts[1].cost, 6500)
236 self.unchanged_period()
237 self.unchanged_deadline()
238
239 def test_cache_affinity_loss(self):
240 self.o.cache_affinity_loss = const(0.5)
241 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
242 self.assertEqual(self.ts[0].cost, 10500)
243 self.assertEqual(self.ts[1].cost, 5000)
244 self.unchanged_period()
245 self.unchanged_deadline()
246
247 def test_ipi_latency(self):
248 # IPI latency is irrelevant for Pfair
249 self.o.ipi_latency = const(1000)
250 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
251 self.assertEqual(self.ts[0].cost, 10000)
252 self.assertEqual(self.ts[1].cost, 5000)
253 self.unchanged_period()
254 self.unchanged_deadline()
255
256 def test_release_latency(self):
        # Release latency both inflates cost and shrinks period/deadline
        # by an additional quantum.
257 self.o.release_latency = const(100)
258 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
259 self.assertEqual(self.ts[0].cost, 12500)
260 self.assertEqual(self.ts[1].cost, 6500)
261 self.assertEqual(self.ts[0].period, 99000)
262 self.assertEqual(self.ts[1].period, 49000)
263 self.assertEqual(self.ts[0].deadline, 99000)
264 self.assertEqual(self.ts[1].deadline, 49000)
265
266 def test_tick(self):
        # Under Pfair, tick and schedule are charged identically
        # (compare test_sched above).
267 self.o.tick = const(50)
268 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
269 self.assertEqual(self.ts[0].cost, 11500)
270 self.assertEqual(self.ts[1].cost, 6000)
271 self.unchanged_period()
272 self.unchanged_deadline()
273
274 def test_release(self):
275 self.o.release = const(50)
276 self.assertEqual(pfair.charge_scheduling_overheads(self.o, 4, False, self.ts), self.ts)
277 self.assertEqual(self.ts[0].cost, 11500)
278 self.assertEqual(self.ts[1].cost, 6000)
279 self.assertEqual(self.ts[0].period, 99000)
280 self.assertEqual(self.ts[1].period, 49000)
281 self.assertEqual(self.ts[0].deadline, 99000)
282 self.assertEqual(self.ts[1].deadline, 49000)
283
284
285class FPOverheads(unittest.TestCase):
    # Overhead accounting for fixed-priority scheduling. Unlike the JLFP
    # variant, fp.charge_scheduling_overheads() returns a NEW task set
    # (interrupt sources are modeled as extra highest-priority tasks) and
    # may add release jitter to the original tasks.
286 def setUp(self):
287 self.ts = tasks.TaskSystem([
288 tasks.SporadicTask(10000, 100000),
289 tasks.SporadicTask( 5000, 50000),
290 ])
291 for t in self.ts:
292 t.wss = 0
293 self.o = m.Overheads()
294
    # Assertion helpers (names deliberately do not start with "test_").
295 def unchanged_period(self):
296 self.assertEqual(self.ts[0].period, 100000)
297 self.assertEqual(self.ts[1].period, 50000)
298
299 def unchanged_deadline(self):
300 self.assertEqual(self.ts[0].deadline, 100000)
301 self.assertEqual(self.ts[1].deadline, 50000)
302
303 def unchanged_cost(self):
304 self.assertEqual(self.ts[0].cost, 10000)
305 self.assertEqual(self.ts[1].cost, 5000)
306
307 def no_jitter(self):
308 self.assertEqual(self.ts[0].jitter, 0)
309 self.assertEqual(self.ts[1].jitter, 0)
310
311 def test_none(self):
        # Even with no overheads, a distinct (non-False) task set object
        # is returned; the original parameters are untouched.
312 ts = fp.charge_scheduling_overheads(None, 4, False, self.ts)
313 self.assertIsNot(ts, False)
314 self.assertIsNot(ts, self.ts)
315 self.unchanged_cost()
316 self.unchanged_period()
317 self.unchanged_deadline()
318
319 def test_sched(self):
        # Scheduler invocation charged twice per job (2 * 2 = 4).
320 self.o.schedule = const(2)
321 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
322 self.assertEqual(fp.quantize_params(ts), ts)
323 self.assertIsNot(ts, False)
324 self.assertIsNot(ts, self.ts)
325 self.assertEqual(self.ts[0].cost, 10004)
326 self.assertEqual(self.ts[1].cost, 5004)
327 self.unchanged_period()
328 self.unchanged_deadline()
329 self.no_jitter()
330
331 def test_ctx_switch(self):
        # Context switch charged twice per job (2 * 1 = 2).
332 self.o.ctx_switch = const(1)
333 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
334 self.assertEqual(fp.quantize_params(ts), ts)
335 self.assertIsNot(ts, False)
336 self.assertIsNot(ts, self.ts)
337 self.assertEqual(self.ts[0].cost, 10002)
338 self.assertEqual(self.ts[1].cost, 5002)
339 self.unchanged_period()
340 self.unchanged_deadline()
341 self.no_jitter()
342
343 def test_cache_affinity_loss(self):
344 self.o.cache_affinity_loss = const(1)
345 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
346 self.assertEqual(fp.quantize_params(ts), ts)
347 self.assertIsNot(ts, False)
348 self.assertIsNot(ts, self.ts)
349 self.assertEqual(self.ts[0].cost, 10001)
350 self.assertEqual(self.ts[1].cost, 5001)
351 self.unchanged_period()
352 self.unchanged_deadline()
353 self.no_jitter()
354
355 def test_ipi_latency(self):
        # IPI latency only matters with dedicated interrupt handling
        # (third argument True): it then shows up as release jitter.
356 self.o.ipi_latency = const(1)
357 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
358 self.assertEqual(fp.quantize_params(ts), ts)
359 self.assertIsNot(ts, False)
360 self.assertIsNot(ts, self.ts)
361 self.unchanged_cost()
362 self.unchanged_period()
363 self.unchanged_deadline()
364 self.no_jitter()
365
366 ts = fp.charge_scheduling_overheads(self.o, 4, True, self.ts)
367 self.assertEqual(fp.quantize_params(ts), ts)
368 self.assertIsNot(ts, False)
369 self.assertIsNot(ts, self.ts)
370 self.unchanged_cost()
371 self.unchanged_period()
372 self.unchanged_deadline()
373 self.assertEqual(self.ts[0].jitter, 1)
374 self.assertEqual(self.ts[1].jitter, 1)
375
376 def test_release_latency(self):
        # Release latency becomes release jitter under fixed priorities
        # (contrast with JLFP, where it shrinks period/deadline).
377 self.o.release_latency = const(1)
378 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
379 self.assertEqual(fp.quantize_params(ts), ts)
380 self.assertIsNot(ts, False)
381 self.assertIsNot(ts, self.ts)
382 self.unchanged_cost()
383 self.unchanged_period()
384 self.unchanged_deadline()
385 self.assertEqual(self.ts[0].jitter, 1)
386 self.assertEqual(self.ts[1].jitter, 1)
387
388 def test_tick(self):
        # Timer ticks are modeled as an extra interference task with
        # cost = tick cost and period = quantum length.
389 self.o.tick = const(123)
390 self.o.quantum_length = 777
391 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
392 self.assertEqual(fp.quantize_params(ts), ts)
393 self.assertIsNot(ts, False)
394 self.assertIsNot(ts, self.ts)
395 self.unchanged_cost()
396 self.unchanged_period()
397 self.unchanged_deadline()
398 self.no_jitter()
399 self.assertEqual(ts[0].cost, 123)
400 self.assertEqual(ts[0].period, 777)
401
402 def test_release(self):
        # Release interrupts: one interference task per original task
        # (cost 17, original period); original tasks also get jitter 17,
        # while the interference tasks themselves have none.
403 self.o.release = const(17)
404 ts = fp.charge_scheduling_overheads(self.o, 4, False, self.ts)
405 self.assertEqual(fp.quantize_params(ts), ts)
406 self.assertIsNot(ts, False)
407 self.assertIsNot(ts, self.ts)
408 self.unchanged_cost()
409 self.unchanged_period()
410 self.unchanged_deadline()
411 self.assertEqual(self.ts[0].jitter, 17)
412 self.assertEqual(self.ts[1].jitter, 17)
413 self.assertEqual(ts[0].cost, 17)
414 self.assertEqual(ts[0].jitter, 0)
415 self.assertEqual(ts[0].period, self.ts[0].period)
416 self.assertEqual(ts[1].cost, 17)
417 self.assertEqual(ts[1].jitter, 0)
418 self.assertEqual(ts[1].period, self.ts[1].period)
419
420
421class LockingOverheads(unittest.TestCase):
422 def setUp(self):
423 self.ts = tasks.TaskSystem([
424 tasks.SporadicTask(10000, 100000),
425 tasks.SporadicTask( 5000, 50000),
426 ])
427 for t in self.ts:
428 t.wss = 0
429 res.initialize_resource_model(self.ts)
430 self.ts[0].resmodel[0].add_request(11)
431 self.ts[0].resmodel[0].add_request(7)
432 self.ts[1].resmodel[0].add_request(17)
433
434 self.ts[0].resmodel[1].add_read_request(11)
435 self.ts[0].resmodel[1].add_read_request(1)
436 self.ts[1].resmodel[1].add_read_request(17)
437
438 self.ts[0].resmodel[2].add_read_request(11)
439 self.ts[0].resmodel[2].add_read_request(1)
440 self.ts[1].resmodel[2].add_write_request(17)
441 self.ts[1].resmodel[2].add_write_request(1)
442
443 self.o = m.Overheads()
444
445 def no_reads(self):
446 for t in self.ts:
447 for res_id in t.resmodel:
448 req = t.resmodel[res_id]
449 req.convert_reads_to_writes()
450
451 def init_susp(self):
452 for t in self.ts:
453 t.suspended = 0
454
455 def not_lossy(self):
456 self.assertIs(self.ts[0].resmodel[0].max_reads, 0)
457 self.assertIs(self.ts[0].resmodel[0].max_writes, 2)
458 self.assertIs(self.ts[1].resmodel[0].max_reads, 0)
459 self.assertIs(self.ts[1].resmodel[0].max_writes, 1)
460
461 self.assertIs(self.ts[0].resmodel[1].max_reads, 2)
462 self.assertIs(self.ts[0].resmodel[1].max_writes, 0)
463 self.assertIs(self.ts[1].resmodel[1].max_reads, 1)
464 self.assertIs(self.ts[1].resmodel[1].max_writes, 0)
465
466 self.assertIs(self.ts[0].resmodel[2].max_reads, 2)
467 self.assertIs(self.ts[0].resmodel[2].max_writes, 0)
468 self.assertIs(self.ts[1].resmodel[2].max_reads, 0)
469 self.assertIs(self.ts[1].resmodel[2].max_writes, 2)
470
471 def not_lossy_no_reads(self):
472 self.assertIs(self.ts[0].resmodel[0].max_reads, 0)
473 self.assertIs(self.ts[0].resmodel[0].max_writes, 2)
474 self.assertIs(self.ts[1].resmodel[0].max_reads, 0)
475 self.assertIs(self.ts[1].resmodel[0].max_writes, 1)
476
477 self.assertIs(self.ts[0].resmodel[1].max_reads, 0)
478 self.assertIs(self.ts[0].resmodel[1].max_writes, 2)
479 self.assertIs(self.ts[1].resmodel[1].max_reads, 0)
480 self.assertIs(self.ts[1].resmodel[1].max_writes, 1)
481
482 self.assertIs(self.ts[0].resmodel[2].max_reads, 0)
483 self.assertIs(self.ts[0].resmodel[2].max_writes, 2)
484 self.assertIs(self.ts[1].resmodel[2].max_reads, 0)
485 self.assertIs(self.ts[1].resmodel[2].max_writes, 2)
486
487 def unchanged_period(self):
488 self.assertEqual(self.ts[0].period, 100000)
489 self.assertEqual(self.ts[1].period, 50000)
490
491 def unchanged_deadline(self):
492 self.assertEqual(self.ts[0].deadline, 100000)
493 self.assertEqual(self.ts[1].deadline, 50000)
494
495 def unchanged_cost(self):
496 self.assertEqual(self.ts[0].cost, 10000)
497 self.assertEqual(self.ts[1].cost, 5000)
498
499 def unchanged_resmodel(self):
500 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
501 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11)
502 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
503 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17)
504
505 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 11)
506 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 0)
507 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 17)
508 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 0)
509
510 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 11)
511 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 0)
512 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
513 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17)
514
515 def unchanged_resmodel_no_reads(self):
516 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
517 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11)
518 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
519 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17)
520
521 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
522 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11)
523 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
524 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17)
525
526 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
527 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11)
528 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
529 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17)
530
531
532 def oheads(self, sched=False):
533 self.o.lock = const(3)
534 self.o.unlock = const(5)
535 self.o.read_lock = const(7)
536 self.o.read_unlock = const(11)
537 self.o.syscall_in = const(17)
538 self.o.syscall_out = const(19)
539
540 if sched:
541 self.o.ipi_latency = const(23)
542 self.o.schedule = const(27)
543 self.o.ctx_switch = const(31)
544 self.o.cache_affinity_loss.set_cpmd_cost(m.CacheDelay.L1, const(41))
545
546# 31 37 41 43 47 53 59 61 67 71
547
548# ('LOCK', 'lock'),
549# ('UNLOCK', 'unlock'),
550# ('READ-LOCK', 'read_lock'),
551# ('READ-UNLOCK', 'read_unlock'),
552# ('SYSCALL-IN', 'syscall_in'),
553# ('SYSCALL-OUT', 'syscall_out'),
554
555 def test_spinlock_none(self):
556 self.assertIs(locking.charge_spinlock_overheads(None, self.ts), self.ts)
557 self.unchanged_period()
558 self.unchanged_deadline()
559 self.unchanged_cost()
560 self.not_lossy()
561 self.unchanged_resmodel()
562
563 def test_spinlock_zero(self):
564 self.assertIs(locking.charge_spinlock_overheads(self.o, self.ts), self.ts)
565 self.unchanged_period()
566 self.unchanged_deadline()
567 self.unchanged_cost()
568 self.not_lossy()
569 self.unchanged_resmodel()
570
571 def test_spinlock_infeasible(self):
572 self.o.syscall_in = const(10000000)
573 self.assertIs(locking.charge_spinlock_overheads(self.o, self.ts), False)
574
575 def test_spinlock_integral(self):
576 self.o.lock = const(1.75)
577 self.assertIs(locking.charge_spinlock_overheads(self.o, self.ts), self.ts)
578 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + 2)
579
580 def test_spinlock(self):
581 self.oheads()
582 self.assertIs(locking.charge_spinlock_overheads(self.o, self.ts), self.ts)
583 self.unchanged_period()
584 self.unchanged_deadline()
585 self.not_lossy()
586
587 scost = 17 + 19
588 rcost = 7 + 11
589 wcost = 3 + 5
590
591 self.assertEqual(self.ts[0].cost, 10000 + 2 * wcost + 4 * rcost + 6 * scost)
592 self.assertEqual(self.ts[1].cost, 5000 + 3 * wcost + 1 * rcost + 4 * scost)
593
594 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
595 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + wcost)
596 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
597 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + wcost)
598
599 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 11 + rcost)
600 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 0)
601 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 17 + rcost)
602 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 0)
603
604 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 11 + rcost)
605 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 0)
606 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
607 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + wcost)
608
609 def test_sem_none(self):
610 self.assertIs(locking.charge_semaphore_overheads(None, True, True, self.ts), self.ts)
611 self.unchanged_period()
612 self.unchanged_deadline()
613 self.unchanged_cost()
614 self.not_lossy()
615 self.unchanged_resmodel()
616
617 def test_sem_zero(self):
618 self.no_reads()
619 self.assertIs(locking.charge_semaphore_overheads(self.o, True, False, self.ts), self.ts)
620 self.unchanged_period()
621 self.unchanged_deadline()
622 self.unchanged_cost()
623 self.not_lossy_no_reads()
624 self.unchanged_resmodel_no_reads()
625
626 def test_sem_infeasible(self):
627 self.no_reads()
628 self.o.syscall_in = const(10000000)
629 self.assertIs(locking.charge_semaphore_overheads(self.o, True, False, self.ts), False)
630
631 def test_sem_integral(self):
632 self.no_reads()
633 self.o.unlock = const(1.75)
634 self.assertIs(locking.charge_semaphore_overheads(self.o, True, False, self.ts), self.ts)
635 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + 2)
636
637 def test_sem_lock_only(self):
638 self.oheads()
639 self.no_reads()
640 self.assertIs(locking.charge_semaphore_overheads(self.o, True, False, self.ts), self.ts)
641 self.unchanged_period()
642 self.unchanged_deadline()
643 self.not_lossy_no_reads()
644
645 scost = 17 + 19
646 xcost = 3 + 5
647 ocost = 5
648
649 self.assertEqual(self.ts[0].cost, 10000 + 6 * xcost + 12 * scost)
650 self.assertEqual(self.ts[1].cost, 5000 + 4 * xcost + 8 * scost)
651
652 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
653 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + scost + ocost)
654 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
655 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + scost + ocost)
656
657 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
658 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + scost + ocost)
659 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
660 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + scost + ocost)
661
662 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
663 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + scost + ocost)
664 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
665 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + scost + ocost)
666
667 def test_sem_lock_and_sched(self):
668 self.oheads(sched=True)
669 self.no_reads()
670 self.assertIs(locking.charge_semaphore_overheads(self.o, True, False, self.ts), self.ts)
671 self.unchanged_period()
672 self.unchanged_deadline()
673 self.not_lossy_no_reads()
674
675 ecost = 2 * 17 + 2 * 19 + 3 + 5 + 3 * 27 + 3 * 31 + 2 * 41 + 23
676 xcost = 2 * 27 + 2 * 31 + 17 + 19 + 5 + 23
677
678 self.assertEqual(self.ts[0].cost, 10000 + 6 * ecost)
679 self.assertEqual(self.ts[1].cost, 5000 + 4 * ecost)
680
681 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
682 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + xcost)
683 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
684 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + xcost)
685
686 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
687 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + xcost)
688 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
689 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + xcost)
690
691 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
692 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + xcost)
693 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
694 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + xcost)
695
696 def test_sem_lock_and_sched_saw(self):
697 self.oheads(sched=True)
698 self.no_reads()
699 self.init_susp()
700 self.assertIs(locking.charge_semaphore_overheads(self.o, True, True, self.ts), self.ts)
701 self.unchanged_period()
702 self.unchanged_deadline()
703 self.not_lossy_no_reads()
704
705 ecost = 2 * 17 + 2 * 19 + 3 + 5 + 3 * 27 + 3 * 31 + 2 * 41
706 xcost = 2 * 27 + 2 * 31 + 17 + 19 + 5 + 23
707 esusp = 23
708
709 self.assertEqual(self.ts[0].cost, 10000 + 6 * ecost)
710 self.assertEqual(self.ts[1].cost, 5000 + 4 * ecost)
711
712 self.assertEqual(self.ts[0].suspended, 6 * esusp)
713 self.assertEqual(self.ts[1].suspended, 4 * esusp)
714
715 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
716 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + xcost)
717 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
718 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + xcost)
719
720 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
721 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + xcost)
722 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
723 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + xcost)
724
725 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
726 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + xcost)
727 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
728 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + xcost)
729
730 def test_sem_lock_and_sched_np(self):
731 self.oheads(sched=True)
732 self.no_reads()
733 self.assertIs(locking.charge_semaphore_overheads(self.o, False, False, self.ts), self.ts)
734 self.unchanged_period()
735 self.unchanged_deadline()
736 self.not_lossy_no_reads()
737
738 ecost = 2 * 17 + 2 * 19 + 3 + 5 + 3 * 27 + 3 * 31 + 2 * 41 + 23
739 xcost = 1 * 27 + 1 * 31 + 17 + 19 + 5 + 23
740 xcost_local = xcost + 27 + 31 # additional scheduler invocation
741
742 self.assertEqual(self.ts[0].cost, 10000 + 6 * ecost)
743 self.assertEqual(self.ts[1].cost, 5000 + 4 * ecost)
744
745 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
746 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + xcost)
747 self.assertEqual(self.ts[0].resmodel[0].max_write_length_local, 11 + xcost_local)
748 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
749 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + xcost)
750 self.assertEqual(self.ts[1].resmodel[0].max_write_length_local, 17 + xcost_local)
751
752 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
753 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + xcost)
754 self.assertEqual(self.ts[0].resmodel[1].max_write_length_local, 11 + xcost_local)
755 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
756 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + xcost)
757 self.assertEqual(self.ts[0].resmodel[1].max_write_length_local, 11 + xcost_local)
758
759 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
760 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + xcost)
761 self.assertEqual(self.ts[0].resmodel[2].max_write_length_local, 11 + xcost_local)
762 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
763 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + xcost)
764 self.assertEqual(self.ts[0].resmodel[2].max_write_length_local, 11 + xcost_local)
765
766 def test_dpcp_none(self):
767 self.assertIs(locking.charge_dpcp_overheads(None, self.ts), self.ts)
768 self.unchanged_period()
769 self.unchanged_deadline()
770 self.unchanged_cost()
771 self.not_lossy()
772 self.unchanged_resmodel()
773
774 def test_dpcp_zero(self):
775 self.no_reads()
776 self.init_susp()
777 self.assertIs(locking.charge_dpcp_overheads(self.o, self.ts), self.ts)
778 self.unchanged_period()
779 self.unchanged_deadline()
780 self.unchanged_cost()
781 self.not_lossy_no_reads()
782 self.unchanged_resmodel_no_reads()
783
784 def test_dpcp_infeasible(self):
785 self.no_reads()
786 self.init_susp()
787 self.o.syscall_in = const(10000000)
788 self.assertIs(locking.charge_dpcp_overheads(self.o, self.ts), False)
789
790 def test_dpcp_integral(self):
791 self.no_reads()
792 self.init_susp()
793 self.o.lock = const(1.75)
794 self.assertIs(locking.charge_dpcp_overheads(self.o, self.ts), self.ts)
795 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + 2)
796
797 def test_dpcp_lock_only(self):
798 self.oheads()
799 self.no_reads()
800 self.init_susp()
801 self.assertIs(locking.charge_dpcp_overheads(self.o, self.ts), self.ts)
802 self.unchanged_period()
803 self.unchanged_deadline()
804 self.not_lossy_no_reads()
805
806 ecost = 17 + 19
807 xcost = 17 + 19 + 3 + 5
808 esusp = 0 + xcost
809
810 self.assertEqual(self.ts[0].cost, 10000 + 6 * ecost)
811 self.assertEqual(self.ts[1].cost, 5000 + 4 * ecost)
812
813 self.assertEqual(self.ts[0].suspended, 6 * esusp)
814 self.assertEqual(self.ts[1].suspended, 4 * esusp)
815
816 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
817 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + xcost)
818 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
819 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + xcost)
820
821 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
822 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + xcost)
823 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
824 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + xcost)
825
826 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
827 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + xcost)
828 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
829 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + xcost)
830
831 def test_dpcp_and_sched(self):
832 self.oheads(sched=True)
833 self.no_reads()
834 self.init_susp()
835 self.assertIs(locking.charge_dpcp_overheads(self.o, self.ts), self.ts)
836 self.unchanged_period()
837 self.unchanged_deadline()
838 self.not_lossy_no_reads()
839
840 ecost = 17 + 19 + 2 * (27 + 31 + 41)
841 xcost = 17 + 19 + 3 + 5 + 3 * (27 + 31)
842 esusp = 2 * 23 + xcost
843
844 self.assertEqual(self.ts[0].cost, 10000 + 6 * ecost)
845 self.assertEqual(self.ts[1].cost, 5000 + 4 * ecost)
846
847 self.assertEqual(self.ts[0].suspended, 6 * esusp)
848 self.assertEqual(self.ts[1].suspended, 4 * esusp)
849
850 self.assertEqual(self.ts[0].resmodel[0].max_read_length, 0)
851 self.assertEqual(self.ts[0].resmodel[0].max_write_length, 11 + xcost)
852 self.assertEqual(self.ts[1].resmodel[0].max_read_length, 0)
853 self.assertEqual(self.ts[1].resmodel[0].max_write_length, 17 + xcost)
854
855 self.assertEqual(self.ts[0].resmodel[1].max_read_length, 0)
856 self.assertEqual(self.ts[0].resmodel[1].max_write_length, 11 + xcost)
857 self.assertEqual(self.ts[1].resmodel[1].max_read_length, 0)
858 self.assertEqual(self.ts[1].resmodel[1].max_write_length, 17 + xcost)
859
860 self.assertEqual(self.ts[0].resmodel[2].max_read_length, 0)
861 self.assertEqual(self.ts[0].resmodel[2].max_write_length, 11 + xcost)
862 self.assertEqual(self.ts[1].resmodel[2].max_read_length, 0)
863 self.assertEqual(self.ts[1].resmodel[2].max_write_length, 17 + xcost)
diff --git a/tests/pfair.py b/tests/pfair.py
new file mode 100644
index 0000000..1fa6786
--- /dev/null
+++ b/tests/pfair.py
@@ -0,0 +1,48 @@
1from __future__ import division
2
3import unittest
4
5from fractions import Fraction
6
7import schedcat.sched.pfair as p
8import schedcat.model.tasks as tasks
9
class Pfair(unittest.TestCase):
    """Tests for the Pfair schedulability and tardiness analysis."""

    def setUp(self):
        # three tasks with utilizations 0.8 + 0.5 + 0.7 = 2.0
        self.ts = tasks.TaskSystem([
            tasks.SporadicTask(80, 100),
            tasks.SporadicTask(33, 66),
            tasks.SporadicTask(7, 10),
        ])

    def test_bound(self):
        # schedulable on two processors, but not on one
        self.assertTrue(p.is_schedulable(2, self.ts))
        self.assertFalse(p.is_schedulable(1, self.ts))

    def test_deadlines(self):
        # relaxed deadlines do not hurt schedulability
        self.ts[0].deadline = 300
        self.ts[2].deadline = 11
        self.assertTrue(p.is_schedulable(2, self.ts))
        self.assertFalse(p.is_schedulable(1, self.ts))

        # a constrained deadline breaks hard schedulability...
        self.ts[1].deadline = 50
        self.assertFalse(p.is_schedulable(2, self.ts))
        self.assertFalse(p.is_schedulable(1, self.ts))

        # ...but tardiness remains bounded on two processors
        self.assertTrue(p.has_bounded_tardiness(2, self.ts))
        self.assertFalse(p.has_bounded_tardiness(1, self.ts))

    def test_tardiness(self):
        self.ts[0].deadline = 300
        self.ts[1].deadline = 50
        self.ts[2].deadline = 11

        self.assertTrue(p.bound_response_times(2, self.ts))

        # only the constrained-deadline task incurs tardiness
        self.assertEqual(self.ts[0].tardiness(), 0)
        self.assertEqual(self.ts[1].tardiness(), 16)
        self.assertEqual(self.ts[2].tardiness(), 0)

        self.assertFalse(p.bound_response_times(1, self.ts))
diff --git a/tests/quanta.py b/tests/quanta.py
new file mode 100644
index 0000000..8b2e7db
--- /dev/null
+++ b/tests/quanta.py
@@ -0,0 +1,105 @@
1from __future__ import division
2
3import unittest
4
5from fractions import Fraction
6
7import schedcat.overheads.quanta as q
8import schedcat.model.tasks as tasks
9
10from schedcat.util.math import is_integral
11
class Overheads(unittest.TestCase):
    """Tests for quantum-based overhead accounting."""

    def setUp(self):
        self.ts = tasks.TaskSystem([
            tasks.SporadicTask(100, 1000),
            tasks.SporadicTask(39, 1050),
            tasks.SporadicTask(51, 599),
        ])
        self.qlen = 50

    def test_wcet(self):
        # WCETs are rounded up to full quanta; periods/deadlines untouched
        q.quantize_wcet(self.qlen, self.ts)

        for tsk, wcet in zip(self.ts, (100, 50, 100)):
            self.assertEqual(tsk.cost, wcet)
            self.assertTrue(is_integral(tsk.cost))

        for tsk, period in zip(self.ts, (1000, 1050, 599)):
            self.assertEqual(tsk.period, period)

        for tsk, deadline in zip(self.ts, (1000, 1050, 599)):
            self.assertEqual(tsk.deadline, deadline)

    def test_ewcet(self):
        # a shorter effective quantum inflates the quantized WCETs
        q.quantize_wcet(self.qlen, self.ts, effective_qlen=25)

        for tsk, wcet in zip(self.ts, (200, 100, 150)):
            self.assertEqual(tsk.cost, wcet)
            self.assertTrue(is_integral(tsk.cost))

        for tsk, period in zip(self.ts, (1000, 1050, 599)):
            self.assertEqual(tsk.period, period)

        for tsk, deadline in zip(self.ts, (1000, 1050, 599)):
            self.assertEqual(tsk.deadline, deadline)

    def test_period(self):
        # periods are rounded down to quantum multiples; costs/deadlines kept
        q.quantize_period(self.qlen, self.ts)

        for tsk, wcet in zip(self.ts, (100, 39, 51)):
            self.assertEqual(tsk.cost, wcet)

        for tsk, period in zip(self.ts, (1000, 1050, 550)):
            self.assertTrue(is_integral(tsk.period))
            self.assertEqual(tsk.period, period)

        for tsk, deadline in zip(self.ts, (1000, 1050, 599)):
            self.assertEqual(tsk.deadline, deadline)

    def test_release_delay(self):
        # a release delay shrinks periods before quantization
        q.account_for_delayed_release(101, self.ts)
        q.quantize_period(self.qlen, self.ts)

        for tsk, wcet in zip(self.ts, (100, 39, 51)):
            self.assertEqual(tsk.cost, wcet)

        for tsk, period in zip(self.ts, (850, 900, 450)):
            self.assertTrue(is_integral(tsk.period))
            self.assertEqual(tsk.period, period)

    def test_staggering(self):
        # staggering on 4 CPUs costs (4 - 1)/4 of a quantum = 37.5 per period
        q.account_for_staggering(self.qlen, 4, self.ts)

        for tsk, period in zip(self.ts, (1000, 1050, 599)):
            self.assertAlmostEqual(tsk.period, period - 37.5)

        for tsk, wcet in zip(self.ts, (100, 39, 51)):
            self.assertEqual(tsk.cost, wcet)

        q.quantize_period(self.qlen, self.ts)

        for tsk, period in zip(self.ts, (950, 1000, 550)):
            self.assertEqual(tsk.period, period)
diff --git a/tests/sim.py b/tests/sim.py
new file mode 100644
index 0000000..9137d1c
--- /dev/null
+++ b/tests/sim.py
@@ -0,0 +1,26 @@
1from __future__ import division
2
3import unittest
4
5import schedcat.sim.edf as edf
6import schedcat.model.tasks as tasks
7
8from schedcat.util.math import is_integral
9
class EDFSimulator(unittest.TestCase):
    """Tests for the EDF schedule simulator (schedcat.sim.edf)."""

    def setUp(self):
        # three identical tasks, each of utilization 2/3 (total = 2.0)
        self.ts = tasks.TaskSystem([
            tasks.SporadicTask(2, 3),
            tasks.SporadicTask(2, 3),
            tasks.SporadicTask(2, 3),
        ])

    def test_deadline_miss(self):
        # one or two processors miss; three do not (short horizon suffices)
        self.assertTrue(edf.is_deadline_missed(1, self.ts))
        self.assertTrue(edf.is_deadline_missed(2, self.ts))
        self.assertFalse(edf.is_deadline_missed(3, self.ts, simulation_length=1))

    def test_deadline_miss_time(self):
        # the first miss coincides with the first deadline at time 3;
        # a miss-free run reports zero
        self.assertEqual(edf.time_of_first_miss(1, self.ts), 3)
        self.assertEqual(edf.time_of_first_miss(2, self.ts), 3)
        self.assertEqual(edf.time_of_first_miss(3, self.ts, simulation_length=1), 0)
diff --git a/tests/util.py b/tests/util.py
new file mode 100644
index 0000000..04a0738
--- /dev/null
+++ b/tests/util.py
@@ -0,0 +1,101 @@
1from __future__ import division
2
3import unittest
4
5from fractions import Fraction
6
7import schedcat.util.iter as iter
8import schedcat.util.math as m
9
class Iters(unittest.TestCase):
    """Tests for the lazy merge/uniq helpers in schedcat.util.iter."""

    def setUp(self):
        self.s1 = xrange(1, 1000, 3)
        self.s2 = xrange(4, 1000, 5)
        self.s3 = [-3, 6000]
        self.s1b = xrange(1, 1000, 3)
        self.s1c = xrange(1, 1000, 3)

    def test_imerge(self):
        # ascending merge of three sorted sequences, duplicates kept
        merged = iter.imerge(lambda x, y: x < y, self.s1, self.s2, self.s3)
        self.assertEqual(list(merged)[:10],
                         [-3, 1, 4, 4, 7, 9, 10, 13, 14, 16])

    def test_imerge2(self):
        # descending merge: reverse three ranges and merge with >=
        a = range(10)
        b = range(1, 6)
        c = range(3, 14)
        a.reverse()
        b.reverse()
        c.reverse()
        merged = iter.imerge(lambda a, b: a >= b, a, b, c)
        self.assertEqual(list(merged),
                         [13, 12, 11, 10,
                          9, 9, 8, 8, 7, 7, 6, 6,
                          5, 5, 5, 4, 4, 4, 3, 3, 3,
                          2, 2, 1, 1,
                          0])

    def test_uniq(self):
        # uniq collapses equal neighbors produced by the merge
        merged = iter.imerge(lambda x, y: x < y, self.s1, self.s2, self.s3)
        self.assertEqual(list(iter.uniq(merged))[:10],
                         [-3, 1, 4, 7, 9, 10, 13, 14, 16, 19])
41
42
class Math(unittest.TestCase):
    """Tests for the arithmetic helpers in schedcat.util.math.

    Cleanup: test_topsum previously repeated two ``m.lcm`` assertions that
    were copy-and-paste residue from test_lcm; they have been removed.
    """

    def test_integral(self):
        # only true integer types count as integral
        self.assertTrue(m.is_integral(int(1)))
        self.assertTrue(m.is_integral(long(1)))
        self.assertFalse(m.is_integral("foo"))
        self.assertFalse(m.is_integral(1.0))
        # true division (__future__ import) yields a float even for 20 / 1
        self.assertFalse(m.is_integral(20 / 1))
        self.assertFalse(m.is_integral(Fraction(100, 10)))

    def test_gcd(self):
        # gcd is positive regardless of argument signs; gcd(x, 0) == x
        self.assertEqual(m.gcd(10, 3), 1)
        self.assertEqual(m.gcd(10, 2), 2)
        self.assertEqual(m.gcd(15, 27), 3)
        self.assertEqual(m.gcd(-10, 2), 2)
        self.assertEqual(m.gcd(-20, -1930), 10)
        self.assertEqual(m.gcd(10, 0), 10)
        self.assertEqual(m.gcd(0, 10), 10)
        self.assertEqual(m.gcd(10, 20), 10)
        self.assertEqual(m.gcd(113, 17), 1)
        self.assertEqual(m.gcd(-23, 17), 1)
        self.assertEqual(m.gcd(-23, -54), 1)

    def test_lcm(self):
        # lcm() of nothing is 0; lcm of one value is that value
        self.assertEqual(m.lcm(), 0)
        self.assertEqual(m.lcm(99), 99)
        self.assertEqual(m.lcm(10, 20, 3), 60)
        self.assertEqual(m.lcm(10, 20), 20)
        self.assertEqual(m.lcm(3, 4), 12)

    def test_topsum(self):
        # sum of the mapped top-3 values (40, 50, 60), each doubled
        vals = [30, 60, 10, 40, 50, 20]
        self.assertEqual(m.topsum(vals, lambda x: x * 2, 3), 2 * (40 + 50 + 60))
77
78
class LinEqs(unittest.TestCase):
    """Tests for the constant/linear/piecewise-linear function factories."""

    def setUp(self):
        self.f = m.lin(1, 3)
        self.c = m.const(123)
        # input points are not monotonic; presumably monotonic_pwlin
        # sanitizes them -- see test_pwlin for the resulting shape
        self.pwlin = m.monotonic_pwlin([(0, 1), (1, 0), (1, 4), (2, 5)])

    def test_const(self):
        # a constant function ignores its argument
        for x in xrange(1000):
            self.assertAlmostEqual(self.c(x), 123)

    def test_lin(self):
        # lin(a, b) evaluates to a + b * x
        for x in xrange(1000):
            self.assertAlmostEqual(self.f(x), 1 + x * 3.0)

    def test_pwlin(self):
        # flat at 1 for x <= 1, then linear (x + 3) for x >= 2
        for x in xrange(1000):
            self.assertAlmostEqual(self.pwlin(-x), 1)
        self.assertAlmostEqual(self.pwlin(1), 1)
        for x in xrange(2, 1002):
            self.assertAlmostEqual(self.pwlin(x), x + 3)
100
101