aboutsummaryrefslogtreecommitdiffstats
path: root/ecrts14/ecrts14.py
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2014-01-31 21:52:27 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2014-01-31 21:52:27 -0500
commit14782cc1d7344f002415d5b54db0f8e091ade4d3 (patch)
treeea09b4c310ebcb762c71c0e795ced064c3ca4e0f /ecrts14/ecrts14.py
parent4cb266503cb7f6f5b7504b22451911c54bcc1f99 (diff)
Support for raw read/write overheads.
We need information about the time it takes to actually read and write data when we generate task sets for actual execution. This patch adds these overheads and support for them.
Diffstat (limited to 'ecrts14/ecrts14.py')
-rwxr-xr-xecrts14/ecrts14.py21
1 files changed, 20 insertions, 1 deletions
diff --git a/ecrts14/ecrts14.py b/ecrts14/ecrts14.py
index 9f55ea8..bdf9f5e 100755
--- a/ecrts14/ecrts14.py
+++ b/ecrts14/ecrts14.py
@@ -24,7 +24,7 @@ from pprint import pprint
24import traceback 24import traceback
25 25
26from schedcat.model.tasks import SporadicTask, TaskSystem 26from schedcat.model.tasks import SporadicTask, TaskSystem
27from schedcat.overheads.model import Overheads, CacheDelay, ConsumerOverheads, ProducerOverheads 27from schedcat.overheads.model import Overheads, CacheDelay, ConsumerOverheads, ProducerOverheads, RawOverheads
28 28
29import schedcat.model.resources as resources 29import schedcat.model.resources as resources
30import schedcat.generator.tasks as tasks 30import schedcat.generator.tasks as tasks
@@ -245,6 +245,23 @@ def get_ovh_dir():
245 ovh_dir = os.path.join(parent, 'overheads') 245 ovh_dir = os.path.join(parent, 'overheads')
246 return ovh_dir 246 return ovh_dir
247 247
def get_read_overheads(dp, _system):
    """Load the raw data-read overheads for design point *dp*.

    Builds the measurement CSV filename from the design-point parameters
    (host, page-walk mode, huge-page and uncached-page settings, overhead
    type) under the 'read/' subdirectory of the overheads directory, and
    parses it with RawOverheads.from_file().

    Parameters:
        dp      -- design point; must provide host, walk, huge_pages,
                   uncached and ovh_type attributes.
        _system -- unused; kept for signature parity with the other
                   get_*_overheads() helpers so they are interchangeable.

    Returns a RawOverheads instance.
    """
    # Read overheads were only measured without cache polluters, so the
    # filename component is hard-coded rather than taken from dp.polluters.
    polluters = 'False'
    read_file = '%s/read/dro_hot_host=%s_lvl=mem_polluters=%s_walk=%s_hpages=%s_upages=%s_type=%s.csv' % (dp.host, dp.host, polluters, dp.walk, str(dp.huge_pages), str(dp.uncached), dp.ovh_type)
    read_file = os.path.join(get_ovh_dir(), read_file)
    # NOTE: fixed stale comment — this parses with RawOverheads, not the
    # CPMD/CacheDelay class.
    ro = RawOverheads.from_file(read_file)
    return ro
256
def get_write_overheads(dp, _system):
    """Load the raw data-write overheads for design point *dp*.

    Mirrors get_read_overheads(), but reads the cold-cache write
    measurement file ('write/dwo_cold_...') instead. The *_system*
    argument is unused and exists only for signature parity with the
    other get_*_overheads() helpers.

    Returns a RawOverheads instance.
    """
    # Write overheads were only measured without cache polluters.
    no_polluters = 'False'
    fname = '%s/write/dwo_cold_host=%s_lvl=mem_polluters=%s_walk=%s_hpages=%s_upages=%s_type=%s.csv' % (dp.host, dp.host, no_polluters, dp.walk, str(dp.huge_pages), str(dp.uncached), dp.ovh_type)
    return RawOverheads.from_file(os.path.join(get_ovh_dir(), fname))
264
248def get_consumer_overheads(dp, _system): 265def get_consumer_overheads(dp, _system):
249 co_file = '%s/consumer/dco_host=%s_lvl=mem_polluters=%s_walk=%s_hpages=%s_upages=%s_type=%s.csv' % (dp.host, dp.host, str(dp.polluters), dp.walk, str(dp.huge_pages), str(dp.uncached), dp.ovh_type) 266 co_file = '%s/consumer/dco_host=%s_lvl=mem_polluters=%s_walk=%s_hpages=%s_upages=%s_type=%s.csv' % (dp.host, dp.host, str(dp.polluters), dp.walk, str(dp.huge_pages), str(dp.uncached), dp.ovh_type)
250 co_file = os.path.join(get_ovh_dir(), co_file) 267 co_file = os.path.join(get_ovh_dir(), co_file)
@@ -276,6 +293,8 @@ def get_overheads(dp, system = None):
276 ovh.cache_affinity_loss.set_max_wss(max_wss) 293 ovh.cache_affinity_loss.set_max_wss(max_wss)
277 ovh.consumer = get_consumer_overheads(dp, system) 294 ovh.consumer = get_consumer_overheads(dp, system)
278 ovh.producer = get_producer_overheads(dp) 295 ovh.producer = get_producer_overheads(dp)
296 ovh.read = get_read_overheads(dp, system)
297 ovh.write = get_write_overheads(dp, system)
279 return ovh 298 return ovh
280 299
281def process_dp(_dp): 300def process_dp(_dp):