import generator as gen
import random
import ecrts14.partition as partition
import schedcat.sched.split_heuristic as split
import ecrts14.ecrts14 as ecrts14
import ecrts14.tests as tests
from ecrts14.ecrts14 import create_pgm_task_set
from ecrts14.ecrts14 import get_overheads
from config.config import FILES, PARAMS
from schedcat.overheads.model import Overheads, CacheDelay, ConsumerOverheads, ProducerOverheads
from schedcat.generator.tasksets import NAMED_UTILIZATIONS
from schedcat.mapping.binpack import DidNotFit

# Template fragments for per-task schedule entries (global, partitioned,
# and clustered variants).
TP_TBASE = """#for $t in $task_set
{} $t.cost $t.period
#end for"""
TP_GLOB_TASK = TP_TBASE.format("")
TP_PART_TASK = TP_TBASE.format("-p $t.cpu")
TP_CLST_TASK = TP_TBASE.format("-p $t.cluster -z $t.cluster_sz")

PARTITION_METHOD = {
    'no_cache'             : tests.test_partition_no_cache,
    'parallel'             : tests.test_partition_parallel,
    'cache_aware'          : tests.test_partition_cache_aware,
    'cache_aware_parallel' : tests.test_partition_cache_aware_parallel,
    'cache_aware_edges'    : tests.test_partition_cache_aware_edges,
    'cache_aware_bfs'      : tests.test_partition_cache_aware_bfs,
    'cache_aware_dfs'      : tests.test_partition_cache_aware_dfs,
}

class EdfGenerator(gen.Generator):
    '''Creates sporadic task sets with the most common Litmus options.'''
    def __init__(self, scheduler, templates, options, params):
        super(EdfGenerator, self).__init__(scheduler, templates,
                                           self.__make_options() + options,
                                           params)

    def __make_options(self):
        '''Return generic EDF options.'''
        return [gen.Generator._dist_option('utils', 'uni-medium',
                                           gen.NAMED_UTILIZATIONS,
                                           'Task utilization distributions.'),
                gen.Generator._dist_option('periods', 'harmonic',
                                           gen.NAMED_PERIODS,
                                           'Task period distributions.')]

    def _create_exp(self, exp_params):
        '''Create a single experiment with @exp_params in @out_dir.'''
        pdist = self._create_dist('period',
                                  exp_params['periods'],
                                  gen.NAMED_PERIODS)
        udist = self._create_dist('utilization',
                                  exp_params['utils'],
                                  gen.NAMED_UTILIZATIONS)
        ts = self._create_taskset(exp_params, pdist, udist)

        self._customize(ts, exp_params)

        self._write_schedule(dict(exp_params.items() + [('task_set', ts)]))
        self._write_params(exp_params)

    def _customize(self, taskset, exp_params):
        '''Configure a generated taskset with extra parameters.'''
        pass

class EdfPgmGenerator(gen.Generator):
    '''Creates sporadic task sets with the most common Litmus options.'''
    def __init__(self, scheduler, templates, options, params):
        super(EdfPgmGenerator, self).__init__(scheduler, templates,
                                              self.__make_options() + options,
                                              params)

    def __make_options(self):
        '''Return generic EDF options.'''
        return [gen.Generator._dist_option('period', ['uni-moderate'],
                                           ecrts14.NAMED_PERIODS_US,
                                           'Task period distributions.'),
                gen.Generator._dist_option('num_graphs', ['uni-medium'],
                                           ecrts14.NAMED_NUM_GRAPHS,
                                           'Number of graphs.'),
#                gen.Generator._dist_option('depth_factor', ['uni-medium'],
                gen.Generator._dist_option('depth_factor', ['pipeline'],
                                           ecrts14.NAMED_HEIGHT_FACTORS,
                                           'Depth of graphs.'),
                gen.Generator._dist_option('clustering', ['L1', 'L2', 'L3', 'ALL'],
                                           {},
                                           'Clustering configurations.'),
                gen.Generator._dist_option('partitions',
                                           ['no_cache', 'parallel', 'cache_aware', 'cache_aware_parallel'],
                                           PARTITION_METHOD,
                                           'Partition methods.'),
                gen.Generator._dist_option('node_placement', ['uniform'],
                                           ecrts14.NAMED_SHAPES,
                                           'The node placement of a graph.'),
#                gen.Generator._dist_option('fan_out', ['uniform_3'],
                gen.Generator._dist_option('fan_out', ['none'],
                                           ecrts14.NAMED_FAN,
                                           'The number of out-edges of a node.'),
                gen.Generator._dist_option('fan_in_cap', [3],
                                           {},
                                           'The maximum number of in-edges of a node.'),
#                gen.Generator._dist_option('edge_distance', ['uniform_3'],
                gen.Generator._dist_option('edge_distance', ['none'],
                                           ecrts14.NAMED_EDGE_HOP,
                                           'The number of hops for an edge.'),
                gen.Generator._dist_option('wss', ['uni-light', 'uni-medium', 'uni-heavy'],
                                           ecrts14.NAMED_EDGE_WSS,
                                           'Working set size.'),
                gen.Generator._dist_option('task_util', ['uni-medium'],
                                           NAMED_UTILIZATIONS,
                                           'Task utilization.'),
                gen.Generator._dist_option('polluters', [False],
                                           {},
                                           'Polluters.'),
                gen.Generator._dist_option('job_splitting', [True],
                                           {},
                                           'Job splitting.'),
                gen.Generator._dist_option('ovh_type', 'max',
                                           {},
                                           'Overhead type.'),
                gen.Generator._dist_option('heur_aggressiveness', 0.75,
                                           {},
                                           'heur_aggressiveness.'),
                gen.Generator._dist_option('sys_util', [18.0, 13.0, 8.0],
                                           {},
                                           'Task set utilization.')]

    def _create_exp(self, dp, ts, graphs, subts):
        '''Create a single experiment with @exp_params in @out_dir.'''
        ret, ts = self._customize(ts, graphs, subts, dp)

        if ret:
            self._write_pgm_schedule(dict(dp.items() +
                                          [('task_set', ts)] +
                                          [('graphs', graphs)] +
                                          [('sub_task_set', subts)]))
            self._write_params(dict(dp.items() +
                                    [('num_tasks', len(ts)),
                                     ('num_graphs', len(graphs))]))
        return ret

    def _create_tasks(self, dp):
        '''Create a task set.'''
        ts, graphs, subts = create_pgm_task_set(dp)
        return ts, graphs, subts

    def _customize(self, taskset, exp_params):
        '''Configure a generated taskset with extra parameters.'''
        pass

class PartitionedGenerator(EdfGenerator):
    def __init__(self, scheduler, templates, options, params):
        super(PartitionedGenerator, self).__init__(scheduler,
                                                   templates + [TP_PART_TASK],
                                                   options, params)

    def _customize(self, taskset, exp_params):
        cpus  = exp_params['cpus']
        start = 0
        if exp_params['release_master']:
            cpus -= 1
            start = 1

        # Partition using worst-fit for most even distribution
        utils = [0]*cpus
        tasks = [0]*cpus
        for t in taskset:
            t.cpu = utils.index(min(utils))
            utils[t.cpu] += t.utilization()
            tasks[t.cpu] += 1

            # Increment by one so release master has no tasks
            t.cpu += start

class PedfGenerator(PartitionedGenerator):
    def __init__(self, params={}):
        super(PedfGenerator, self).__init__("PSN-EDF", [], [], params)

class CedfGenerator(PartitionedGenerator):
    TP_CLUSTER = "plugins/C-EDF/cluster{$level}"
    CLUSTER_OPTION = gen.GenOption('level', ['L2', 'L3', 'ALL'], 'L2',
                                   'Cache clustering level.',)

    def __init__(self, params={}):
        super(CedfGenerator, self).__init__("C-EDF",
                                            [CedfGenerator.TP_CLUSTER],
                                            [CedfGenerator.CLUSTER_OPTION],
                                            params)

class CflSplitGenerator(EdfGenerator):
    TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
                                   'Cache clustering level.',)

    def __init__(self, params={}):
        super(CflSplitGenerator, self).__init__("C-FL-split",
                                                [CflSplitGenerator.TP_CLUSTER, TP_CLST_TASK],
                                                [CflSplitGenerator.CLUSTER_OPTION],
                                                params)

    def _customize(self, taskset, exp_params):
        cpus = int(exp_params['cpus'])

        # Map the cache clustering level to a cluster size:
        # L1 = 1 CPU, L2 = 2, L3 = 6, ALL = 24 (all CPUs share one cluster).
        if exp_params['level'] == 'L1':
            cluster_sz = 1
        elif exp_params['level'] == 'L2':
            cluster_sz = 2
        elif exp_params['level'] == 'L3':
            cluster_sz = 6
        elif exp_params['level'] == 'ALL':
            cluster_sz = 24
        else:
            assert False

        num_clusters = cpus / cluster_sz
        assert num_clusters * cluster_sz == cpus

        # Worst-fit assignment of tasks to clusters.
        utils = [0]*num_clusters
        tasks = [0]*num_clusters
        for t in taskset:
            t.cluster = utils.index(min(utils))
            t.cluster_sz = cluster_sz
            utils[t.cluster] += t.utilization()
            tasks[t.cluster] += 1

class GedfGenerator(EdfGenerator):
    def __init__(self, params={}):
        super(GedfGenerator, self).__init__("GSN-EDF", [TP_GLOB_TASK],
                                            [], params)

class CflSplitPgmGenerator(EdfPgmGenerator):
    TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
#    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
#                                   'Cache clustering level.',)

    def __init__(self, params={}):
        super(CflSplitPgmGenerator, self).__init__("C-FL-split",
                                                   [CflSplitPgmGenerator.TP_CLUSTER],
                                                   [], params)

    def _customize(self, ts, graphs, subts, dp):
        exp_params = dict(dp.items())
        cpus = int(exp_params['cpus'])

        # Map the cache clustering level to a cluster size (same mapping as
        # CflSplitGenerator above).
        if exp_params['level'] == 'L1':
            cluster_sz = 1
        elif exp_params['level'] == 'L2':
            cluster_sz = 2
        elif exp_params['level'] == 'L3':
            cluster_sz = 6
        elif exp_params['level'] == 'ALL':
            cluster_sz = 24
        else:
            assert False

        exp_params['fan_in_cap'] = int(exp_params['fan_in_cap'])
        dp.nr_clusters = cpus / cluster_sz
        assert dp.nr_clusters * cluster_sz == cpus

        overheads = get_overheads(dp, dp.system)

        # do the partitioning here
        ts = partition.clear_partitioning(ts)

        if overheads.consumer is not None:
            for t in ts:
                overheads.consumer.place_production(t)

        # the test will compute job splits if it is in the design point
        is_sched, ts = PARTITION_METHOD[exp_params['partitions']](ts, graphs, subts,
                                                                  dp, overheads)

        if exp_params['level'] == 'ALL':
            # kludge: assume global task sets are always schedulable
            is_sched = True

        if is_sched:
            # compute the minimum time to produce/consume, so this can be
            # discounted from the execution time during runtime
            for ti in ts:
                consume_amount = ti.wss
                produce_amount = sum([e.wss for e in ti.node.outEdges])
                consume_time = overheads.read(consume_amount)
                produce_time = overheads.write(produce_amount)
                ti.cost_discount = consume_time + produce_time

        return is_sched, ts