# schedcat/locking/linprog/common.py
from __future__ import division
from collections import defaultdict

class VariableNameCache(dict):
    """Memoized mapping from (task_id, resource_id, request_num) keys to
    LP variable names of the form '<basename>_<task>_<res>_<req>'."""

    def __init__(self, basename):
        self.basename = basename

    def __missing__(self, key):
        # unpack key tuple
        task_id, resource_id, request_num = key
        var = '%s_%d_%d_%d' % (self.basename, task_id, resource_id, request_num)
        self[key] = var
        return var

    def __call__(self, task_id, resource_id, request_num):
        return self[(task_id, resource_id, request_num)]

var_direct   = VariableNameCache('XD')
var_indirect = VariableNameCache('XI')
var_preempt  = VariableNameCache('XP')
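# For example, var_direct(3, 0, 1) yields the name 'XD_3_0_1', i.e., the LP
# variable for direct blocking due to the second request for resource 0
# issued by the task with id 3; repeated lookups return the cached name.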

def max_num_requests(tx, ti, res_id):
    # maximum number of requests for res_id that jobs of tx can issue
    # while a single job of ti is pending (i.e., during ti's response time)
    return tx.maxjobs(ti.response_time) * tx.resmodel[res_id].max_requests

def enumerate_requests(tx, ti, res_id):
    if res_id in tx.resmodel:
        return xrange(max_num_requests(tx, ti, res_id))
    else:
        return []
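
# Example: if tx can release up to two jobs while a job of ti is pending and
# each job issues at most three requests for res_id, then
# enumerate_requests(tx, ti, res_id) yields the request indices 0..5.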

def find_per_cluster_resources(resource_mapping, taskset=None):
    per_cluster_resources = defaultdict(set)

    if taskset is None:
        # default: count all resources
        for res_id in resource_mapping:
            c = resource_mapping[res_id]
            per_cluster_resources[c].add(res_id)
    else:
        # only count resources accessed by tasks
        for tx in taskset:
            for res_id in tx.resmodel:
                c = resource_mapping[res_id]
                per_cluster_resources[c].add(res_id)

    return per_cluster_resources
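
# Illustration: with resource_mapping = {0: 0, 1: 0, 2: 1} and no task set
# given, this returns {0: set([0, 1]), 1: set([2])}, i.e., resources grouped
# by the cluster (processor) that serves their requests.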

def set_blocking_objective(resource_mapping,
                           taskset, ti, linprog):
    """Set the LP objective: maximize the total blocking that requests of
    other tasks can inflict on ti. The local and remote parts of the
    objective are additionally recorded on the LP for later evaluation."""
    objective = []
    local     = []
    remote    = []

    # Each request of another task may contribute direct blocking (XD),
    # indirect blocking (XI), or preemption blocking (XP), in each case
    # weighted by the maximum critical section length.
    for var in (var_direct, var_indirect, var_preempt):
        for tx in taskset.without(ti):
            for res_id in tx.resmodel:
                is_local = resource_mapping[res_id] == ti.partition
                coeff = tx.resmodel[res_id].max_length
                for req_num in enumerate_requests(tx, ti, res_id):
                    x = var(tx.id, res_id, req_num)
                    objective.append( (coeff, x) )
                    if is_local:
                        local.append( (coeff, x) )
                    else:
                        remote.append( (coeff, x) )

    linprog.set_objective(objective)
    linprog.local_objective = local
    linprog.remote_objective = remote
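
# Each objective term is a (coefficient, variable) pair: e.g., a term
# (L, 'XD_x_q_v') contributes up to L time units of direct blocking if the
# LP assigns one to XD_x_q_v, where L is the maximum critical section length
# of task x's requests for resource q. Maximizing the objective therefore
# yields a safe upper bound on ti's total blocking.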

def add_topology_constraints(resource_mapping,
                             taskset, ti, linprog):
    """Constraint 2: no remote preemption blocking
    """

    vector = []

    for tx in taskset.without(ti):
        for res_id in tx.resmodel:
            if resource_mapping[res_id] != ti.partition:
                for req_num in enumerate_requests(tx, ti, res_id):
                    xp = var_preempt(tx.id, res_id, req_num)
                    vector.append( (1, xp) )

    if vector:
        linprog.add_equality(vector, 0)

def add_var_mutex_constraints(taskset, ti, linprog):
    """Constraint 1: the three kinds of blocking a mutually exclusive
    """
    for tx in taskset.without(ti):
        for res_id in tx.resmodel:
            for req_num in enumerate_requests(tx, ti, res_id):
                xd = var_direct(tx.id, res_id, req_num)
                xi = var_indirect(tx.id, res_id, req_num)
                xp = var_preempt(tx.id, res_id, req_num)
                linprog.add_inequality([(1, xd), (1, xi), (1, xp)], 1)
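
# In LP terms, for every request (tx, res_id, req_num) this adds
#     XD + XI + XP <= 1,
# ensuring a single request is counted as at most one kind of blocking.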

def find_per_resource_tasks(taskset):
    per_resource_tasks = defaultdict(set)
    for tx in taskset:
        for res_id in tx.resmodel:
            per_resource_tasks[res_id].add(tx)
    return per_resource_tasks
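
# Example: if t1 and t2 both access resource 0 and only t2 accesses resource
# 1, this returns {0: set([t1, t2]), 1: set([t2])}.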


def example():
    from schedcat.util.linprog import LinearProgram
    from schedcat.model.tasks import SporadicTask, TaskSystem
    from schedcat.model.resources import initialize_resource_model
    import schedcat.util.linprog

    t1 = SporadicTask(10, 100)
    t2 = SporadicTask(25, 200)
    t3 = SporadicTask(33, 33)
    ts = TaskSystem([t1, t2, t3])

    ts.assign_ids()
    initialize_resource_model(ts)

    t1.resmodel[0].add_request(1)
    t2.resmodel[0].add_request(2)
    t3.resmodel[0].add_request(3)

    for t in ts:
        t.response_time = t.period
        t.partition = t.id % 2

    # only one resource, assigned to the first processor
    resource_locality = { 0: 0 }
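
    # Illustrative sanity check (not part of the original example): both
    # helpers above should report that resource 0 is served by cluster 0
    # and is accessed by all three tasks.
    print 'per-cluster resources:', dict(find_per_cluster_resources(resource_locality, ts))
    print 'tasks per resource:', dict(find_per_resource_tasks(ts))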

    lp = LinearProgram()
    # Configure blocking objective.
    set_blocking_objective(resource_locality, ts, t1, lp)

    print lp

    print '*' * 80
    print 'Adding mutex constraints:'
    add_var_mutex_constraints(ts, t1, lp)
    print lp

    print '*' * 80
    print 'Adding topology constraints:'
    add_topology_constraints(resource_locality, ts, t1, lp)
    print lp

    from .dflp import get_lp_for_task as get_dflp_lp
    from .dpcp import get_lp_for_task as get_dpcp_lp

    print '*' * 80
    print 'DFLP LP:'
    lp = get_dflp_lp(resource_locality, ts, t1)
    print lp

    lp.kill_non_positive_vars()
    print 'DFLP LP (simplified)'
    print lp

    if schedcat.util.linprog.cplex_available:
        sol = lp.solve()

        print 'Solution: local=%d remote=%d' % \
            (sol.evaluate(lp.local_objective), sol.evaluate(lp.remote_objective))
        for x in sol:
            print x, '->', sol[x]

    print '*' * 80
    print 'DPCP LP:'
    lp = get_dpcp_lp(resource_locality, ts, t1)
    print lp

    lp.kill_non_positive_vars()
    print 'DPCP LP (simplified)'
    print lp

    if schedcat.util.linprog.cplex_available:
        sol = lp.solve()

        print 'Solution: local=%d remote=%d' % \
            (sol.evaluate(lp.local_objective), sol.evaluate(lp.remote_objective))
        for x in sol:
            print x, '->', sol[x]


if __name__ == '__main__':
    example()