# schedcat/overheads/locking.py
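
"""Overhead accounting for locking protocols: inflation of task execution
costs, suspension times, and critical-section lengths to reflect kernel
and hardware overheads (syscalls, scheduling, cache-affinity loss, IPIs).
"""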
from __future__ import division

from math import ceil

# All overhead accounting in this file assumes the absence of interrupts.

def charge_spinlock_overheads(oheads, tasks):
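    """Inflate critical-section lengths and execution costs to account for
    spinlock overheads: lock/unlock, read-lock/read-unlock, and syscall
    entry/exit. Returns the modified task set, or False if inflation
    renders some task infeasible (density exceeding one).
    """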
    if oheads is None or not tasks:
        return tasks

    ntasks = len(tasks)
    # the individual charges
    rcost  = oheads.read_lock(ntasks) + oheads.read_unlock(ntasks)
    wcost  = oheads.lock(ntasks) + oheads.unlock(ntasks)
    scost  = oheads.syscall_in(ntasks) + oheads.syscall_out(ntasks)

    # inflate each request and each task's exec cost
    for t in tasks:
        extra_wcet = 0

        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            if req.max_reads:
                req.max_read_length += rcost
                req.max_read_length = int(ceil(req.max_read_length))
                extra_wcet += req.max_reads * rcost

            if req.max_writes:
                req.max_write_length += wcost
                req.max_write_length = int(ceil(req.max_write_length))
                extra_wcet += req.max_writes * wcost
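
            # every request, read or write, incurs syscall entry/exit costs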
            extra_wcet += req.max_requests * scost

        t.cost += int(ceil(extra_wcet))
        if t.density() > 1:
            return False
    return tasks

# for shared-memory semaphore protocols such as the MPCP, FMLP, and OMLP
def charge_semaphore_overheads(oheads, preemptive, suspension_aware, tasks):
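    """Inflate critical-section lengths, execution costs, and (if
    applicable) suspension times to account for semaphore overheads.

    preemptive       -- True if lock holders remain preemptable; for
                        non-preemptive protocols, an additional local
                        critical-section length (max_write_length_local)
                        is maintained.
    suspension_aware -- True for suspension-aware (s-aware) analysis, in
                        which wake-up IPI latency counts as suspension
                        time; under s-oblivious analysis it is charged
                        as execution time instead.

    Returns the modified task set, or False if inflation renders some
    task infeasible (density exceeding one).
    """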
    if oheads is None or not tasks:
        return tasks

    ntasks = len(tasks)
    lock   = oheads.lock(ntasks)
    unlock = oheads.unlock(ntasks)
    sysin  = oheads.syscall_in(ntasks)
    sysout = oheads.syscall_out(ntasks)
    sched  = oheads.schedule(ntasks) + oheads.ctx_switch(ntasks)
    cpmd   = oheads.cache_affinity_loss(ntasks)
    ipi    = oheads.ipi_latency(ntasks)

    # per-request execution cost increase (Eq. 7.3)
    # 3 sched: wait + resume + yield
    # 2 sysin/sysout: kernel entry and exit for the lock and unlock calls
    exec_increase = 3 * sched + \
        2 * sysin  + \
        2 * sysout + \
        1 *   lock + \
        1 * unlock + \
        2 * cpmd

    # wake-up delay: the IPI latency until the blocked task resumes
    if suspension_aware:
        susp_increase = ipi
    else:
        # s-oblivious analysis: count the IPI latency as execution time
        susp_increase = 0
        exec_increase += ipi

    # For non-preemptive protocols, this is the remote case.
    # Additional local costs are charged separately.
    # This only affects the FMLP+, the partitioned OMLP, and the
    # clustered OMLP.
    cs_increase = ipi + sched + sysout + sysin + unlock

    # preemptive protocols: add in the additional scheduling cost
    if preemptive:
        cs_increase += sched
    else:
        # non-preemptive semaphores: add the additional delay to the
        # local cost only
        cs_increase_local = cs_increase + sched

    # inflate each request and each task's exec cost
    for t in tasks:
        extra_wcet = 0
        extra_susp = 0

        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            assert req.max_reads == 0 # doesn't handle RW at the moment

            if req.max_writes:
                if not preemptive:
                    req.max_write_length_local = int(ceil(req.max_write_length + cs_increase_local))
                req.max_write_length += cs_increase
                req.max_write_length = int(ceil(req.max_write_length))
                extra_wcet += req.max_writes * exec_increase
                extra_susp += req.max_writes * susp_increase

        t.cost    += int(ceil(extra_wcet))
        if suspension_aware:
            t.suspended += int(ceil(extra_susp))
        if t.density() > 1:
            return False
    return tasks

def charge_dpcp_overheads(oheads, tasks):
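    """Inflate critical-section lengths, execution costs, and suspension
    times for the DPCP (distributed priority ceiling protocol), under
    which requests are carried out by an agent on a remote processor
    while the requesting task self-suspends.
    Returns the modified task set, or False if inflation renders some
    task infeasible (density exceeding one).
    """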
    if oheads is None or not tasks:
        return tasks

    ntasks = len(tasks)
    lock   = oheads.lock(ntasks)
    unlock = oheads.unlock(ntasks)
    sysin  = oheads.syscall_in(ntasks)
    sysout = oheads.syscall_out(ntasks)
    sched  = oheads.schedule(ntasks) + oheads.ctx_switch(ntasks)
    cpmd   = oheads.cache_affinity_loss(ntasks)
    ipi    = oheads.ipi_latency(ntasks)

    # local per-request cost: one syscall, suspend/resume scheduling,
    # and cache-affinity loss
    exec_increase = sysin + sysout + 2 * sched + 2 * cpmd
    # inflation of the critical section as executed by the remote agent
    cs_increase   = 3 * sched + sysin + sysout + lock + unlock
    # the suspension spans two IPIs (one to invoke the agent, one to
    # wake the client) plus the remote critical section
    susp_increase = 2 * ipi   + cs_increase

    # inflate each request and each task's exec cost
    for t in tasks:
        extra_wcet = 0
        extra_susp = 0

        for res_id in t.resmodel:
            req = t.resmodel[res_id]
            assert req.max_reads == 0 # DPCP doesn't handle RW

            if req.max_writes:
                req.max_write_length += cs_increase
                req.max_write_length = int(ceil(req.max_write_length))
                extra_wcet += req.max_writes * exec_increase
                extra_susp += req.max_writes * susp_increase

        t.cost    += int(ceil(extra_wcet))
        t.suspended += int(ceil(extra_susp))
        if t.density() > 1:
            return False
    return tasks
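
# The block below is a minimal, self-contained smoke test for the spinlock
# accounting above; it is illustrative only and not part of schedcat's
# model. The stub classes (_Const, _Overheads, _Requests, _Task) are
# hypothetical and expose just the attributes this module touches; in
# particular, density() is assumed here to mean cost relative to period.
if __name__ == '__main__':
    class _Const(object):
        # constant overhead charge, irrespective of the number of tasks
        def __init__(self, us):
            self.us = us
        def __call__(self, ntasks):
            return self.us

    class _Overheads(object):
        # every overhead source costs 0.5us in this example
        def __init__(self):
            half_us = _Const(0.5)
            for field in ('lock', 'unlock', 'read_lock', 'read_unlock',
                          'syscall_in', 'syscall_out', 'schedule',
                          'ctx_switch', 'cache_affinity_loss',
                          'ipi_latency'):
                setattr(self, field, half_us)

    class _Requests(object):
        # two writes of length 10 to one resource, no reads
        def __init__(self):
            self.max_reads        = 0
            self.max_read_length  = 0
            self.max_writes       = 2
            self.max_write_length = 10
            self.max_requests     = 2

    class _Task(object):
        def __init__(self, cost, period):
            self.cost      = cost
            self.period    = period
            self.suspended = 0
            self.resmodel  = {0: _Requests()}
        def density(self):
            return (self.cost + self.suspended) / self.period

    ts = [_Task(100, 1000)]
    assert charge_spinlock_overheads(_Overheads(), ts) is ts
    # each write is charged lock + unlock (1.0) and each request
    # syscall_in + syscall_out (1.0): 2 * 1.0 + 2 * 1.0 = 4.0 extra WCET
    print(ts[0].cost)                          # 104
    print(ts[0].resmodel[0].max_write_length)  # 11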