author    Bjoern Brandenburg <bbb@mpi-sws.org>    2012-05-16 13:08:36 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>    2012-05-16 13:08:36 -0400
commit    5cb503377c18b1c099c5b1a5375ece53fe25258e (patch)
tree      0e6b09d493d6aaa75645abaa7cc18bb277bab96b /native/src
parent    e7e500366aa7b892070bf41500c64cb3f8a98f88 (diff)
C++: Break out the task-fair RW locks code
Part of refactoring sharedres.cpp.
Diffstat (limited to 'native/src')
-rw-r--r--  native/src/blocking/rw-task-fair.cpp | 238
-rw-r--r--  native/src/sharedres.cpp             | 233
2 files changed, 238 insertions(+), 233 deletions(-)
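
The heart of the code being moved is bound_blocking_all() (below), which walks a contention set in presumed non-increasing request-length order and charges each request against three budgets: a per-task cap, a per-cluster cap, and an overall cap. The following standalone sketch illustrates only that counting idea; the Request type, its field names, and the main() driver are illustrative assumptions and not part of this patch or the project's API (the local-cluster seeding and the response-time interval used by the real code are omitted for brevity).

// Standalone sketch (assumed types, not the project's API): charge each
// request against per-task, per-cluster, and total budgets, in the given
// (presumed length-sorted) order, and accumulate the blocking it causes.
#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <vector>

struct Request {
        int task_id;
        int cluster;
        unsigned int count;   // how often the task may issue this request
        unsigned long length; // duration of one request
};

struct Blocking {
        unsigned int count = 0;
        unsigned long total_length = 0;
};

static Blocking bound_blocking_sketch(const std::vector<Request>& reqs,
                                      unsigned int per_task,
                                      unsigned int per_cluster,
                                      unsigned int total)
{
        std::unordered_map<int, unsigned int> task_budget, cluster_budget;
        Blocking b;

        for (const Request& r : reqs) {
                if (!total)
                        break;

                // Lazily initialize budgets when a task/cluster is first seen.
                if (!task_budget.count(r.task_id))
                        task_budget[r.task_id] = per_task;
                if (!cluster_budget.count(r.cluster))
                        cluster_budget[r.cluster] = per_cluster;

                // A request counts only as far as all three budgets allow.
                unsigned int room = std::min({task_budget[r.task_id],
                                              cluster_budget[r.cluster], total});
                unsigned int num = std::min(r.count, room);

                b.count += num;
                b.total_length += num * r.length;
                task_budget[r.task_id] -= num;
                cluster_budget[r.cluster] -= num;
                total -= num;
        }
        return b;
}

int main()
{
        // Two tasks on two clusters; longest requests first.
        std::vector<Request> reqs = {{1, 0, 3, 100}, {2, 1, 5, 40}};
        Blocking b = bound_blocking_sketch(reqs, 2, 4, 5);
        std::printf("count=%u, total=%lu\n", b.count, b.total_length);
        return 0;
}
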
diff --git a/native/src/blocking/rw-task-fair.cpp b/native/src/blocking/rw-task-fair.cpp
new file mode 100644
index 0000000..ea0b102
--- /dev/null
+++ b/native/src/blocking/rw-task-fair.cpp
@@ -0,0 +1,238 @@
+#include "sharedres.h"
+#include "blocking.h"
+
+#include "stl-helper.h"
+
+
+#ifdef CONFIG_USE_0X
+#include <unordered_map>
+#define hashmap std::unordered_map
+#else
+#include <ext/hash_map>
+#define hashmap __gnu_cxx::hash_map
+#endif
+
+
+
+static Interference bound_blocking_all(
+        const TaskInfo* tsk,
+        const ContentionSet& all_reqs, // presumed sorted, for all clusters/tasks
+        const unsigned int max_remote_requests, // per cluster
+        const unsigned int max_local_requests, // local cluster
+        const unsigned int max_requests, // per task
+        unsigned int max_total) // stop after counting max_total
+{
+        unsigned long interval = tsk->get_response();
+        hashmap<unsigned long, unsigned int> task_counter(512);
+        hashmap<unsigned long, unsigned int>::iterator tctr;
+        hashmap<unsigned int, unsigned int> cluster_counter(64);
+        hashmap<unsigned int, unsigned int>::iterator cctr;
+        Interference inter;
+
+        cluster_counter[tsk->get_cluster()] = max_local_requests;
+
+        foreach(all_reqs, it)
+        {
+                const RequestBound* req = *it;
+                const TaskInfo* t = req->get_task();
+                unsigned long key = (unsigned long) t;
+                unsigned int cluster = t->get_cluster();
+
+                if (!max_total)
+                        // we are done
+                        break;
+
+                if (t == tsk)
+                        // doesn't block itself
+                        continue;
+
+                // make sure we have seen this task
+                tctr = task_counter.find(key);
+                if (tctr == task_counter.end())
+                {
+                        task_counter[key] = max_requests;
+                        tctr = task_counter.find(key);
+                }
+
+                if (!tctr->second)
+                        continue;
+
+                cctr = cluster_counter.find(cluster);
+                if (cctr == cluster_counter.end())
+                {
+                        cluster_counter[cluster] = max_remote_requests;
+                        cctr = cluster_counter.find(cluster);
+                }
+
+                if (!cctr->second)
+                        continue;
+
+                unsigned int remaining;
+                remaining = std::min(tctr->second, cctr->second);
+                remaining = std::min(remaining, max_total);
+                unsigned int num = std::min(req->get_max_num_requests(interval), remaining);
+
+                inter.total_length += num * req->get_request_length();
+                inter.count += num;
+                cctr->second -= num;
+                tctr->second -= num;
+                max_total -= num;
+        }
+
+        return inter;
+}
+
+static Interference tf_reader_all(
+        const TaskInfo& tsk,
+        const Resources& all_reads,
+        const unsigned int num_writes,
+        const unsigned int num_wblock,
+        const unsigned int num_reads,
+        const unsigned int res_id,
+        const unsigned int procs_per_cluster)
+{
+        Interference blocking;
+        unsigned int num_reqs = num_reads + num_writes;
+        unsigned int max_reader_phases = num_wblock + num_writes;
+        unsigned int task_limit = std::min(max_reader_phases, num_reqs);
+
+        return bound_blocking_all(
+                &tsk, all_reads[res_id],
+                num_reqs * procs_per_cluster,
+                num_reqs * (procs_per_cluster - 1),
+                task_limit,
+                max_reader_phases);
+}
+
+
+BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info,
+                                    const ResourceSharingInfo& info_mtx,
+                                    unsigned int procs_per_cluster,
+                                    int dedicated_irq)
+{
+        // split everything by partition
+        Clusters clusters, clusters_mtx;
+
+        split_by_cluster(info, clusters);
+        split_by_cluster(info_mtx, clusters_mtx);
+
+        // split each partition by resource
+        ClusterResources resources, resources_mtx;
+
+        split_by_resource(clusters, resources);
+        split_by_resource(clusters_mtx, resources_mtx);
+
+        // split all by resource
+        Resources all_task_reqs, all_reads, __all_writes;
+        split_by_resource(info, all_task_reqs);
+        split_by_type(all_task_reqs, all_reads, __all_writes);
+
+        // sort each contention set by request length
+        sort_by_request_length(resources);
+        sort_by_request_length(resources_mtx);
+        sort_by_request_length(all_reads);
+
+        // split by type --- sorted order is maintained
+        ClusterResources __reads, writes;
+        split_by_type(resources, __reads, writes);
+
+
+        // We need for each task the maximum request span. We also need the
+        // maximum direct blocking from remote partitions for each request. We
+        // can determine both in one pass.
+
+        unsigned int i;
+
+        // direct blocking results
+        BlockingBounds* _results = new BlockingBounds(info);
+        BlockingBounds& results = *_results;
+
+        for (i = 0; i < info.get_tasks().size(); i++)
+        {
+                const TaskInfo& tsk = info.get_tasks()[i];
+                RWCounts rwcounts;
+
+                Interference bterm;
+
+                merge_rw_requests(tsk, rwcounts);
+
+                foreach(rwcounts, jt)
+                {
+                        const RWCount& rw = *jt;
+
+                        // skip placeholders
+                        if (!rw.num_reads && !rw.num_writes)
+                                continue;
+
+
+                        // 1) treat it as a mutex as a baseline
+                        Interference mtx, mtx_1;
+
+                        mtx = np_fifo_per_resource(
+                                tsk, resources_mtx, procs_per_cluster, rw.res_id,
+                                rw.num_reads + rw.num_writes,
+                                dedicated_irq);
+
+                        if (rw.num_reads + rw.num_writes == 1)
+                                mtx_1 = mtx;
+                        else
+                                mtx_1 = np_fifo_per_resource(
+                                        tsk, resources_mtx, procs_per_cluster,
+                                        rw.res_id, 1, dedicated_irq);
+
+                        // The span includes our own request.
+                        mtx_1.total_length += std::max(rw.wlength, rw.rlength);
+                        mtx_1.count += 1;
+
+                        // 2) apply real RW analysis
+                        Interference wblocking, wblocking_1;
+                        Interference rblocking, rblocking_r1, rblocking_w1;
+
+                        wblocking = np_fifo_per_resource(
+                                tsk, writes, procs_per_cluster, rw.res_id,
+                                rw.num_reads + rw.num_writes,
+                                dedicated_irq);
+                        wblocking_1 = np_fifo_per_resource(
+                                tsk, writes, procs_per_cluster, rw.res_id, 1,
+                                dedicated_irq);
+
+                        rblocking = tf_reader_all(
+                                tsk, all_reads, rw.num_writes, wblocking.count,
+                                rw.num_reads, rw.res_id, procs_per_cluster);
+
+                        if (rw.num_writes)
+                        {
+                                // single write
+                                rblocking_w1 = tf_reader_all(
+                                        tsk, all_reads, 1, wblocking.count,
+                                        0, rw.res_id, procs_per_cluster);
+                                // The span includes our own request.
+                                rblocking_w1.total_length += rw.wlength;
+                                rblocking_w1.count += 1;
+                        }
+                        if (rw.num_reads)
+                        {
+                                // single read
+                                rblocking_r1 = tf_reader_all(
+                                        tsk, all_reads, 0, wblocking.count,
+                                        1, rw.res_id, procs_per_cluster);
+                                // The span includes our own request.
+                                rblocking_r1.total_length += rw.rlength;
+                                rblocking_r1.count += 1;
+                        }
+
+                        // combine
+                        wblocking += rblocking;
+                        wblocking_1 += std::max(rblocking_w1, rblocking_r1);
+
+                        bterm += std::min(wblocking, mtx);
+                        results.raise_request_span(i, std::min(wblocking_1, mtx_1));
+                }
+                results[i] = bterm;
+        }
+
+        // This is the initial delay due to priority donation.
+        charge_arrival_blocking(info, results);
+
+        return _results;
+}
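
For reference, the entry point task_fair_rw_bounds() is unchanged by the move. A minimal caller sketch follows; the wrapper name analyze_tf_rw(), the example parameter values, and the assumption that the declaration is visible via sharedres.h are illustrative and not taken from this patch.

// Hypothetical caller: how the two ResourceSharingInfo objects are
// populated is assumed to happen elsewhere; only the call and the
// ownership convention (heap-allocated result) come from this patch.
#include "sharedres.h"

BlockingBounds* analyze_tf_rw(const ResourceSharingInfo& info,
                              const ResourceSharingInfo& info_mtx)
{
        const unsigned int procs_per_cluster = 4; // example value
        const int dedicated_irq = -1;             // assumed sentinel: no dedicated IRQ processor

        // task_fair_rw_bounds() returns a result allocated with new
        // (see 'new BlockingBounds(info)' above); the caller owns it
        // and is responsible for deleting it.
        return task_fair_rw_bounds(info, info_mtx,
                                   procs_per_cluster, dedicated_irq);
}
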
diff --git a/native/src/sharedres.cpp b/native/src/sharedres.cpp
index 09d46ca..00d7932 100644
--- a/native/src/sharedres.cpp
+++ b/native/src/sharedres.cpp
@@ -12,14 +12,6 @@
 
 #include "stl-helper.h"
 
-#ifdef CONFIG_USE_0X
-#include <unordered_map>
-#define hashmap std::unordered_map
-#else
-#include <ext/hash_map>
-#define hashmap __gnu_cxx::hash_map
-#endif
-
 #include "blocking.h"
 
 const unsigned int UNLIMITED = UINT_MAX;
@@ -481,228 +473,3 @@ BlockingBounds* phase_fair_rw_bounds(const ResourceSharingInfo& info,
 }
 
 
-static Interference bound_blocking_all(
-        const TaskInfo* tsk,
-        const ContentionSet& all_reqs, // presumed sorted, for all clusters/tasks
-        const unsigned int max_remote_requests, // per cluster
-        const unsigned int max_local_requests, // local cluster
-        const unsigned int max_requests, // per task
-        unsigned int max_total) // stop after counting max_total
-{
-        unsigned long interval = tsk->get_response();
-        hashmap<unsigned long, unsigned int> task_counter(512);
-        hashmap<unsigned long, unsigned int>::iterator tctr;
-        hashmap<unsigned int, unsigned int> cluster_counter(64);
-        hashmap<unsigned int, unsigned int>::iterator cctr;
-        Interference inter;
-
-        cluster_counter[tsk->get_cluster()] = max_local_requests;
-
-        foreach(all_reqs, it)
-        {
-                const RequestBound* req = *it;
-                const TaskInfo* t = req->get_task();
-                unsigned long key = (unsigned long) t;
-                unsigned int cluster = t->get_cluster();
-
-                if (!max_total)
-                        // we are done
-                        break;
-
-                if (t == tsk)
-                        // doesn't block itself
-                        continue;
-
-                // make sure we have seen this task
-                tctr = task_counter.find(key);
-                if (tctr == task_counter.end())
-                {
-                        task_counter[key] = max_requests;
-                        tctr = task_counter.find(key);
-                }
-
-                if (!tctr->second)
-                        continue;
-
-                cctr = cluster_counter.find(cluster);
-                if (cctr == cluster_counter.end())
-                {
-                        cluster_counter[cluster] = max_remote_requests;
-                        cctr = cluster_counter.find(cluster);
-                }
-
-                if (!cctr->second)
-                        continue;
-
-                unsigned int remaining;
-                remaining = std::min(tctr->second, cctr->second);
-                remaining = std::min(remaining, max_total);
-                unsigned int num = std::min(req->get_max_num_requests(interval), remaining);
-
-                inter.total_length += num * req->get_request_length();
-                inter.count += num;
-                cctr->second -= num;
-                tctr->second -= num;
-                max_total -= num;
-        }
-
-        return inter;
-}
-
-
-static Interference tf_reader_all(
-        const TaskInfo& tsk,
-        const Resources& all_reads,
-        const unsigned int num_writes,
-        const unsigned int num_wblock,
-        const unsigned int num_reads,
-        const unsigned int res_id,
-        const unsigned int procs_per_cluster)
-{
-        Interference blocking;
-        unsigned int num_reqs = num_reads + num_writes;
-        unsigned int max_reader_phases = num_wblock + num_writes;
-        unsigned int task_limit = std::min(max_reader_phases, num_reqs);
-
-        return bound_blocking_all(
-                &tsk, all_reads[res_id],
-                num_reqs * procs_per_cluster,
-                num_reqs * (procs_per_cluster - 1),
-                task_limit,
-                max_reader_phases);
-}
-
-
-BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info,
-                                    const ResourceSharingInfo& info_mtx,
-                                    unsigned int procs_per_cluster,
-                                    int dedicated_irq)
-{
-        // split everything by partition
-        Clusters clusters, clusters_mtx;
-
-        split_by_cluster(info, clusters);
-        split_by_cluster(info_mtx, clusters_mtx);
-
-        // split each partition by resource
-        ClusterResources resources, resources_mtx;
-
-        split_by_resource(clusters, resources);
-        split_by_resource(clusters_mtx, resources_mtx);
-
-        // split all by resource
-        Resources all_task_reqs, all_reads, __all_writes;
-        split_by_resource(info, all_task_reqs);
-        split_by_type(all_task_reqs, all_reads, __all_writes);
-
-        // sort each contention set by request length
-        sort_by_request_length(resources);
-        sort_by_request_length(resources_mtx);
-        sort_by_request_length(all_reads);
-
-        // split by type --- sorted order is maintained
-        ClusterResources __reads, writes;
-        split_by_type(resources, __reads, writes);
-
-
-        // We need for each task the maximum request span. We also need the
-        // maximum direct blocking from remote partitions for each request. We
-        // can determine both in one pass.
-
-        unsigned int i;
-
-        // direct blocking results
-        BlockingBounds* _results = new BlockingBounds(info);
-        BlockingBounds& results = *_results;
-
-        for (i = 0; i < info.get_tasks().size(); i++)
-        {
-                const TaskInfo& tsk = info.get_tasks()[i];
-                RWCounts rwcounts;
-
-                Interference bterm;
-
-                merge_rw_requests(tsk, rwcounts);
-
-                foreach(rwcounts, jt)
-                {
-                        const RWCount& rw = *jt;
-
-                        // skip placeholders
-                        if (!rw.num_reads && !rw.num_writes)
-                                continue;
-
-
-                        // 1) treat it as a mutex as a baseline
-                        Interference mtx, mtx_1;
-
-                        mtx = np_fifo_per_resource(
-                                tsk, resources_mtx, procs_per_cluster, rw.res_id,
-                                rw.num_reads + rw.num_writes,
-                                dedicated_irq);
-
-                        if (rw.num_reads + rw.num_writes == 1)
-                                mtx_1 = mtx;
-                        else
-                                mtx_1 = np_fifo_per_resource(
-                                        tsk, resources_mtx, procs_per_cluster,
-                                        rw.res_id, 1, dedicated_irq);
-
-                        // The span includes our own request.
-                        mtx_1.total_length += std::max(rw.wlength, rw.rlength);
-                        mtx_1.count += 1;
-
-                        // 2) apply real RW analysis
-                        Interference wblocking, wblocking_1;
-                        Interference rblocking, rblocking_r1, rblocking_w1;
-
-                        wblocking = np_fifo_per_resource(
-                                tsk, writes, procs_per_cluster, rw.res_id,
-                                rw.num_reads + rw.num_writes,
-                                dedicated_irq);
-                        wblocking_1 = np_fifo_per_resource(
-                                tsk, writes, procs_per_cluster, rw.res_id, 1,
-                                dedicated_irq);
-
-                        rblocking = tf_reader_all(
-                                tsk, all_reads, rw.num_writes, wblocking.count,
-                                rw.num_reads, rw.res_id, procs_per_cluster);
-
-                        if (rw.num_writes)
-                        {
-                                // single write
-                                rblocking_w1 = tf_reader_all(
-                                        tsk, all_reads, 1, wblocking.count,
-                                        0, rw.res_id, procs_per_cluster);
-                                // The span includes our own request.
-                                rblocking_w1.total_length += rw.wlength;
-                                rblocking_w1.count += 1;
-                        }
-                        if (rw.num_reads)
-                        {
-                                // single read
-                                rblocking_r1 = tf_reader_all(
-                                        tsk, all_reads, 0, wblocking.count,
-                                        1, rw.res_id, procs_per_cluster);
-                                // The span includes our own request.
-                                rblocking_r1.total_length += rw.rlength;
-                                rblocking_r1.count += 1;
-                        }
-
-                        // combine
-                        wblocking += rblocking;
-                        wblocking_1 += std::max(rblocking_w1, rblocking_r1);
-
-                        bterm += std::min(wblocking, mtx);
-                        results.raise_request_span(i, std::min(wblocking_1, mtx_1));
-                }
-                results[i] = bterm;
-        }
-
-        // This is the initial delay due to priority donation.
-        charge_arrival_blocking(info, results);
-
-        return _results;
-}
-
-