about summary refs log tree commit diff stats
path: root/native/src
diff options
context:
space:
mode:
author	Bjoern Brandenburg <bbb@mpi-sws.org>	2012-05-16 13:04:36 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2012-05-16 13:04:36 -0400
commit	e7e500366aa7b892070bf41500c64cb3f8a98f88 (patch)
tree	1852f6f1c53c740ee2c7c298bdd2e2477d29ae35 /native/src
parent	b7373fbe338936145b9d55471dc353951cb3a81a (diff)
C++: Break out the phase-fair RW locks code
Part of refactoring sharedres.cpp.
Diffstat (limited to 'native/src')
-rw-r--r--	native/src/blocking/rw-phase-fair.cpp	223
-rw-r--r--	native/src/sharedres.cpp	252
2 files changed, 232 insertions, 243 deletions
diff --git a/native/src/blocking/rw-phase-fair.cpp b/native/src/blocking/rw-phase-fair.cpp
new file mode 100644
index 0000000..65b85b4
--- /dev/null
+++ b/native/src/blocking/rw-phase-fair.cpp
@@ -0,0 +1,223 @@
1#include "sharedres.h"
2#include "blocking.h"
3
4#include "stl-helper.h"
5
6
7static Interference pf_writer_fifo(
8 const TaskInfo& tsk, const ClusterResources& writes,
9 const unsigned int num_writes,
10 const unsigned int num_reads,
11 const unsigned int res_id,
12 const unsigned int procs_per_cluster,
13 const int dedicated_irq)
14{
15 const unsigned int per_src_wlimit = num_reads + num_writes;
16 const unsigned long interval = tsk.get_response();
17 ClusterLimits limits;
18 int idx;
19
20 limits.reserve(writes.size());
21 enumerate(writes, ct, idx)
22 {
23 unsigned int total, parallelism = procs_per_cluster;
24
25 if (idx == dedicated_irq)
26 parallelism--;
27
28 if (parallelism && (int) tsk.get_cluster() == idx)
29 parallelism--;
30
31 // At most one blocking request per remote CPU in
32 // cluster per request.
33 if (parallelism)
34 total = num_reads + num_writes * parallelism;
35 else
36 // No interference from writers if we are hogging
37 // the only available CPU.
38 total = 0;
39
40 limits.push_back(ClusterLimit(total, per_src_wlimit));
41 }
42
43 Interference blocking;
44 blocking = bound_blocking_all_clusters(writes,
45 limits,
46 res_id,
47 interval,
48 &tsk);
49 return blocking;
50
51}
52
53static Interference pf_reader_all(
54 const TaskInfo& tsk,
55 const Resources& all_reads,
56 const unsigned int num_writes,
57 const unsigned int num_wblock,
58 const unsigned int num_reads,
59 const unsigned int res_id,
60 const unsigned int procs_per_cluster,
61 const unsigned int num_procs)
62{
63 const unsigned long interval = tsk.get_response();
64 Interference blocking;
65 unsigned int rlimit = std::min(num_wblock + num_writes,
66 num_reads + num_writes * (num_procs - 1));
67 blocking = bound_blocking(all_reads[res_id],
68 interval,
69 rlimit,
70 rlimit,
71 // exclude all if c == 1
72 procs_per_cluster == 1,
73 &tsk);
74 return blocking;
75}
76
// Compute blocking bounds for the clustered RW-OMLP (phase-fair RW locks
// with priority donation). For each task this determines (a) the direct
// blocking incurred across all of its read/write requests and (b) the
// maximum request span (used for donation-related analysis), in one pass.
// Returns a heap-allocated BlockingBounds object owned by the caller.
BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info,
					 unsigned int procs_per_cluster,
					 int dedicated_irq)
{
	// split everything by partition
	Clusters clusters;

	split_by_cluster(info, clusters);

	// split each partition by resource
	ClusterResources resources;

	split_by_resource(clusters, resources);

	// split all by resource
	// (double-underscore names are required outputs of split_by_type
	// that this analysis does not use)
	Resources all_task_reqs, all_reads, __all_writes;
	split_by_resource(info, all_task_reqs);
	split_by_type(all_task_reqs, all_reads, __all_writes);

	// sort each contention set by request length
	sort_by_request_length(resources);
	sort_by_request_length(all_reads);

	// split by type --- sorted order is maintained
	ClusterResources __reads, writes;
	split_by_type(resources, __reads, writes);


	// We need for each task the maximum request span. We also need the
	// maximum direct blocking from remote partitions for each request. We
	// can determine both in one pass.

	const unsigned int num_procs = procs_per_cluster * clusters.size();
	unsigned int i;

	// direct blocking results
	BlockingBounds* _results = new BlockingBounds(info);
	BlockingBounds& results = *_results;

	for (i = 0; i < info.get_tasks().size(); i++)
	{
		const TaskInfo& tsk = info.get_tasks()[i];
		RWCounts rwcounts;
		Interference bterm;

		// Collapse tsk's requests into per-resource read/write counts.
		merge_rw_requests(tsk, rwcounts);

		foreach(rwcounts, jt)
		{
			const RWCount& rw = *jt;

			// skip placeholders
			if (!rw.num_reads && !rw.num_writes)
				continue;

			// Direct blocking across ALL of tsk's requests for
			// this resource: writer FIFO blocking first, since
			// the writer-phase count feeds the reader bound.
			Interference wblocking, rblocking;

			wblocking = pf_writer_fifo(tsk, writes, rw.num_writes,
						   rw.num_reads, rw.res_id,
						   procs_per_cluster,
						   dedicated_irq);

			rblocking = pf_reader_all(tsk, all_reads, rw.num_writes,
						  wblocking.count, rw.num_reads,
						  rw.res_id, procs_per_cluster,
						  num_procs);

			//**** SINGLE WRITE
			Interference rblocking_w1, wblocking_w1;

			// Keep track of maximum request span.
			// Is this already a single-issue request?
			if (rw.num_writes &&
			    (rw.num_writes != 1 || rw.num_reads != 0))
			{
				// Re-run the bounds for a hypothetical single
				// write request (num_writes=1, num_reads=0).
				wblocking_w1 = pf_writer_fifo(tsk, writes, 1, 0,
							      rw.res_id, procs_per_cluster,
							      dedicated_irq);

				rblocking_w1 = pf_reader_all(
					tsk, all_reads, 1,
					wblocking_w1.count, 0,
					rw.res_id, procs_per_cluster,
					num_procs);
			}
			else if (rw.num_writes)
			{
				// Exactly one write and no reads: the bounds
				// computed above already describe one request.
				wblocking_w1 = wblocking;
				rblocking_w1 = rblocking;
			}
			// else: zero, nothing to do

			//**** SINGLE READ

			Interference rblocking_r1, wblocking_r1;


			if (rw.num_reads &&
			    (rw.num_reads != 1 || rw.num_writes != 0))
			{
				// Single-read case (num_writes=0, num_reads=1).
				wblocking_r1 = pf_writer_fifo(tsk, writes, 0, 1,
							      rw.res_id, procs_per_cluster,
							      dedicated_irq);

				rblocking_r1 = pf_reader_all(
					tsk, all_reads, 0,
					wblocking_r1.count, 1,
					rw.res_id, procs_per_cluster,
					num_procs);
			}
			else if (rw.num_reads)
			{
				wblocking_r1 = wblocking;
				rblocking_r1 = rblocking;
			}

			// else: zero, nothing to do

			// The span includes our own request.
			if (rw.num_writes)
			{
				wblocking_w1.total_length += rw.wlength;
				wblocking_w1.count += 1;
			}
			if (rw.num_reads)
			{
				// NOTE(review): asymmetric with the write case
				// above — the length is added to rblocking_r1
				// but the count to wblocking_r1. The two are
				// combined via += just below, so this may be
				// intentional; confirm against the semantics
				// of Interference::operator+=.
				rblocking_r1.total_length += rw.rlength;
				wblocking_r1.count += 1;
			}

			// combine
			wblocking_w1 += rblocking_w1;
			wblocking_r1 += rblocking_r1;
			wblocking += rblocking;

			// Request span is the max over single-write and
			// single-read scenarios; direct blocking accumulates.
			results.raise_request_span(i, wblocking_w1);
			results.raise_request_span(i, wblocking_r1);
			bterm += wblocking;
		}
		results[i] = bterm;
	}

	// This is the initial delay due to priority donation.
	charge_arrival_blocking(info, results);

	return _results;
}
diff --git a/native/src/sharedres.cpp b/native/src/sharedres.cpp
index c988017..09d46ca 100644
--- a/native/src/sharedres.cpp
+++ b/native/src/sharedres.cpp
@@ -158,7 +158,7 @@ void split_by_resource(const Clusters& clusters,
158 } 158 }
159} 159}
160 160
161static void split_by_type(const ContentionSet& requests, 161void split_by_type(const ContentionSet& requests,
162 ContentionSet& reads, 162 ContentionSet& reads,
163 ContentionSet& writes) 163 ContentionSet& writes)
164{ 164{
@@ -173,9 +173,9 @@ static void split_by_type(const ContentionSet& requests,
173 } 173 }
174} 174}
175 175
176static void split_by_type(const Resources& resources, 176void split_by_type(const Resources& resources,
177 Resources &reads, 177 Resources &reads,
178 Resources &writes) 178 Resources &writes)
179{ 179{
180 reads.reserve(resources.size()); 180 reads.reserve(resources.size());
181 writes.reserve(resources.size()); 181 writes.reserve(resources.size());
@@ -187,9 +187,9 @@ static void split_by_type(const Resources& resources,
187 } 187 }
188} 188}
189 189
190static void split_by_type(const ClusterResources& per_cluster, 190void split_by_type(const ClusterResources& per_cluster,
191 ClusterResources &reads, 191 ClusterResources &reads,
192 ClusterResources &writes) 192 ClusterResources &writes)
193{ 193{
194 reads.reserve(per_cluster.size()); 194 reads.reserve(per_cluster.size());
195 writes.reserve(per_cluster.size()); 195 writes.reserve(per_cluster.size());
@@ -322,7 +322,7 @@ Interference bound_blocking(const ContentionSet& cont,
322 return inter; 322 return inter;
323} 323}
324 324
325static Interference bound_blocking_all_clusters( 325Interference bound_blocking_all_clusters(
326 const ClusterResources& clusters, 326 const ClusterResources& clusters,
327 const ClusterLimits& limits, 327 const ClusterLimits& limits,
328 unsigned int res_id, 328 unsigned int res_id,
@@ -436,24 +436,7 @@ Interference np_fifo_per_resource(
436 &tsk); 436 &tsk);
437} 437}
438 438
439struct RWCount { 439void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
440 unsigned int res_id;
441 unsigned int num_reads;
442 unsigned int num_writes;
443 unsigned int rlength;
444 unsigned int wlength;
445
446 RWCount(unsigned int id) : res_id(id),
447 num_reads(0),
448 num_writes(0),
449 rlength(0),
450 wlength(0)
451 {}
452};
453
454typedef std::vector<RWCount> RWCounts;
455
456static void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
457{ 440{
458 foreach(tsk.get_requests(), req) 441 foreach(tsk.get_requests(), req)
459 { 442 {
@@ -476,223 +459,6 @@ static void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
476} 459}
477 460
478 461
479static Interference pf_writer_fifo(
480 const TaskInfo& tsk, const ClusterResources& writes,
481 const unsigned int num_writes,
482 const unsigned int num_reads,
483 const unsigned int res_id,
484 const unsigned int procs_per_cluster,
485 const int dedicated_irq)
486{
487 const unsigned int per_src_wlimit = num_reads + num_writes;
488 const unsigned long interval = tsk.get_response();
489 ClusterLimits limits;
490 int idx;
491
492 limits.reserve(writes.size());
493 enumerate(writes, ct, idx)
494 {
495 unsigned int total, parallelism = procs_per_cluster;
496
497 if (idx == dedicated_irq)
498 parallelism--;
499
500 if (parallelism && (int) tsk.get_cluster() == idx)
501 parallelism--;
502
503 // At most one blocking request per remote CPU in
504 // cluster per request.
505 if (parallelism)
506 total = num_reads + num_writes * parallelism;
507 else
508 // No interference from writers if we are hogging
509 // the only available CPU.
510 total = 0;
511
512 limits.push_back(ClusterLimit(total, per_src_wlimit));
513 }
514
515 Interference blocking;
516 blocking = bound_blocking_all_clusters(writes,
517 limits,
518 res_id,
519 interval,
520 &tsk);
521 return blocking;
522
523}
524
525static Interference pf_reader_all(
526 const TaskInfo& tsk,
527 const Resources& all_reads,
528 const unsigned int num_writes,
529 const unsigned int num_wblock,
530 const unsigned int num_reads,
531 const unsigned int res_id,
532 const unsigned int procs_per_cluster,
533 const unsigned int num_procs)
534{
535 const unsigned long interval = tsk.get_response();
536 Interference blocking;
537 unsigned int rlimit = std::min(num_wblock + num_writes,
538 num_reads + num_writes * (num_procs - 1));
539 blocking = bound_blocking(all_reads[res_id],
540 interval,
541 rlimit,
542 rlimit,
543 // exclude all if c == 1
544 procs_per_cluster == 1,
545 &tsk);
546 return blocking;
547}
548
549BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info,
550 unsigned int procs_per_cluster,
551 int dedicated_irq)
552{
553 // split everything by partition
554 Clusters clusters;
555
556 split_by_cluster(info, clusters);
557
558 // split each partition by resource
559 ClusterResources resources;
560
561 split_by_resource(clusters, resources);
562
563 // split all by resource
564 Resources all_task_reqs, all_reads, __all_writes;
565 split_by_resource(info, all_task_reqs);
566 split_by_type(all_task_reqs, all_reads, __all_writes);
567
568 // sort each contention set by request length
569 sort_by_request_length(resources);
570 sort_by_request_length(all_reads);
571
572 // split by type --- sorted order is maintained
573 ClusterResources __reads, writes;
574 split_by_type(resources, __reads, writes);
575
576
577 // We need for each task the maximum request span. We also need the
578 // maximum direct blocking from remote partitions for each request. We
579 // can determine both in one pass.
580
581 const unsigned int num_procs = procs_per_cluster * clusters.size();
582 unsigned int i;
583
584 // direct blocking results
585 BlockingBounds* _results = new BlockingBounds(info);
586 BlockingBounds& results = *_results;
587
588 for (i = 0; i < info.get_tasks().size(); i++)
589 {
590 const TaskInfo& tsk = info.get_tasks()[i];
591 RWCounts rwcounts;
592 Interference bterm;
593
594 merge_rw_requests(tsk, rwcounts);
595
596 foreach(rwcounts, jt)
597 {
598 const RWCount& rw = *jt;
599
600 // skip placeholders
601 if (!rw.num_reads && !rw.num_writes)
602 continue;
603
604 Interference wblocking, rblocking;
605
606 wblocking = pf_writer_fifo(tsk, writes, rw.num_writes,
607 rw.num_reads, rw.res_id,
608 procs_per_cluster,
609 dedicated_irq);
610
611 rblocking = pf_reader_all(tsk, all_reads, rw.num_writes,
612 wblocking.count, rw.num_reads,
613 rw.res_id, procs_per_cluster,
614 num_procs);
615
616 //**** SINGLE WRITE
617 Interference rblocking_w1, wblocking_w1;
618
619 // Keep track of maximum request span.
620 // Is this already a single-issue request?
621 if (rw.num_writes &&
622 (rw.num_writes != 1 || rw.num_reads != 0))
623 {
624 wblocking_w1 = pf_writer_fifo(tsk, writes, 1, 0,
625 rw.res_id, procs_per_cluster,
626 dedicated_irq);
627
628 rblocking_w1 = pf_reader_all(
629 tsk, all_reads, 1,
630 wblocking_w1.count, 0,
631 rw.res_id, procs_per_cluster,
632 num_procs);
633 }
634 else if (rw.num_writes)
635 {
636 wblocking_w1 = wblocking;
637 rblocking_w1 = rblocking;
638 }
639 // else: zero, nothing to do
640
641 //**** SINGLE READ
642
643 Interference rblocking_r1, wblocking_r1;
644
645
646 if (rw.num_reads &&
647 (rw.num_reads != 1 || rw.num_writes != 0))
648 {
649 wblocking_r1 = pf_writer_fifo(tsk, writes, 0, 1,
650 rw.res_id, procs_per_cluster,
651 dedicated_irq);
652
653 rblocking_r1 = pf_reader_all(
654 tsk, all_reads, 0,
655 wblocking_r1.count, 1,
656 rw.res_id, procs_per_cluster,
657 num_procs);
658 }
659 else if (rw.num_reads)
660 {
661 wblocking_r1 = wblocking;
662 rblocking_r1 = rblocking;
663 }
664
665 // else: zero, nothing to do
666
667 // The span includes our own request.
668 if (rw.num_writes)
669 {
670 wblocking_w1.total_length += rw.wlength;
671 wblocking_w1.count += 1;
672 }
673 if (rw.num_reads)
674 {
675 rblocking_r1.total_length += rw.rlength;
676 wblocking_r1.count += 1;
677 }
678
679 // combine
680 wblocking_w1 += rblocking_w1;
681 wblocking_r1 += rblocking_r1;
682 wblocking += rblocking;
683
684 results.raise_request_span(i, wblocking_w1);
685 results.raise_request_span(i, wblocking_r1);
686 bterm += wblocking;
687 }
688 results[i] = bterm;
689 }
690
691 // This is the initial delay due to priority donation.
692 charge_arrival_blocking(info, results);
693
694 return _results;
695}
696 462
697 463
698BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info, 464BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info,