author		Bjoern Brandenburg <bbb@mpi-sws.org>	2012-05-16 13:04:36 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2012-05-16 13:04:36 -0400
commit		e7e500366aa7b892070bf41500c64cb3f8a98f88 (patch)
tree		1852f6f1c53c740ee2c7c298bdd2e2477d29ae35 /native/src/sharedres.cpp
parent		b7373fbe338936145b9d55471dc353951cb3a81a (diff)
C++: Break out the phase-fair RW locks code
Part of refactoring sharedres.cpp.
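
Note: this commit drops the static qualifiers from split_by_type, bound_blocking_all_clusters, and merge_rw_requests and deletes the RWCount/RWCounts definitions together with the phase-fair RW-lock bounds (pf_writer_fifo, pf_reader_all, clustered_rw_omlp_bounds) from sharedres.cpp, so that the RW-lock analysis can live in its own translation unit. The declarations below are only a sketch of the shared interface implied by this diff: the header name, the include path, and the tail of the bound_blocking_all_clusters parameter list (inferred from its call site in the removed pf_writer_fifo) are assumptions, since the newly added file is not part of this filtered diff.

// sharedres_rw.h -- hypothetical header sketch (name assumed); the actual
// commit may place these declarations elsewhere.  Types such as
// ContentionSet, Resources, ClusterResources, ClusterLimits, Interference,
// and TaskInfo come from the existing schedcat headers (include assumed).
#ifndef SHAREDRES_RW_H
#define SHAREDRES_RW_H

#include <vector>
#include "sharedres_types.h"	// assumed location of the basic types

// Per-resource read/write request counts, moved out of sharedres.cpp.
struct RWCount {
	unsigned int res_id;
	unsigned int num_reads;
	unsigned int num_writes;
	unsigned int rlength;
	unsigned int wlength;

	RWCount(unsigned int id)
		: res_id(id), num_reads(0), num_writes(0),
		  rlength(0), wlength(0) {}
};

typedef std::vector<RWCount> RWCounts;

// Helpers that this commit makes non-static so that the broken-out
// phase-fair RW-lock code can call them.
void split_by_type(const ContentionSet& requests,
		   ContentionSet& reads, ContentionSet& writes);
void split_by_type(const Resources& resources,
		   Resources& reads, Resources& writes);
void split_by_type(const ClusterResources& per_cluster,
		   ClusterResources& reads, ClusterResources& writes);

// Parameter list beyond res_id is inferred from the call in the removed
// pf_writer_fifo(): (writes, limits, res_id, interval, &tsk).
Interference bound_blocking_all_clusters(const ClusterResources& clusters,
					 const ClusterLimits& limits,
					 unsigned int res_id,
					 unsigned long interval,
					 const TaskInfo* tsk);

void merge_rw_requests(const TaskInfo& tsk, RWCounts& counts);

#endif /* SHAREDRES_RW_H */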
Diffstat (limited to 'native/src/sharedres.cpp')
-rw-r--r--	native/src/sharedres.cpp	252
1 file changed, 9 insertions(+), 243 deletions(-)
diff --git a/native/src/sharedres.cpp b/native/src/sharedres.cpp
index c988017..09d46ca 100644
--- a/native/src/sharedres.cpp
+++ b/native/src/sharedres.cpp
@@ -158,7 +158,7 @@ void split_by_resource(const Clusters& clusters,
 	}
 }
 
-static void split_by_type(const ContentionSet& requests,
+void split_by_type(const ContentionSet& requests,
 			  ContentionSet& reads,
 			  ContentionSet& writes)
 {
@@ -173,9 +173,9 @@ static void split_by_type(const ContentionSet& requests,
 	}
 }
 
-static void split_by_type(const Resources& resources,
+void split_by_type(const Resources& resources,
 			  Resources &reads,
 			  Resources &writes)
 {
 	reads.reserve(resources.size());
 	writes.reserve(resources.size());
@@ -187,9 +187,9 @@ static void split_by_type(const Resources& resources,
 	}
 }
 
-static void split_by_type(const ClusterResources& per_cluster,
+void split_by_type(const ClusterResources& per_cluster,
 			  ClusterResources &reads,
 			  ClusterResources &writes)
 {
 	reads.reserve(per_cluster.size());
 	writes.reserve(per_cluster.size());
@@ -322,7 +322,7 @@ Interference bound_blocking(const ContentionSet& cont,
 	return inter;
 }
 
-static Interference bound_blocking_all_clusters(
+Interference bound_blocking_all_clusters(
 	const ClusterResources& clusters,
 	const ClusterLimits& limits,
 	unsigned int res_id,
@@ -436,24 +436,7 @@ Interference np_fifo_per_resource(
 		&tsk);
 }
 
-struct RWCount {
-	unsigned int res_id;
-	unsigned int num_reads;
-	unsigned int num_writes;
-	unsigned int rlength;
-	unsigned int wlength;
-
-	RWCount(unsigned int id) : res_id(id),
-				   num_reads(0),
-				   num_writes(0),
-				   rlength(0),
-				   wlength(0)
-	{}
-};
-
-typedef std::vector<RWCount> RWCounts;
-
-static void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
+void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
 {
 	foreach(tsk.get_requests(), req)
 	{
@@ -476,223 +459,6 @@ static void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts)
476} 459}
477 460
478 461
479static Interference pf_writer_fifo(
480 const TaskInfo& tsk, const ClusterResources& writes,
481 const unsigned int num_writes,
482 const unsigned int num_reads,
483 const unsigned int res_id,
484 const unsigned int procs_per_cluster,
485 const int dedicated_irq)
486{
487 const unsigned int per_src_wlimit = num_reads + num_writes;
488 const unsigned long interval = tsk.get_response();
489 ClusterLimits limits;
490 int idx;
491
492 limits.reserve(writes.size());
493 enumerate(writes, ct, idx)
494 {
495 unsigned int total, parallelism = procs_per_cluster;
496
497 if (idx == dedicated_irq)
498 parallelism--;
499
500 if (parallelism && (int) tsk.get_cluster() == idx)
501 parallelism--;
502
503 // At most one blocking request per remote CPU in
504 // cluster per request.
505 if (parallelism)
506 total = num_reads + num_writes * parallelism;
507 else
508 // No interference from writers if we are hogging
509 // the only available CPU.
510 total = 0;
511
512 limits.push_back(ClusterLimit(total, per_src_wlimit));
513 }
514
515 Interference blocking;
516 blocking = bound_blocking_all_clusters(writes,
517 limits,
518 res_id,
519 interval,
520 &tsk);
521 return blocking;
522
523}
524
525static Interference pf_reader_all(
526 const TaskInfo& tsk,
527 const Resources& all_reads,
528 const unsigned int num_writes,
529 const unsigned int num_wblock,
530 const unsigned int num_reads,
531 const unsigned int res_id,
532 const unsigned int procs_per_cluster,
533 const unsigned int num_procs)
534{
535 const unsigned long interval = tsk.get_response();
536 Interference blocking;
537 unsigned int rlimit = std::min(num_wblock + num_writes,
538 num_reads + num_writes * (num_procs - 1));
539 blocking = bound_blocking(all_reads[res_id],
540 interval,
541 rlimit,
542 rlimit,
543 // exclude all if c == 1
544 procs_per_cluster == 1,
545 &tsk);
546 return blocking;
547}
548
549BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info,
550 unsigned int procs_per_cluster,
551 int dedicated_irq)
552{
553 // split everything by partition
554 Clusters clusters;
555
556 split_by_cluster(info, clusters);
557
558 // split each partition by resource
559 ClusterResources resources;
560
561 split_by_resource(clusters, resources);
562
563 // split all by resource
564 Resources all_task_reqs, all_reads, __all_writes;
565 split_by_resource(info, all_task_reqs);
566 split_by_type(all_task_reqs, all_reads, __all_writes);
567
568 // sort each contention set by request length
569 sort_by_request_length(resources);
570 sort_by_request_length(all_reads);
571
572 // split by type --- sorted order is maintained
573 ClusterResources __reads, writes;
574 split_by_type(resources, __reads, writes);
575
576
577 // We need for each task the maximum request span. We also need the
578 // maximum direct blocking from remote partitions for each request. We
579 // can determine both in one pass.
580
581 const unsigned int num_procs = procs_per_cluster * clusters.size();
582 unsigned int i;
583
584 // direct blocking results
585 BlockingBounds* _results = new BlockingBounds(info);
586 BlockingBounds& results = *_results;
587
588 for (i = 0; i < info.get_tasks().size(); i++)
589 {
590 const TaskInfo& tsk = info.get_tasks()[i];
591 RWCounts rwcounts;
592 Interference bterm;
593
594 merge_rw_requests(tsk, rwcounts);
595
596 foreach(rwcounts, jt)
597 {
598 const RWCount& rw = *jt;
599
600 // skip placeholders
601 if (!rw.num_reads && !rw.num_writes)
602 continue;
603
604 Interference wblocking, rblocking;
605
606 wblocking = pf_writer_fifo(tsk, writes, rw.num_writes,
607 rw.num_reads, rw.res_id,
608 procs_per_cluster,
609 dedicated_irq);
610
611 rblocking = pf_reader_all(tsk, all_reads, rw.num_writes,
612 wblocking.count, rw.num_reads,
613 rw.res_id, procs_per_cluster,
614 num_procs);
615
616 //**** SINGLE WRITE
617 Interference rblocking_w1, wblocking_w1;
618
619 // Keep track of maximum request span.
620 // Is this already a single-issue request?
621 if (rw.num_writes &&
622 (rw.num_writes != 1 || rw.num_reads != 0))
623 {
624 wblocking_w1 = pf_writer_fifo(tsk, writes, 1, 0,
625 rw.res_id, procs_per_cluster,
626 dedicated_irq);
627
628 rblocking_w1 = pf_reader_all(
629 tsk, all_reads, 1,
630 wblocking_w1.count, 0,
631 rw.res_id, procs_per_cluster,
632 num_procs);
633 }
634 else if (rw.num_writes)
635 {
636 wblocking_w1 = wblocking;
637 rblocking_w1 = rblocking;
638 }
639 // else: zero, nothing to do
640
641 //**** SINGLE READ
642
643 Interference rblocking_r1, wblocking_r1;
644
645
646 if (rw.num_reads &&
647 (rw.num_reads != 1 || rw.num_writes != 0))
648 {
649 wblocking_r1 = pf_writer_fifo(tsk, writes, 0, 1,
650 rw.res_id, procs_per_cluster,
651 dedicated_irq);
652
653 rblocking_r1 = pf_reader_all(
654 tsk, all_reads, 0,
655 wblocking_r1.count, 1,
656 rw.res_id, procs_per_cluster,
657 num_procs);
658 }
659 else if (rw.num_reads)
660 {
661 wblocking_r1 = wblocking;
662 rblocking_r1 = rblocking;
663 }
664
665 // else: zero, nothing to do
666
667 // The span includes our own request.
668 if (rw.num_writes)
669 {
670 wblocking_w1.total_length += rw.wlength;
671 wblocking_w1.count += 1;
672 }
673 if (rw.num_reads)
674 {
675 rblocking_r1.total_length += rw.rlength;
676 wblocking_r1.count += 1;
677 }
678
679 // combine
680 wblocking_w1 += rblocking_w1;
681 wblocking_r1 += rblocking_r1;
682 wblocking += rblocking;
683
684 results.raise_request_span(i, wblocking_w1);
685 results.raise_request_span(i, wblocking_r1);
686 bterm += wblocking;
687 }
688 results[i] = bterm;
689 }
690
691 // This is the initial delay due to priority donation.
692 charge_arrival_blocking(info, results);
693
694 return _results;
695}
696 462
697 463
698BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info, 464BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info,