diff options
| author | Bjoern Brandenburg <bbb@mpi-sws.org> | 2012-05-16 13:08:36 -0400 |
|---|---|---|
| committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2012-05-16 13:08:36 -0400 |
| commit | 5cb503377c18b1c099c5b1a5375ece53fe25258e (patch) | |
| tree | 0e6b09d493d6aaa75645abaa7cc18bb277bab96b /native/src/sharedres.cpp | |
| parent | e7e500366aa7b892070bf41500c64cb3f8a98f88 (diff) | |
C++: Break out the task-fair RW locks code
Part of refactoring sharedres.cpp.
Diffstat (limited to 'native/src/sharedres.cpp')
| -rw-r--r-- | native/src/sharedres.cpp | 233 |
1 file changed, 0 insertions, 233 deletions
diff --git a/native/src/sharedres.cpp b/native/src/sharedres.cpp index 09d46ca..00d7932 100644 --- a/native/src/sharedres.cpp +++ b/native/src/sharedres.cpp | |||
| @@ -12,14 +12,6 @@ | |||
| 12 | 12 | ||
| 13 | #include "stl-helper.h" | 13 | #include "stl-helper.h" |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_USE_0X | ||
| 16 | #include <unordered_map> | ||
| 17 | #define hashmap std::unordered_map | ||
| 18 | #else | ||
| 19 | #include <ext/hash_map> | ||
| 20 | #define hashmap __gnu_cxx::hash_map | ||
| 21 | #endif | ||
| 22 | |||
| 23 | #include "blocking.h" | 15 | #include "blocking.h" |
| 24 | 16 | ||
| 25 | const unsigned int UNLIMITED = UINT_MAX; | 17 | const unsigned int UNLIMITED = UINT_MAX; |
| @@ -481,228 +473,3 @@ BlockingBounds* phase_fair_rw_bounds(const ResourceSharingInfo& info, | |||
| 481 | } | 473 | } |
| 482 | 474 | ||
| 483 | 475 | ||
| 484 | static Interference bound_blocking_all( | ||
| 485 | const TaskInfo* tsk, | ||
| 486 | const ContentionSet& all_reqs, // presumed sorted, for all clusters/tasks | ||
| 487 | const unsigned int max_remote_requests, // per cluster | ||
| 488 | const unsigned int max_local_requests, // local cluster | ||
| 489 | const unsigned int max_requests, // per task | ||
| 490 | unsigned int max_total) // stop after counting max_total | ||
| 491 | { | ||
| 492 | unsigned long interval = tsk->get_response(); | ||
| 493 | hashmap<unsigned long, unsigned int> task_counter(512); | ||
| 494 | hashmap<unsigned long, unsigned int>::iterator tctr; | ||
| 495 | hashmap<unsigned int, unsigned int> cluster_counter(64); | ||
| 496 | hashmap<unsigned int, unsigned int>::iterator cctr; | ||
| 497 | Interference inter; | ||
| 498 | |||
| 499 | cluster_counter[tsk->get_cluster()] = max_local_requests; | ||
| 500 | |||
| 501 | foreach(all_reqs, it) | ||
| 502 | { | ||
| 503 | const RequestBound* req = *it; | ||
| 504 | const TaskInfo* t = req->get_task(); | ||
| 505 | unsigned long key = (unsigned long) t; | ||
| 506 | unsigned int cluster = t->get_cluster(); | ||
| 507 | |||
| 508 | if (!max_total) | ||
| 509 | // we are done | ||
| 510 | break; | ||
| 511 | |||
| 512 | if (t == tsk) | ||
| 513 | // doesn't block itself | ||
| 514 | continue; | ||
| 515 | |||
| 516 | // make sure we have seen this task | ||
| 517 | tctr = task_counter.find(key); | ||
| 518 | if (tctr == task_counter.end()) | ||
| 519 | { | ||
| 520 | task_counter[key] = max_requests; | ||
| 521 | tctr = task_counter.find(key); | ||
| 522 | } | ||
| 523 | |||
| 524 | if (!tctr->second) | ||
| 525 | continue; | ||
| 526 | |||
| 527 | cctr = cluster_counter.find(cluster); | ||
| 528 | if (cctr == cluster_counter.end()) | ||
| 529 | { | ||
| 530 | cluster_counter[cluster] = max_remote_requests; | ||
| 531 | cctr = cluster_counter.find(cluster); | ||
| 532 | } | ||
| 533 | |||
| 534 | if (!cctr->second) | ||
| 535 | continue; | ||
| 536 | |||
| 537 | unsigned int remaining; | ||
| 538 | remaining = std::min(tctr->second, cctr->second); | ||
| 539 | remaining = std::min(remaining, max_total); | ||
| 540 | unsigned int num = std::min(req->get_max_num_requests(interval), remaining); | ||
| 541 | |||
| 542 | inter.total_length += num * req->get_request_length(); | ||
| 543 | inter.count += num; | ||
| 544 | cctr->second -= num; | ||
| 545 | tctr->second -= num; | ||
| 546 | max_total -= num; | ||
| 547 | } | ||
| 548 | |||
| 549 | return inter; | ||
| 550 | } | ||
| 551 | |||
| 552 | |||
| 553 | static Interference tf_reader_all( | ||
| 554 | const TaskInfo& tsk, | ||
| 555 | const Resources& all_reads, | ||
| 556 | const unsigned int num_writes, | ||
| 557 | const unsigned int num_wblock, | ||
| 558 | const unsigned int num_reads, | ||
| 559 | const unsigned int res_id, | ||
| 560 | const unsigned int procs_per_cluster) | ||
| 561 | { | ||
| 562 | Interference blocking; | ||
| 563 | unsigned int num_reqs = num_reads + num_writes; | ||
| 564 | unsigned int max_reader_phases = num_wblock + num_writes; | ||
| 565 | unsigned int task_limit = std::min(max_reader_phases, num_reqs); | ||
| 566 | |||
| 567 | return bound_blocking_all( | ||
| 568 | &tsk, all_reads[res_id], | ||
| 569 | num_reqs * procs_per_cluster, | ||
| 570 | num_reqs * (procs_per_cluster - 1), | ||
| 571 | task_limit, | ||
| 572 | max_reader_phases); | ||
| 573 | } | ||
| 574 | |||
| 575 | |||
| 576 | BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info, | ||
| 577 | const ResourceSharingInfo& info_mtx, | ||
| 578 | unsigned int procs_per_cluster, | ||
| 579 | int dedicated_irq) | ||
| 580 | { | ||
| 581 | // split everything by partition | ||
| 582 | Clusters clusters, clusters_mtx; | ||
| 583 | |||
| 584 | split_by_cluster(info, clusters); | ||
| 585 | split_by_cluster(info_mtx, clusters_mtx); | ||
| 586 | |||
| 587 | // split each partition by resource | ||
| 588 | ClusterResources resources, resources_mtx; | ||
| 589 | |||
| 590 | split_by_resource(clusters, resources); | ||
| 591 | split_by_resource(clusters_mtx, resources_mtx); | ||
| 592 | |||
| 593 | // split all by resource | ||
| 594 | Resources all_task_reqs, all_reads, __all_writes; | ||
| 595 | split_by_resource(info, all_task_reqs); | ||
| 596 | split_by_type(all_task_reqs, all_reads, __all_writes); | ||
| 597 | |||
| 598 | // sort each contention set by request length | ||
| 599 | sort_by_request_length(resources); | ||
| 600 | sort_by_request_length(resources_mtx); | ||
| 601 | sort_by_request_length(all_reads); | ||
| 602 | |||
| 603 | // split by type --- sorted order is maintained | ||
| 604 | ClusterResources __reads, writes; | ||
| 605 | split_by_type(resources, __reads, writes); | ||
| 606 | |||
| 607 | |||
| 608 | // We need for each task the maximum request span. We also need the | ||
| 609 | // maximum direct blocking from remote partitions for each request. We | ||
| 610 | // can determine both in one pass. | ||
| 611 | |||
| 612 | unsigned int i; | ||
| 613 | |||
| 614 | // direct blocking results | ||
| 615 | BlockingBounds* _results = new BlockingBounds(info); | ||
| 616 | BlockingBounds& results = *_results; | ||
| 617 | |||
| 618 | for (i = 0; i < info.get_tasks().size(); i++) | ||
| 619 | { | ||
| 620 | const TaskInfo& tsk = info.get_tasks()[i]; | ||
| 621 | RWCounts rwcounts; | ||
| 622 | |||
| 623 | Interference bterm; | ||
| 624 | |||
| 625 | merge_rw_requests(tsk, rwcounts); | ||
| 626 | |||
| 627 | foreach(rwcounts, jt) | ||
| 628 | { | ||
| 629 | const RWCount& rw = *jt; | ||
| 630 | |||
| 631 | // skip placeholders | ||
| 632 | if (!rw.num_reads && !rw.num_writes) | ||
| 633 | continue; | ||
| 634 | |||
| 635 | |||
| 636 | // 1) treat it as a mutex as a baseline | ||
| 637 | Interference mtx, mtx_1; | ||
| 638 | |||
| 639 | mtx = np_fifo_per_resource( | ||
| 640 | tsk, resources_mtx, procs_per_cluster, rw.res_id, | ||
| 641 | rw.num_reads + rw.num_writes, | ||
| 642 | dedicated_irq); | ||
| 643 | |||
| 644 | if (rw.num_reads + rw.num_writes == 1) | ||
| 645 | mtx_1 = mtx; | ||
| 646 | else | ||
| 647 | mtx_1 = np_fifo_per_resource( | ||
| 648 | tsk, resources_mtx, procs_per_cluster, | ||
| 649 | rw.res_id, 1, dedicated_irq); | ||
| 650 | |||
| 651 | // The span includes our own request. | ||
| 652 | mtx_1.total_length += std::max(rw.wlength, rw.rlength); | ||
| 653 | mtx_1.count += 1; | ||
| 654 | |||
| 655 | // 2) apply real RW analysis | ||
| 656 | Interference wblocking, wblocking_1; | ||
| 657 | Interference rblocking, rblocking_r1, rblocking_w1; | ||
| 658 | |||
| 659 | wblocking = np_fifo_per_resource( | ||
| 660 | tsk, writes, procs_per_cluster, rw.res_id, | ||
| 661 | rw.num_reads + rw.num_writes, | ||
| 662 | dedicated_irq); | ||
| 663 | wblocking_1 = np_fifo_per_resource( | ||
| 664 | tsk, writes, procs_per_cluster, rw.res_id, 1, | ||
| 665 | dedicated_irq); | ||
| 666 | |||
| 667 | rblocking = tf_reader_all( | ||
| 668 | tsk, all_reads, rw.num_writes, wblocking.count, | ||
| 669 | rw.num_reads, rw.res_id, procs_per_cluster); | ||
| 670 | |||
| 671 | if (rw.num_writes) | ||
| 672 | { | ||
| 673 | // single write | ||
| 674 | rblocking_w1 = tf_reader_all( | ||
| 675 | tsk, all_reads, 1, wblocking.count, | ||
| 676 | 0, rw.res_id, procs_per_cluster); | ||
| 677 | // The span includes our own request. | ||
| 678 | rblocking_w1.total_length += rw.wlength; | ||
| 679 | rblocking_w1.count += 1; | ||
| 680 | } | ||
| 681 | if (rw.num_reads) | ||
| 682 | { | ||
| 683 | // single read | ||
| 684 | rblocking_r1 = tf_reader_all( | ||
| 685 | tsk, all_reads, 0, wblocking.count, | ||
| 686 | 1, rw.res_id, procs_per_cluster); | ||
| 687 | // The span includes our own request. | ||
| 688 | rblocking_r1.total_length += rw.rlength; | ||
| 689 | rblocking_r1.count += 1; | ||
| 690 | } | ||
| 691 | |||
| 692 | // combine | ||
| 693 | wblocking += rblocking; | ||
| 694 | wblocking_1 += std::max(rblocking_w1, rblocking_r1); | ||
| 695 | |||
| 696 | bterm += std::min(wblocking, mtx); | ||
| 697 | results.raise_request_span(i, std::min(wblocking_1, mtx_1)); | ||
| 698 | } | ||
| 699 | results[i] = bterm; | ||
| 700 | } | ||
| 701 | |||
| 702 | // This is the initial delay due to priority donation. | ||
| 703 | charge_arrival_blocking(info, results); | ||
| 704 | |||
| 705 | return _results; | ||
| 706 | } | ||
| 707 | |||
| 708 | |||
