Diffstat (limited to 'litmus')
 -rw-r--r--   litmus/sched_cedf.c   66
 1 files changed, 39 insertions, 27 deletions

diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e815e31b1d84..098a449c2490 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -45,14 +45,17 @@
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
 
-/*
- * It makes sense only to cluster around L2 or L3, so if cluster_index = 2
- * (default) we cluster all the CPUs that shares a L2 cache, while
- * cluster_cache_index = 3 we cluster all CPs that shares a L3 cache
+/* Reference configuration variable. Determines which cache level is used to
+ * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
+ * all CPUs form a single cluster (just like GSN-EDF).
  */
-int cluster_index = 2;
+static enum {
+	GLOBAL_CLUSTER = 0,
+	L1_CLUSTER = 1,
+	L2_CLUSTER = 2,
+	L3_CLUSTER = 3
+} cluster_config = GLOBAL_CLUSTER;
 
-/* forward declaration... a funny thing with C ;) */
 struct clusterdomain;
 
 /* cpu_entry_t - maintain the linked and scheduled state
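
The enum values are picked so that L1_CLUSTER, L2_CLUSTER and L3_CLUSTER can be handed directly to get_shared_cpu_map() as the cache index (see the hunks below), while GLOBAL_CLUSTER replaces the old convention of comparing cluster_index against num_online_cpus(). To check which CPUs would land in the same cluster for a given level on a particular machine, the kernel's standard cacheinfo sysfs entries can be inspected from userspace; the following stand-alone sketch (illustration only, not part of the patch, and assuming the usual /sys/devices/system/cpu/.../cache layout) prints the sharing sets reported for CPU 0:

/* Illustration only: list which CPUs share each of CPU 0's caches,
 * using the standard cacheinfo sysfs interface (assumed to be present). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[128], level[16], shared[256];
	int idx;

	for (idx = 0; idx < 16; idx++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cache/index%d/level", idx);
		f = fopen(path, "r");
		if (!f)
			break;	/* no further cache descriptions for this CPU */
		if (!fgets(level, sizeof(level), f))
			strcpy(level, "?");
		fclose(f);
		level[strcspn(level, "\n")] = '\0';

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cache/index%d/shared_cpu_list",
			 idx);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(shared, sizeof(shared), f)) {
			shared[strcspn(shared, "\n")] = '\0';
			printf("index%d (L%s): shared with CPUs %s\n",
			       idx, level, shared);
		}
		fclose(f);
	}
	return 0;
}
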
@@ -649,26 +652,25 @@ static long cedf_activate_plugin(void)
 	/* de-allocate old clusters, if any */
 	cleanup_cedf();
 
-	printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n",
-			cluster_index);
+	printk(KERN_INFO "C-EDF: Activate Plugin, cluster configuration = %d\n",
+			cluster_config);
 
 	/* need to get cluster_size first */
 	if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
 		return -ENOMEM;
 
-	if (unlikely(cluster_index == num_online_cpus())) {
-
+	if (unlikely(cluster_config == GLOBAL_CLUSTER)) {
 		cluster_size = num_online_cpus();
 	} else {
-
-		chk = get_shared_cpu_map(mask, 0, cluster_index);
+		chk = get_shared_cpu_map(mask, 0, cluster_config);
 		if (chk) {
 			/* if chk != 0 then it is the max allowed index */
-			printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n",
-					cluster_index);
-			printk(KERN_INFO "C-EDF: Using cache index = %d\n",
-					chk);
-			cluster_index = chk;
+			printk(KERN_INFO "C-EDF: Cluster configuration = %d "
+			       "is not supported on this hardware.\n",
+			       cluster_config);
+			/* User should notice that the configuration failed, so
+			 * let's bail out. */
+			return -EINVAL;
 		}
 
 		cluster_size = cpumask_weight(mask);
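
Condensed, the patched decision reads roughly as follows. This is a non-verbatim sketch (the helper name cedf_cluster_size is invented here for illustration; in the patch the logic sits inline in cedf_activate_plugin()): a global configuration bypasses the topology query entirely, and an unsupported cache level now aborts activation instead of silently falling back to the largest supported index as the old code did.

/* Sketch only (condensed from the hunk above, not the literal patch code):
 * return the cluster size for the current cluster_config, or -EINVAL if the
 * requested cache level is not available on this machine. */
static int cedf_cluster_size(cpumask_var_t mask)
{
	int chk;

	if (unlikely(cluster_config == GLOBAL_CLUSTER))
		return num_online_cpus();	/* one big cluster, as in GSN-EDF */

	/* CPUs that share the selected cache level with CPU 0 */
	chk = get_shared_cpu_map(mask, 0, cluster_config);
	if (chk)
		/* non-zero return: the level does not exist, so bail out */
		return -EINVAL;

	return cpumask_weight(mask);
}
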
@@ -716,10 +718,10 @@ static long cedf_activate_plugin(void)
 
 		/* this cpu isn't in any cluster */
 		/* get the shared cpus */
-		if (unlikely(cluster_index == num_online_cpus()))
+		if (unlikely(cluster_config == GLOBAL_CLUSTER))
 			cpumask_copy(mask, cpu_online_mask);
 		else
-			get_shared_cpu_map(mask, cpu, cluster_index);
+			get_shared_cpu_map(mask, cpu, cluster_config);
 
 		cpumask_copy(cedf[i].cpu_map, mask);
 #ifdef VERBOSE_INIT
@@ -776,11 +778,21 @@ static int proc_read_cluster_size(char *page, char **start,
 				   int *eof, void *data)
 {
 	int len;
-	if (cluster_index >= 1 && cluster_index <= 3)
-		len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_index);
-	else
+	switch (cluster_config) {
+	case GLOBAL_CLUSTER:
 		len = snprintf(page, PAGE_SIZE, "ALL\n");
-
+		break;
+	case L1_CLUSTER:
+	case L2_CLUSTER:
+	case L3_CLUSTER:
+		len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_config);
+		break;
+	default:
+		/* This should be impossible, but let's be paranoid. */
+		len = snprintf(page, PAGE_SIZE, "INVALID (%d)\n",
+				cluster_config);
+		break;
+	}
 	return len;
 }
 
@@ -808,13 +820,13 @@ static int proc_write_cluster_size(struct file *file,
 
 	/* do a quick and dirty comparison to find the cluster size */
 	if (!strcmp(cache_name, "L2"))
-		cluster_index = 2;
+		cluster_config = L2_CLUSTER;
 	else if (!strcmp(cache_name, "L3"))
-		cluster_index = 3;
+		cluster_config = L3_CLUSTER;
 	else if (!strcmp(cache_name, "L1"))
-		cluster_index = 1;
+		cluster_config = L1_CLUSTER;
 	else if (!strcmp(cache_name, "ALL"))
-		cluster_index = num_online_cpus();
+		cluster_config = GLOBAL_CLUSTER;
 	else
 		printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);
 
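
With the write handler above, the clustering mode can be changed from userspace by writing "L1", "L2", "L3" or "ALL" to the proc entry these handlers are attached to (registered via litmus_proc; its exact path is not shown in this diff). A minimal sketch, with the path below assumed purely for illustration:

/* Illustration only: select L2-based clustering before activating C-EDF.
 * The proc path is an assumption; substitute the entry registered on your
 * system for proc_write_cluster_size(). */
#include <stdio.h>

int main(void)
{
	const char *entry = "/proc/litmus/cluster_cache";	/* assumed path */
	FILE *f = fopen(entry, "w");

	if (!f) {
		perror(entry);
		return 1;
	}
	/* Accepted values, per proc_write_cluster_size(): L1, L2, L3, ALL */
	fputs("L2\n", f);
	fclose(f);
	return 0;
}
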