author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-04-15 17:47:15 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-04-15 17:47:15 -0400
commit    7800dfe908ccf5e989ab3bfef7102c88ca81b2a7
tree      1c9b54923eba37b5b902ceb5e1991e9d57f73c8c
parent    9b1379950e7de5e233495758fe1a92335d0e8080
Add support for changing cluster size upon re-activation

- Cluster size can be modified by unloading / reloading the plugin
- Add proper cleanup functions
- Polish comments
-rw-r--r--    litmus/sched_cedf.c    142
1 file changed, 86 insertions(+), 56 deletions(-)
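For orientation: in LITMUS^RT the scheduler core invokes deactivate_plugin() on the outgoing plugin and activate_plugin() on the incoming one when the active plugin is switched (via /proc/litmus/active_plugin). The sketch below is a hypothetical reduction of that switch sequence, using only the sched_plugin callbacks visible in this patch; switch_sched_plugin_sketch() is an illustrative stand-in for the core's actual switch routine, which is not part of this commit.

#include <litmus/sched_plugin.h>

/* Hypothetical sketch (not from this patch): how a plugin switch might
 * drive the two lifecycle hooks.  The old plugin is torn down before the
 * new one is brought up, so C-EDF can free its per-cluster state and
 * rebuild it from scratch on the next activation. */
static long switch_sched_plugin_sketch(struct sched_plugin *prev,
				       struct sched_plugin *next)
{
	long ret = 0;

	if (prev->deactivate_plugin)
		ret = prev->deactivate_plugin();  /* C-EDF: cleanup_cedf() */
	if (!ret && next->activate_plugin)
		ret = next->activate_plugin();    /* C-EDF: re-read topology,
						   * rebuild cedf[] clusters */
	return ret;
}

Because deactivation now releases all per-cluster state, a subsequent activation with a different cluster_cache_index rebuilds the cedf[] array at the new size instead of leaking the old one, which is what the commit title means by "changing cluster size upon re-activation".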
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index db543f179838..d79278a571f7 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -11,6 +11,8 @@
  * - The plugins _does not_ attempt to put tasks in the right cluster i.e.
  *   the programmer needs to be aware of the topology to place tasks
  *   in the desired cluster
+ * - default clustering is around L2 cache (cache index = 2)
+ *   supported clusters are: L1 (private cache: pedf), L2, L3
  *
  * For details on functions, take a look at sched_gsn_edf.c
  *
@@ -85,10 +87,12 @@ cedf_domain_t *cedf;
 #define remote_cluster(cpu)	((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster)
 #define task_cpu_cluster(task)	remote_cluster(get_partition(task))
 
-/* Uncomment this if you want to see all scheduling decisions in the
- * TRACE() log.
+/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
+ * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
+ * information during the initialization of the plugin (e.g., topology)
 #define WANT_ALL_SCHED_EVENTS
  */
+#define VERBOSE_INIT
 
 static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
@@ -745,97 +749,107 @@ static int num_clusters;
 /* we do not support cluster of different sizes */
 static unsigned int cluster_size;
 
-static long cedf_activate_plugin(void)
+#ifdef VERBOSE_INIT
+static void print_cluster_topology(cpumask_var_t mask, int cpu)
+{
+	int chk;
+	char buf[255];
+
+	chk = cpulist_scnprintf(buf, 254, mask);
+	buf[chk] = '\0';
+	printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);
+
+}
+
+static void cluster_check(void)
 {
 	int i,j;
-	/* system checkup */
+
 	for (i = 0; i < num_clusters; i++) {
-		printk(KERN_INFO "\nclus = %d\n", i);
+		printk(KERN_INFO "\nCluster = %d\ncpus: ", i);
 		for (j = 0; j < cluster_size; j++)
-			printk(KERN_INFO "cpus = %d\n", (cedf[i].cpus[j])->cpu);
+			printk(KERN_INFO "%d ", (cedf[i].cpus[j])->cpu);
 	}
+}
+#endif
 
-	return 0;
+static void cleanup_cedf(void)
+{
+	int i;
+	for (i = 0; i < num_clusters; i++) {
+		kfree(cedf[i].cpus);
+		kfree(cedf[i].heap_node);
+		free_cpumask_var(cedf[i].cpu_map);
+	}
+
+	kfree(cedf);
 }
 
-/* Plugin object */
-static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
-	.plugin_name		= "C-EDF",
-	.finish_switch		= cedf_finish_switch,
-	.tick			= cedf_tick,
-	.task_new		= cedf_task_new,
-	.complete_job		= complete_job,
-	.task_exit		= cedf_task_exit,
-	.schedule		= cedf_schedule,
-	.task_wake_up		= cedf_task_wake_up,
-	.task_block		= cedf_task_block,
-#ifdef CONFIG_FMLP
-	.fmlp_active		= 1,
-	.pi_block		= cedf_pi_block,
-	.inherit_priority	= cedf_inherit_priority,
-	.return_priority	= cedf_return_priority,
-#endif
-	.admit_task		= cedf_admit_task,
-	.activate_plugin	= cedf_activate_plugin,
-};
+static long cedf_deactivate_plugin(void)
+{
+	cleanup_cedf();
+	printk(KERN_INFO "C-EDF: Deactivate Plugin\n");
 
+	return 0;
+}
 
-static int __init init_cedf(void)
+static long cedf_activate_plugin(void)
 {
 	int i, j, cpu, ccpu, cpu_count;
 	cpu_entry_t *entry;
 
 	cpumask_var_t mask;
-	char buf[255];
 	int chk = 0;
 
-	printk(KERN_INFO "C-EDF: Init Plugin, cluster around cache index %d\n",
+	printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n",
 			cluster_cache_index);
 
 	/* need to get cluster_size first */
-	if(!zalloc_cpumask_var(&mask, GFP_KERNEL))
+	if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
 		return -ENOMEM;
 
 	chk = get_shared_cpu_map(mask, 0, cluster_cache_index);
 	if (chk) {
 		/* if chk != 0 then it is the max allowed index */
+		printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n",
+				cluster_cache_index);
+		printk(KERN_INFO "C-EDF: Using cache index = %d\n",
+				chk);
 		cluster_cache_index = chk;
 	}
 
 	cluster_size = cpumask_weight(mask);
-	printk(KERN_INFO "cluster_size = %d\n", cluster_size);
 
 	if ((num_online_cpus() % cluster_size) != 0) {
 		/* this can't be right, some cpus are left out */
-		printk(KERN_INFO "C-EDF: Trying to group %d cpus in %d!\n",
+		printk(KERN_ERR "C-EDF: Trying to group %d cpus in %d!\n",
 				num_online_cpus(), cluster_size);
 		return -1;
 	}
 
 	num_clusters = num_online_cpus() / cluster_size;
-	printk(KERN_INFO "online_cpus = %d, nclus = %d\n", num_online_cpus(), num_clusters);
+	printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n",
+			num_clusters, cluster_size);
 
 	/* initialize clusters */
-	cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_KERNEL);
+	cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC);
 	for (i = 0; i < num_clusters; i++) {
 
 		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
-				GFP_KERNEL);
+				GFP_ATOMIC);
 		cedf[i].heap_node = kmalloc(
 				cluster_size * sizeof(struct bheap_node),
-				GFP_KERNEL);
+				GFP_ATOMIC);
 		bheap_init(&(cedf[i].cpu_heap));
 		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
 
-		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_KERNEL))
+		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
 
 	/* cycle through cluster and add cpus to them */
 	for (i = 0; i < num_clusters; i++) {
 
-		printk(KERN_INFO "cluster %d\n", i);
-
 		for_each_online_cpu(cpu) {
 			/* check if the cpu is already in a cluster */
 			for (j = 0; j < num_clusters; j++)
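One caveat in the hunk above: the kmalloc() calls for cedf, cedf[i].cpus, and cedf[i].heap_node are never checked for failure, and GFP_ATOMIC allocations are the ones most likely to fail under pressure. A hedged sketch of how those lines of cedf_activate_plugin() could unwind through the new cleanup_cedf(); this is illustrative hardening, not something this patch does:

	/* Illustrative only (not in this patch): same allocations with
	 * failure checks.  kzalloc() keeps untouched slots NULL, and
	 * kfree(NULL) / free_cpumask_var() on a NULL off-stack mask
	 * are harmless, so cleanup_cedf() can walk the full array. */
	cedf = kzalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC);
	if (!cedf)
		return -ENOMEM;
	for (i = 0; i < num_clusters; i++) {
		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
				GFP_ATOMIC);
		cedf[i].heap_node = kmalloc(
				cluster_size * sizeof(struct bheap_node),
				GFP_ATOMIC);
		if (!cedf[i].cpus || !cedf[i].heap_node ||
		    !zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) {
			cleanup_cedf();	/* frees whatever was allocated */
			return -ENOMEM;
		}
		bheap_init(&(cedf[i].cpu_heap));
		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
	}

As in the patch itself, the temporary mask allocated earlier in the function would also need free_cpumask_var() on these error paths.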
@@ -846,20 +860,15 @@ static int __init init_cedf(void)
 				continue;
 
 			/* this cpu isn't in any cluster */
-			printk(KERN_INFO "cpu = %d\n", cpu);
-
 			/* get the shared cpus */
 			get_shared_cpu_map(mask, cpu, cluster_cache_index);
 			cpumask_copy(cedf[i].cpu_map, mask);
-
-			chk = cpulist_scnprintf(buf, 254, mask);
-			buf[chk] = '\0';
-			printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);
-
+#ifdef VERBOSE_INIT
+			print_cluster_topology(mask, cpu);
+#endif
 			/* add cpus to current cluster and init cpu_entry_t */
 			cpu_count = 0;
 			for_each_cpu(ccpu, cedf[i].cpu_map) {
-				printk(KERN_INFO "cCPU = %d\n", ccpu);
 
 				entry = &per_cpu(cedf_cpu_entries, ccpu);
 				cedf[i].cpus[cpu_count] = entry;
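The inline mask-printing removed here now lives in print_cluster_topology(), which wraps cpulist_scnprintf(). That helper renders a cpumask in list notation (e.g. "0-3" or "0,2,4-7") and returns the number of characters written. A minimal standalone usage sketch, assuming the cpumask API of this kernel generation; show_online_cpus() and its output line are illustrative, only the buffer idiom is taken from the patch:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	/* Illustrative only: print the online-CPU mask the same way
	 * print_cluster_topology() prints a cluster's shared-cache mask. */
	static void show_online_cpus(void)
	{
		char buf[255];
		int chk;

		chk = cpulist_scnprintf(buf, sizeof(buf) - 1, cpu_online_mask);
		buf[chk] = '\0';	/* e.g. buf = "0-7" on an 8-core box */
		printk(KERN_INFO "online cpu(s) = %s\n", buf);
	}

Wrapping this in a VERBOSE_INIT-only helper also drops the 255-byte buf from cedf_activate_plugin()'s stack frame in normal builds.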
@@ -881,19 +890,40 @@ static int __init init_cedf(void)
 	}
 
 	free_cpumask_var(mask);
+	return 0;
+}
+
+/* Plugin object */
+static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
+	.plugin_name		= "C-EDF",
+	.finish_switch		= cedf_finish_switch,
+	.tick			= cedf_tick,
+	.task_new		= cedf_task_new,
+	.complete_job		= complete_job,
+	.task_exit		= cedf_task_exit,
+	.schedule		= cedf_schedule,
+	.task_wake_up		= cedf_task_wake_up,
+	.task_block		= cedf_task_block,
+#ifdef CONFIG_FMLP
+	.fmlp_active		= 1,
+	.pi_block		= cedf_pi_block,
+	.inherit_priority	= cedf_inherit_priority,
+	.return_priority	= cedf_return_priority,
+#endif
+	.admit_task		= cedf_admit_task,
+	.activate_plugin	= cedf_activate_plugin,
+	.deactivate_plugin	= cedf_deactivate_plugin,
+};
+
+
+static int __init init_cedf(void)
+{
 	return register_sched_plugin(&cedf_plugin);
 }
 
 static void clean_cedf(void)
 {
-	int i;
-	for (i = 0; i < num_clusters; i++) {
-		kfree(cedf[i].cpus);
-		kfree(cedf[i].heap_node);
-		free_cpumask_var(cedf[i].cpu_map);
-	}
-
-	kfree(cedf);
+	cleanup_cedf();
 }
 
 module_init(init_cedf);