author      Heiko Carstens <heiko.carstens@de.ibm.com>    2011-12-27 05:27:09 -0500
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>   2011-12-27 05:27:10 -0500
commit      83a24e32908476c33ea9abc132c73020e2cd3620 (patch)
tree        cd1857a48ce2995dc5d2ab6c0950622120137bb4 /arch
parent      3931723f36165e137c67b8c62346024a6c4f223d (diff)
[S390] topology: get rid of ifdefs
Remove all ifdefs from the topology code and compile it only for the
CONFIG_SCHED_BOOK case. The new code selects SCHED_MC if SCHED_BOOK is
selected; SCHED_MC without SCHED_BOOK is not possible anymore.
Furthermore, various sysfs attributes are no longer available for the
!SCHED_BOOK case, in particular all attributes that correspond to
CPU polarization.
But since all real-world kernels have SCHED_BOOK selected anyway, this
doesn't matter much.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
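
The ifdef removal at the call sites relies on the small accessor helpers added to asm/topology.h below: when CONFIG_SCHED_BOOK is off they collapse to a no-op and a fixed default, so files such as smp.c can call them unconditionally. What follows is a minimal standalone sketch of that pattern only; the names (CONFIG_MY_FEATURE, my_feature_set/my_feature_get, STATE_*) are hypothetical stand-ins and are not part of the patch.

/* Sketch only: CONFIG_MY_FEATURE stands in for a Kconfig symbol like
 * CONFIG_SCHED_BOOK; comment it out to get the "feature disabled" build. */
#include <stdio.h>

#define CONFIG_MY_FEATURE 1

#define STATE_UNKNOWN (-1)
#define STATE_DEFAULT (0)

#ifdef CONFIG_MY_FEATURE
static int my_feature_state[4];
#endif

/* The helpers hide the ifdef: when the feature is off they compile to a
 * no-op and a constant, so every caller stays ifdef-free. */
static inline void my_feature_set(int cpu, int val)
{
#ifdef CONFIG_MY_FEATURE
        my_feature_state[cpu] = val;
#endif
}

static inline int my_feature_get(int cpu)
{
#ifdef CONFIG_MY_FEATURE
        return my_feature_state[cpu];
#else
        return STATE_DEFAULT;
#endif
}

int main(void)
{
        my_feature_set(0, STATE_UNKNOWN);   /* call site needs no #ifdef */
        printf("cpu 0 state: %d\n", my_feature_get(0));
        return 0;
}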
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                 |  11
-rw-r--r--  arch/s390/include/asm/smp.h       |   1
-rw-r--r--  arch/s390/include/asm/topology.h  |  38
-rw-r--r--  arch/s390/kernel/Makefile         |   3
-rw-r--r--  arch/s390/kernel/smp.c            | 113
-rw-r--r--  arch/s390/kernel/topology.c       | 172
6 files changed, 176 insertions, 162 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373679b3744a..6b35b41a09ca 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -191,18 +191,13 @@ config HOTPLUG_CPU
           Say N if you want to disable CPU hotplug.
 
 config SCHED_MC
-        def_bool y
-        prompt "Multi-core scheduler support"
-        depends on SMP
-        help
-          Multi-core scheduler support improves the CPU scheduler's decision
-          making when dealing with multi-core CPU chips at a cost of slightly
-          increased overhead in some places.
+        def_bool n
 
 config SCHED_BOOK
         def_bool y
         prompt "Book scheduler support"
-        depends on SMP && SCHED_MC
+        depends on SMP
+        select SCHED_MC
         help
           Book scheduler support improves the CPU scheduler's decision making
           when dealing with machines that have several books.
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ab47a69fdf07..c32e9123b40c 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -23,7 +23,6 @@ extern void __cpu_die (unsigned int cpu);
 extern int __cpu_up (unsigned int cpu);
 
 extern struct mutex smp_cpu_state_mutex;
-extern int smp_cpu_polarization[];
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 005d77d8ae2a..7016dd7b6bc4 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -4,6 +4,10 @@
 #include <linux/cpumask.h>
 #include <asm/sysinfo.h>
 
+struct cpu;
+
+#ifdef CONFIG_SCHED_BOOK
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
@@ -16,8 +20,6 @@ static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
 #define mc_capable() (1)
 
-#ifdef CONFIG_SCHED_BOOK
-
 extern unsigned char cpu_book_id[NR_CPUS];
 extern cpumask_t cpu_book_map[NR_CPUS];
 
@@ -29,19 +31,43 @@ static inline const struct cpumask *cpu_book_mask(int cpu)
 #define topology_book_id(cpu) (cpu_book_id[cpu])
 #define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
 
-#endif /* CONFIG_SCHED_BOOK */
-
+int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
 
-#define POLARIZATION_UNKNWN (-1)
+#else /* CONFIG_SCHED_BOOK */
+
+static inline void topology_schedule_update(void) { }
+static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+
+#endif /* CONFIG_SCHED_BOOK */
+
+#define POLARIZATION_UNKNOWN (-1)
 #define POLARIZATION_HRZ (0)
 #define POLARIZATION_VL (1)
 #define POLARIZATION_VM (2)
 #define POLARIZATION_VH (3)
 
-#ifdef CONFIG_SMP
+extern int cpu_polarization[];
+
+static inline void cpu_set_polarization(int cpu, int val)
+{
+#ifdef CONFIG_SCHED_BOOK
+        cpu_polarization[cpu] = val;
+#endif
+}
+
+static inline int cpu_read_polarization(int cpu)
+{
+#ifdef CONFIG_SCHED_BOOK
+        return cpu_polarization[cpu];
+#else
+        return POLARIZATION_HRZ;
+#endif
+}
+
+#ifdef CONFIG_SCHED_BOOK
 void s390_init_cpu_topology(void);
 #else
 static inline void s390_init_cpu_topology(void)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dd4f07640919..7d9ec924e7e7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,8 @@ extra-y += head.o init_task.o vmlinux.lds
 extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
 
 obj-$(CONFIG_MODULES)           += s390_ksyms.o module.o
-obj-$(CONFIG_SMP)               += smp.o topology.o
+obj-$(CONFIG_SMP)               += smp.o
+obj-$(CONFIG_SCHED_BOOK)        += topology.o
 obj-$(CONFIG_SMP)               += $(if $(CONFIG_64BIT),switch_cpu64.o, \
                                                 switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)       += suspend.o swsusp_asm64.o
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 14d5211a185d..9cf01e455e50 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -69,9 +69,7 @@ enum s390_cpu_state {
 };
 
 DEFINE_MUTEX(smp_cpu_state_mutex);
-int smp_cpu_polarization[NR_CPUS];
 static int smp_cpu_state[NR_CPUS];
-static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
@@ -369,7 +367,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
                 if (cpu_known(cpu_id))
                         continue;
                 __cpu_logical_map[logical_cpu] = cpu_id;
-                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+                cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
                 if (!cpu_stopped(logical_cpu))
                         continue;
                 set_cpu_present(logical_cpu, true);
@@ -403,7 +401,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
                 if (cpu_known(cpu_id))
                         continue;
                 __cpu_logical_map[logical_cpu] = cpu_id;
-                smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+                cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
                 set_cpu_present(logical_cpu, true);
                 if (cpu >= info->configured)
                         smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -806,7 +804,7 @@ void __init smp_prepare_boot_cpu(void)
         S390_lowcore.percpu_offset = __per_cpu_offset[0];
         current_set[0] = current;
         smp_cpu_state[0] = CPU_STATE_CONFIGURED;
-        smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
+        cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -868,7 +866,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
                 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
                 if (!rc) {
                         smp_cpu_state[cpu] = CPU_STATE_STANDBY;
-                        smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+                        cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                 }
         }
         break;
@@ -877,7 +875,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
                 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
                 if (!rc) {
                         smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
-                        smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+                        cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                 }
         }
         break;
@@ -892,35 +890,6 @@ out:
 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t cpu_polarization_show(struct sys_device *dev,
-                                     struct sysdev_attribute *attr, char *buf)
-{
-        int cpu = dev->id;
-        ssize_t count;
-
-        mutex_lock(&smp_cpu_state_mutex);
-        switch (smp_cpu_polarization[cpu]) {
-        case POLARIZATION_HRZ:
-                count = sprintf(buf, "horizontal\n");
-                break;
-        case POLARIZATION_VL:
-                count = sprintf(buf, "vertical:low\n");
-                break;
-        case POLARIZATION_VM:
-                count = sprintf(buf, "vertical:medium\n");
-                break;
-        case POLARIZATION_VH:
-                count = sprintf(buf, "vertical:high\n");
-                break;
-        default:
-                count = sprintf(buf, "unknown\n");
-                break;
-        }
-        mutex_unlock(&smp_cpu_state_mutex);
-        return count;
-}
-static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
-
 static ssize_t show_cpu_address(struct sys_device *dev,
                                 struct sysdev_attribute *attr, char *buf)
 {
@@ -928,13 +897,11 @@ static ssize_t show_cpu_address(struct sys_device *dev,
 }
 static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
 
-
 static struct attribute *cpu_common_attrs[] = {
 #ifdef CONFIG_HOTPLUG_CPU
         &attr_configure.attr,
 #endif
         &attr_address.attr,
-        &attr_polarization.attr,
         NULL,
 };
 
@@ -1055,11 +1022,20 @@ static int __devinit smp_add_present_cpu(int cpu)
         rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
         if (rc)
                 goto out_cpu;
-        if (!cpu_online(cpu))
-                goto out;
-        rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
-        if (!rc)
-                return 0;
+        if (cpu_online(cpu)) {
+                rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+                if (rc)
+                        goto out_online;
+        }
+        rc = topology_cpu_init(c);
+        if (rc)
+                goto out_topology;
+        return 0;
+
+out_topology:
+        if (cpu_online(cpu))
+                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+out_online:
         sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
 out_cpu:
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1111,61 +1087,16 @@ static ssize_t __ref rescan_store(struct sysdev_class *class,
 static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t dispatching_show(struct sysdev_class *class,
-                                struct sysdev_class_attribute *attr,
-                                char *buf)
+static int __init s390_smp_init(void)
 {
-        ssize_t count;
-
-        mutex_lock(&smp_cpu_state_mutex);
-        count = sprintf(buf, "%d\n", cpu_management);
-        mutex_unlock(&smp_cpu_state_mutex);
-        return count;
-}
-
-static ssize_t dispatching_store(struct sysdev_class *dev,
-                                 struct sysdev_class_attribute *attr,
-                                 const char *buf,
-                                 size_t count)
-{
-        int val, rc;
-        char delim;
-
-        if (sscanf(buf, "%d %c", &val, &delim) != 1)
-                return -EINVAL;
-        if (val != 0 && val != 1)
-                return -EINVAL;
-        rc = 0;
-        get_online_cpus();
-        mutex_lock(&smp_cpu_state_mutex);
-        if (cpu_management == val)
-                goto out;
-        rc = topology_set_cpu_management(val);
-        if (!rc)
-                cpu_management = val;
-out:
-        mutex_unlock(&smp_cpu_state_mutex);
-        put_online_cpus();
-        return rc ? rc : count;
-}
-static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
-                         dispatching_store);
-
-static int __init topology_init(void)
-{
-        int cpu;
-        int rc;
+        int cpu, rc;
 
         register_cpu_notifier(&smp_cpu_nb);
-
 #ifdef CONFIG_HOTPLUG_CPU
         rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
         if (rc)
                 return rc;
 #endif
-        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
-        if (rc)
-                return rc;
         for_each_present_cpu(cpu) {
                 rc = smp_add_present_cpu(cpu);
                 if (rc)
@@ -1173,4 +1104,4 @@ static int __init topology_init(void)
         }
         return 0;
 }
-subsys_initcall(topology_init);
+subsys_initcall(s390_smp_init);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index fdb5b8cb260f..621f89e36c8a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -6,17 +6,17 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/workqueue.h>
 #include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
 
 #define PTF_HORIZONTAL (0UL)
 #define PTF_VERTICAL (1UL)
@@ -41,11 +41,12 @@ static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-#ifdef CONFIG_SCHED_BOOK
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
 unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -85,10 +86,8 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                 for_each_present_cpu(lcpu) {
                         if (cpu_logical_map(lcpu) != rcpu)
                                 continue;
-#ifdef CONFIG_SCHED_BOOK
                         cpumask_set_cpu(lcpu, &book->mask);
                         cpu_book_id[lcpu] = book->id;
-#endif
                         cpumask_set_cpu(lcpu, &core->mask);
                         if (z10) {
                                 cpu_core_id[lcpu] = rcpu;
@@ -96,7 +95,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                         } else {
                                 cpu_core_id[lcpu] = core->id;
                         }
-                        smp_cpu_polarization[lcpu] = tl_cpu->pp;
+                        cpu_set_polarization(lcpu, tl_cpu->pp);
                 }
         }
         return core;
@@ -111,13 +110,11 @@ static void clear_masks(void)
                 cpumask_clear(&info->mask);
                 info = info->next;
         }
-#ifdef CONFIG_SCHED_BOOK
         info = &book_info;
         while (info) {
                 cpumask_clear(&info->mask);
                 info = info->next;
         }
-#endif
 }
 
 static union topology_entry *next_tle(union topology_entry *tle)
@@ -129,26 +126,19 @@ static union topology_entry *next_tle(union topology_entry *tle)
 
 static void tl_to_cores(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
-        struct mask_info *book = &book_info;
-        struct cpuid cpu_id;
-#else
-        struct mask_info *book = NULL;
-#endif
         struct mask_info *core = &core_info;
+        struct mask_info *book = &book_info;
         union topology_entry *tle, *end;
+        struct cpuid cpu_id;
         int z10 = 0;
 
-#ifdef CONFIG_SCHED_BOOK
         get_cpu_id(&cpu_id);
         z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
         spin_lock_irq(&topology_lock);
         clear_masks();
         tle = info->tle;
         end = (union topology_entry *)((unsigned long)info + info->length);
         while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
                 if (z10) {
                         switch (tle->nl) {
                         case 1:
@@ -165,14 +155,11 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
                         tle = next_tle(tle);
                         continue;
                 }
-#endif
                 switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
                 case 2:
                         book = book->next;
                         book->id = tle->container.id;
                         break;
-#endif
                 case 1:
                         core = core->next;
                         core->id = tle->container.id;
@@ -196,7 +183,7 @@ static void topology_update_polarization_simple(void)
 
         mutex_lock(&smp_cpu_state_mutex);
         for_each_possible_cpu(cpu)
-                smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+                cpu_set_polarization(cpu, POLARIZATION_HRZ);
         mutex_unlock(&smp_cpu_state_mutex);
 }
 
@@ -215,8 +202,7 @@ static int ptf(unsigned long fc)
 
 int topology_set_cpu_management(int fc)
 {
-        int cpu;
-        int rc;
+        int cpu, rc;
 
         if (!MACHINE_HAS_TOPOLOGY)
                 return -EOPNOTSUPP;
@@ -227,7 +213,7 @@ int topology_set_cpu_management(int fc)
         if (rc)
                 return -EBUSY;
         for_each_possible_cpu(cpu)
-                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+                cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
         return rc;
 }
 
@@ -239,22 +225,18 @@ static void update_cpu_core_map(void)
         spin_lock_irqsave(&topology_lock, flags);
         for_each_possible_cpu(cpu) {
                 cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
                 cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
         }
         spin_unlock_irqrestore(&topology_lock, flags);
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
         int rc;
 
         rc = stsi(info, 15, 1, 3);
         if (rc != -ENOSYS)
                 return;
-#endif
         stsi(info, 15, 1, 2);
 }
 
@@ -313,23 +295,6 @@ static int __init early_parse_topology(char *p)
 }
 early_param("topology", early_parse_topology);
 
-static int __init init_topology_update(void)
-{
-        int rc;
-
-        rc = 0;
-        if (!MACHINE_HAS_TOPOLOGY) {
-                topology_update_polarization_simple();
-                goto out;
-        }
-        init_timer_deferrable(&topology_timer);
-        set_topology_timer();
-out:
-        update_cpu_core_map();
-        return rc;
-}
-__initcall(init_topology_update);
-
 static void __init alloc_masks(struct sysinfo_15_1_x *info,
                                struct mask_info *mask, int offset)
 {
@@ -357,10 +322,107 @@ void __init s390_init_cpu_topology(void)
         store_topology(info);
         pr_info("The CPU configuration topology of the machine is:");
         for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-                printk(" %d", info->mag[i]);
-        printk(" / %d\n", info->mnest);
+                printk(KERN_CONT " %d", info->mag[i]);
+        printk(KERN_CONT " / %d\n", info->mnest);
         alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
         alloc_masks(info, &book_info, 2);
-#endif
 }
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct sysdev_class *class,
+                                struct sysdev_class_attribute *attr,
+                                char *buf)
+{
+        ssize_t count;
+
+        mutex_lock(&smp_cpu_state_mutex);
+        count = sprintf(buf, "%d\n", cpu_management);
+        mutex_unlock(&smp_cpu_state_mutex);
+        return count;
+}
+
+static ssize_t dispatching_store(struct sysdev_class *dev,
+                                 struct sysdev_class_attribute *attr,
+                                 const char *buf,
+                                 size_t count)
+{
+        int val, rc;
+        char delim;
+
+        if (sscanf(buf, "%d %c", &val, &delim) != 1)
+                return -EINVAL;
+        if (val != 0 && val != 1)
+                return -EINVAL;
+        rc = 0;
+        get_online_cpus();
+        mutex_lock(&smp_cpu_state_mutex);
+        if (cpu_management == val)
+                goto out;
+        rc = topology_set_cpu_management(val);
+        if (!rc)
+                cpu_management = val;
+out:
+        mutex_unlock(&smp_cpu_state_mutex);
+        put_online_cpus();
+        return rc ? rc : count;
+}
+static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
+                         dispatching_store);
+
+static ssize_t cpu_polarization_show(struct sys_device *dev,
+                                     struct sysdev_attribute *attr, char *buf)
+{
+        int cpu = dev->id;
+        ssize_t count;
+
+        mutex_lock(&smp_cpu_state_mutex);
+        switch (cpu_read_polarization(cpu)) {
+        case POLARIZATION_HRZ:
+                count = sprintf(buf, "horizontal\n");
+                break;
+        case POLARIZATION_VL:
+                count = sprintf(buf, "vertical:low\n");
+                break;
+        case POLARIZATION_VM:
+                count = sprintf(buf, "vertical:medium\n");
+                break;
+        case POLARIZATION_VH:
+                count = sprintf(buf, "vertical:high\n");
+                break;
+        default:
+                count = sprintf(buf, "unknown\n");
+                break;
+        }
+        mutex_unlock(&smp_cpu_state_mutex);
+        return count;
+}
+static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+        &attr_polarization.attr,
+        NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+        .attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+        return sysfs_create_group(&cpu->sysdev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+        if (!MACHINE_HAS_TOPOLOGY) {
+                topology_update_polarization_simple();
+                goto out;
+        }
+        init_timer_deferrable(&topology_timer);
+        set_topology_timer();
+out:
+        update_cpu_core_map();
+        return sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
+}
+device_initcall(topology_init);