author		Len Brown <len.brown@intel.com>	2008-02-07 03:30:48 -0500
committer	Len Brown <len.brown@intel.com>	2008-02-07 03:30:48 -0500
commit		8976b6fd7a0060f72e20d5cec833c03d50874cd1 (patch)
tree		d5c98a6fc41e19b5a85e9181072d74452b2dc2ab /drivers/acpi
parent		52b097fff89b14c0b8b7a7deef1d274889b1886d (diff)
parent		3391a76f2bbb74e42b9ba44c05a7366ffd388753 (diff)
Merge branches 'release' and 'throttling-domains' into release
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/processor_core.c		|   2
-rw-r--r--	drivers/acpi/processor_throttling.c	| 346
2 files changed, 342 insertions(+), 6 deletions(-)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 315fd8f7e8a1..75ccf5d18bf4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -1091,6 +1091,8 @@ static int __init acpi_processor_init(void)
 
 	acpi_processor_ppc_init();
 
+	acpi_processor_throttling_init();
+
 	return 0;
 
 out_cpuidle:
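The ordering of this hook matters: each CPU's _TSD package is parsed while the processor driver binds to the individual CPU objects, and only afterwards can the cross-CPU coordination map be derived. A simplified sketch of that ordering follows (not the literal upstream function; it assumes the 2.6.24-era driver layout, and unrelated init steps and error paths are omitted):

static int __init acpi_processor_init(void)
{
	int result;

	/* Binding the driver walks every processor object and, per CPU,
	 * ends up in acpi_processor_get_throttling_info(), which parses
	 * _TSD and fills pr->throttling.domain_info. */
	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		return result;

	acpi_processor_ppc_init();

	/* Only now is _TSD data available for all CPUs, so the
	 * cross-CPU T-state coordination can be computed. */
	acpi_processor_throttling_init();

	return 0;
}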
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1685b40abda7..1b8e592a8241 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -45,9 +45,229 @@
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_throttling");
 
+struct throttling_tstate {
+	unsigned int cpu;		/* cpu nr */
+	int target_state;		/* target T-state */
+};
+
+#define THROTTLING_PRECHANGE      (1)
+#define THROTTLING_POSTCHANGE     (2)
+
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
 
+static int acpi_processor_update_tsd_coord(void)
+{
+	int count, count_target;
+	int retval = 0;
+	unsigned int i, j;
+	cpumask_t covered_cpus;
+	struct acpi_processor *pr, *match_pr;
+	struct acpi_tsd_package *pdomain, *match_pdomain;
+	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+
+	/*
+	 * Now that we have _TSD data from all CPUs, let's set up T-state
+	 * coordination between all CPUs.
+	 */
+	for_each_possible_cpu(i) {
+		pr = processors[i];
+		if (!pr)
+			continue;
+
+		/* Basic validity check for domain info */
+		pthrottling = &(pr->throttling);
+
+		/*
+		 * If the _TSD package for one CPU is invalid, the
+		 * coordination among all CPUs is treated as invalid.
+		 */
+		if (!pthrottling->tsd_valid_flag) {
+			retval = -EINVAL;
+			break;
+		}
+	}
+	if (retval)
+		goto err_ret;
+
+	cpus_clear(covered_cpus);
+	for_each_possible_cpu(i) {
+		pr = processors[i];
+		if (!pr)
+			continue;
+
+		if (cpu_isset(i, covered_cpus))
+			continue;
+		pthrottling = &pr->throttling;
+
+		pdomain = &(pthrottling->domain_info);
+		cpu_set(i, pthrottling->shared_cpu_map);
+		cpu_set(i, covered_cpus);
+		/*
+		 * If the number of processors in the TSD domain is 1, it is
+		 * unnecessary to parse the coordination for this CPU.
+		 */
+		if (pdomain->num_processors <= 1)
+			continue;
+
+		/* Validate the Domain info */
+		count_target = pdomain->num_processors;
+		count = 1;
+
+		for_each_possible_cpu(j) {
+			if (i == j)
+				continue;
+
+			match_pr = processors[j];
+			if (!match_pr)
+				continue;
+
+			match_pthrottling = &(match_pr->throttling);
+			match_pdomain = &(match_pthrottling->domain_info);
+			if (match_pdomain->domain != pdomain->domain)
+				continue;
+
+			/*
+			 * Here i and j are in the same domain.
+			 * If two TSD packages have the same domain, they
+			 * should have the same num_processors and
+			 * coordination type. Otherwise it will be regarded
+			 * as illegal.
+			 */
+			if (match_pdomain->num_processors != count_target) {
+				retval = -EINVAL;
+				goto err_ret;
+			}
+
+			if (pdomain->coord_type != match_pdomain->coord_type) {
+				retval = -EINVAL;
+				goto err_ret;
+			}
+
+			cpu_set(j, covered_cpus);
+			cpu_set(j, pthrottling->shared_cpu_map);
+			count++;
+		}
+		for_each_possible_cpu(j) {
+			if (i == j)
+				continue;
+
+			match_pr = processors[j];
+			if (!match_pr)
+				continue;
+
+			match_pthrottling = &(match_pr->throttling);
+			match_pdomain = &(match_pthrottling->domain_info);
+			if (match_pdomain->domain != pdomain->domain)
+				continue;
+
+			/*
+			 * If some CPUs have the same domain, they
+			 * will have the same shared_cpu_map.
+			 */
+			match_pthrottling->shared_cpu_map =
+				pthrottling->shared_cpu_map;
+		}
+	}
+
+err_ret:
+	for_each_possible_cpu(i) {
+		pr = processors[i];
+		if (!pr)
+			continue;
+
+		/*
+		 * Assume no coordination on any error parsing domain info.
+		 * The coordination type will be forced to SW_ALL.
+		 */
+		if (retval) {
+			pthrottling = &(pr->throttling);
+			cpus_clear(pthrottling->shared_cpu_map);
+			cpu_set(i, pthrottling->shared_cpu_map);
+			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Update the T-state coordination after the _TSD
+ * data for all cpus is obtained.
+ */
+void acpi_processor_throttling_init(void)
+{
+	if (acpi_processor_update_tsd_coord())
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"Assume no T-state coordination\n"));
+
+	return;
+}
+
+static int acpi_processor_throttling_notifier(unsigned long event, void *data)
+{
+	struct throttling_tstate *p_tstate = data;
+	struct acpi_processor *pr;
+	unsigned int cpu;
+	int target_state;
+	struct acpi_processor_limit *p_limit;
+	struct acpi_processor_throttling *p_throttling;
+
+	cpu = p_tstate->cpu;
+	pr = processors[cpu];
+	if (!pr) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+		return 0;
+	}
+	if (!pr->flags.throttling) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
+				"unsupported on CPU %d\n", cpu));
+		return 0;
+	}
+	target_state = p_tstate->target_state;
+	p_throttling = &(pr->throttling);
+	switch (event) {
+	case THROTTLING_PRECHANGE:
+		/*
+		 * The prechange event is used to choose one proper T-state,
+		 * which meets the limits of thermal, user and _TPC.
+		 */
+		p_limit = &pr->limit;
+		if (p_limit->thermal.tx > target_state)
+			target_state = p_limit->thermal.tx;
+		if (p_limit->user.tx > target_state)
+			target_state = p_limit->user.tx;
+		if (pr->throttling_platform_limit > target_state)
+			target_state = pr->throttling_platform_limit;
+		if (target_state >= p_throttling->state_count) {
+			printk(KERN_WARNING
+				"Exceed the limit of T-state\n");
+			target_state = p_throttling->state_count - 1;
+		}
+		p_tstate->target_state = target_state;
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
+				"target T-state of CPU %d is T%d\n",
+				cpu, target_state));
+		break;
+	case THROTTLING_POSTCHANGE:
+		/*
+		 * The postchange event is only used to update the
+		 * T-state flag of acpi_processor_throttling.
+		 */
+		p_throttling->state = target_state;
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
+				"CPU %d is switched to T%d\n",
+				cpu, target_state));
+		break;
+	default:
+		printk(KERN_WARNING
+			"Unsupported Throttling notifier event\n");
+		break;
+	}
+
+	return 0;
+}
+
 /*
  * _TPC - Throttling Present Capabilities
  */
@@ -293,6 +513,10 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	struct acpi_buffer state = { 0, NULL };
 	union acpi_object *tsd = NULL;
 	struct acpi_tsd_package *pdomain;
+	struct acpi_processor_throttling *pthrottling;
+
+	pthrottling = &pr->throttling;
+	pthrottling->tsd_valid_flag = 0;
 
 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
@@ -340,6 +564,22 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 		goto end;
 	}
 
+	pthrottling = &pr->throttling;
+	pthrottling->tsd_valid_flag = 1;
+	pthrottling->shared_type = pdomain->coord_type;
+	cpu_set(pr->id, pthrottling->shared_cpu_map);
+	/*
+	 * If the coordination type is not defined in the ACPI spec,
+	 * tsd_valid_flag will be cleared and the coordination type
+	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
+	 */
+	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+		pthrottling->tsd_valid_flag = 0;
+		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+	}
+
 end:
 	kfree(buffer.pointer);
 	return result;
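The coordination type returned by _TSD is only meaningful if it is one of the three values the ACPI spec defines for coordination objects (software-all, software-any, hardware-all); anything else makes the code above fall back to SW_ALL. A small illustrative sketch of that validation, with constants mirroring the spec values rather than the kernel's own DOMAIN_COORD_TYPE_* macros:

/* Illustrative mirror of the ACPI-defined coordination types
 * (0xFC/0xFD/0xFE per the ACPI spec; assumption: the kernel macros
 * DOMAIN_COORD_TYPE_* carry the same values). */
enum coord_type {
	COORD_SW_ALL = 0xfc,	/* OSPM coordinates, all CPUs must be programmed */
	COORD_SW_ANY = 0xfd,	/* OSPM coordinates, programming any one CPU suffices */
	COORD_HW_ALL = 0xfe,	/* hardware coordinates across the domain */
};

/* Returns 1 if the _TSD coordination type is one the driver understands;
 * otherwise the caller falls back to software coordination of all CPUs. */
int tsd_coord_type_valid(unsigned int coord_type)
{
	return coord_type == COORD_SW_ALL ||
	       coord_type == COORD_SW_ANY ||
	       coord_type == COORD_HW_ALL;
}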
@@ -589,6 +829,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	cpumask_t saved_mask;
 	int ret;
 
+	if (!pr)
+		return -EINVAL;
+
+	if (!pr->flags.throttling)
+		return -ENODEV;
 	/*
 	 * Migrate task to the cpu pointed by pr.
 	 */
@@ -742,13 +987,92 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
-	int ret;
+	int ret = 0;
+	unsigned int i;
+	struct acpi_processor *match_pr;
+	struct acpi_processor_throttling *p_throttling;
+	struct throttling_tstate t_state;
+	cpumask_t online_throttling_cpus;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (!pr->flags.throttling)
+		return -ENODEV;
+
+	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+		return -EINVAL;
+
+	saved_mask = current->cpus_allowed;
+	t_state.target_state = state;
+	p_throttling = &(pr->throttling);
+	cpus_and(online_throttling_cpus, cpu_online_map,
+			p_throttling->shared_cpu_map);
 	/*
-	 * Migrate task to the cpu pointed by pr.
+	 * The throttling notifier will be called for every
+	 * affected cpu in order to get one proper T-state.
+	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
-	ret = pr->throttling.acpi_processor_set_throttling(pr, state);
+	for_each_cpu_mask(i, online_throttling_cpus) {
+		t_state.cpu = i;
+		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+				&t_state);
+	}
+	/*
+	 * The acpi_processor_set_throttling hook will be called to
+	 * switch the T-state. If the coordination type is SW_ALL or
+	 * HW_ALL, it is necessary to call it for every affected cpu.
+	 * Otherwise it can be called only for the cpu pointed to by pr.
+	 */
+	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+		set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+		ret = p_throttling->acpi_processor_set_throttling(pr,
+						t_state.target_state);
+	} else {
+		/*
+		 * When the T-state coordination is SW_ALL or HW_ALL,
+		 * it is necessary to set the T-state for every affected
+		 * cpu.
+		 */
+		for_each_cpu_mask(i, online_throttling_cpus) {
+			match_pr = processors[i];
+			/*
+			 * If the pointer is invalid, we will report the
+			 * error message and continue.
+			 */
+			if (!match_pr) {
+				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					"Invalid pointer for CPU %d\n", i));
+				continue;
+			}
+			/*
+			 * If throttling control is unsupported on CPU i,
+			 * we will report the error message and continue.
+			 */
+			if (!match_pr->flags.throttling) {
+				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					"Throttling control is unsupported "
+					"on CPU %d\n", i));
+				continue;
+			}
+			t_state.cpu = i;
+			set_cpus_allowed(current, cpumask_of_cpu(i));
+			ret = match_pr->throttling.
+				acpi_processor_set_throttling(
+				match_pr, t_state.target_state);
+		}
+	}
+	/*
+	 * After set_throttling is called, the throttling notifier
+	 * is called for every affected cpu to update the T-state.
+	 * The notifier event is THROTTLING_POSTCHANGE.
+	 */
+	for_each_cpu_mask(i, online_throttling_cpus) {
+		t_state.cpu = i;
+		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+				&t_state);
+	}
 	/* restore the previous state */
 	set_cpus_allowed(current, saved_mask);
 	return ret;
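Taken together, the rewritten acpi_processor_set_throttling() follows a three-phase pattern: notify PRECHANGE for every online CPU in the domain (which may raise the target state), program the T-state on one CPU or on all of them depending on the coordination type, then notify POSTCHANGE so each CPU's cached state is updated. A condensed, stand-alone model of that control flow, with hypothetical stubs standing in for the notifier and the per-CPU set_throttling hook:

#include <stdio.h>

/* Stubs standing in for the throttling notifier and the per-CPU
 * ->acpi_processor_set_throttling() hook (not the kernel functions). */
enum { PRECHANGE, POSTCHANGE };

static int notify(int event, int cpu, int state)
{
	printf("cpu%d: %s T%d\n", cpu,
	       event == PRECHANGE ? "prechange ->" : "postchange at", state);
	return state;	/* a real PRECHANGE may raise the state further */
}

static int set_one_cpu(int cpu, int state)
{
	printf("cpu%d: program T%d\n", cpu, state);
	return 0;
}

static int set_throttling_domain(const int *cpus, int ncpus,
				 int lead_cpu, int state, int sw_any)
{
	int i, ret = 0;

	for (i = 0; i < ncpus; i++)		/* phase 1: settle on a T-state */
		state = notify(PRECHANGE, cpus[i], state);

	if (sw_any) {				/* phase 2: program the hardware */
		ret = set_one_cpu(lead_cpu, state);	/* one CPU suffices */
	} else {
		for (i = 0; i < ncpus; i++)	/* SW_ALL / HW_ALL: every CPU */
			ret = set_one_cpu(cpus[i], state);
	}

	for (i = 0; i < ncpus; i++)		/* phase 3: record the result */
		notify(POSTCHANGE, cpus[i], state);

	return ret;
}

int main(void)
{
	int cpus[] = { 0, 1 };
	return set_throttling_domain(cpus, 2, 0, 3, 0);
}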
@@ -757,6 +1081,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
 	int result = 0;
+	struct acpi_processor_throttling *pthrottling;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 		"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -788,7 +1113,16 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 					&acpi_processor_set_throttling_ptc;
 	}
 
-	acpi_processor_get_tsd(pr);
+	/*
+	 * If the TSD package for one CPU can't be parsed successfully,
+	 * it means that this CPU will have no coordination with other CPUs.
+	 */
+	if (acpi_processor_get_tsd(pr)) {
+		pthrottling = &pr->throttling;
+		pthrottling->tsd_valid_flag = 0;
+		cpu_set(pr->id, pthrottling->shared_cpu_map);
+		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+	}
 
 	/*
 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.