author		Thomas Gleixner <tglx@linutronix.de>	2007-02-16 04:27:55 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-16 11:13:58 -0500
commit		169a0abbe32813af4904cc1605c0f7ea0534f77b (patch)
tree		42594b4ef0729bb38bbd4c9f1e33ec92944a7203
parent		3434933b17fa64adddf83059603c61296f6e1ee2 (diff)
[PATCH] ACPI keep track of timer broadcasting
This is a preparatory patch for highres/dyntick:

- replace the big #ifdef ARCH_APICTIMER_STOPS_ON_C3 hackery by functions
- remove the double switch in the power verify function (in the worst case
  we switched ipi to apic and 20usec later apic to ipi)
- keep track of the state which stops the local APIC timer

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Len Brown <len.brown@intel.com>
Cc: <linux-acpi@vger.kernel.org>
Cc: Andi Kleen <ak@suse.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	drivers/acpi/processor_idle.c	66
-rw-r--r--	include/acpi/processor.h	1
2 files changed, 48 insertions(+), 19 deletions(-)
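For readers skimming the diff below, the following standalone sketch (plain userspace C, hypothetical names such as "struct power" and "timer_check_state"; it is not part of the patch and not kernel code) models only the bookkeeping the commit message describes: timer_broadcast_on_state records the lowest C-state index whose entry is assumed to stop the local APIC timer, with INT_MAX meaning "never".

	#include <limits.h>
	#include <stdio.h>

	struct power {
		int timer_broadcast_on_state;	/* lowest affected C-state index */
	};

	/* Models acpi_timer_check_state(): C2 and above are assumed to stop the lapic. */
	static void timer_check_state(struct power *pwr, int index, int cstate_type)
	{
		if (pwr->timer_broadcast_on_state < index)
			return;			/* an earlier state already marked it */
		if (cstate_type >= 2)		/* ACPI_STATE_C2 */
			pwr->timer_broadcast_on_state = index;
	}

	int main(void)
	{
		struct power pwr = { .timer_broadcast_on_state = INT_MAX };

		/* Walk a hypothetical C-state table: index 1 = C1, 2 = C2, 3 = C3. */
		timer_check_state(&pwr, 1, 1);
		timer_check_state(&pwr, 2, 2);
		timer_check_state(&pwr, 3, 3);

		if (pwr.timer_broadcast_on_state < INT_MAX)
			printf("broadcast needed from state %d on\n",
			       pwr.timer_broadcast_on_state);
		else
			printf("local APIC timer usable in all states\n");
		return 0;
	}

Tracking only the lowest affected index is what lets the patch's acpi_propagate_timer_broadcast() make a single APIC-timer/IPI switch per CPU, instead of the old code's switch to IPI followed moments later by a possible switch back.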
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e34af7772a66..4ea6d8b20d17 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -248,6 +248,48 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 	}
 }
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen, but other vendors
+ * are affected too. We pick the most conservative approach: we assume
+ * that the local APIC stops in both C2 and C3.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cx)
+{
+	struct acpi_processor_power *pwr = &pr->power;
+
+	/*
+	 * Check, if one of the previous states already marked the lapic
+	 * unstable
+	 */
+	if (pwr->timer_broadcast_on_state < state)
+		return;
+
+	if (cx->type >= ACPI_STATE_C2)
+		pr->power.timer_broadcast_on_state = state;
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+	cpumask_t mask = cpumask_of_cpu(pr->id);
+
+	if (pr->power.timer_broadcast_on_state < INT_MAX)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+	else
+		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+}
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cstate) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+
+#endif
+
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -914,11 +956,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 	unsigned int i;
 	unsigned int working = 0;
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	int timer_broadcast = 0;
-	cpumask_t mask = cpumask_of_cpu(pr->id);
-	on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
+	pr->power.timer_broadcast_on_state = INT_MAX;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -930,21 +968,14 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-			/* Some AMD systems fake C3 as C2, but still
-			   have timer troubles */
-			if (cx->valid &&
-			    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-				timer_broadcast++;
-#endif
+			if (cx->valid)
+				acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-			if (cx->valid)
-				timer_broadcast++;
-#endif
+			if (cx->valid)
+				acpi_timer_check_state(i, pr, cx);
 			break;
 		}
 
@@ -952,10 +983,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 			working++;
 	}
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	if (timer_broadcast)
-		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
+	acpi_propagate_timer_broadcast(pr);
 
 	return (working);
 }
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7798d2a9f793..916c0102db5b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -79,6 +79,7 @@ struct acpi_processor_power {
 	u32 bm_activity;
 	int count;
 	struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
+	int timer_broadcast_on_state;
 };
 
 /* Performance Management */