aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/acpi/processor_idle.c
diff options
context:
space:
mode:
authorAlexey Starikovskiy <alexey.y.starikovskiy@intel.com>2007-02-02 11:48:22 -0500
committerLen Brown <len.brown@intel.com>2007-02-02 21:14:28 -0500
commitcee324b145a1e5488b34191de670e5ed1d346ebb (patch)
tree9e00f815bda0db1a134b23c9495a71e643724c58 /drivers/acpi/processor_idle.c
parentceb6c46839021d5c7c338d48deac616944660124 (diff)
ACPICA: use new ACPI headers.
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	28
1 file changed, 14 insertions, 14 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index db21dda5837d..1d633f7e64fb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -160,7 +160,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
160{ 160{
161 if (t2 >= t1) 161 if (t2 >= t1)
162 return (t2 - t1); 162 return (t2 - t1);
163 else if (!(acpi_fadt.flags & ACPI_FADT_32BIT_TIMER)) 163 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
165 else 165 else
166 return ((0xFFFFFFFF - t1) + t2); 166 return ((0xFFFFFFFF - t1) + t2);
@@ -234,7 +234,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
234 /* Dummy wait op - must do something useless after P_LVL2 read 234 /* Dummy wait op - must do something useless after P_LVL2 read
235 because chipsets cannot guarantee that STPCLK# signal 235 because chipsets cannot guarantee that STPCLK# signal
236 gets asserted in time to freeze execution properly. */ 236 gets asserted in time to freeze execution properly. */
237 unused = inl(acpi_fadt.xpm_timer_block.address); 237 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
238 } 238 }
239} 239}
240 240
@@ -334,7 +334,7 @@ static void acpi_processor_idle(void)
334 * detection phase, to work cleanly with logical CPU hotplug. 334 * detection phase, to work cleanly with logical CPU hotplug.
335 */ 335 */
336 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 336 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
337 !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) 337 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
338 cx = &pr->power.states[ACPI_STATE_C1]; 338 cx = &pr->power.states[ACPI_STATE_C1];
339#endif 339#endif
340 340
@@ -380,11 +380,11 @@ static void acpi_processor_idle(void)
380 380
381 case ACPI_STATE_C2: 381 case ACPI_STATE_C2:
382 /* Get start time (ticks) */ 382 /* Get start time (ticks) */
383 t1 = inl(acpi_fadt.xpm_timer_block.address); 383 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
384 /* Invoke C2 */ 384 /* Invoke C2 */
385 acpi_cstate_enter(cx); 385 acpi_cstate_enter(cx);
386 /* Get end time (ticks) */ 386 /* Get end time (ticks) */
387 t2 = inl(acpi_fadt.xpm_timer_block.address); 387 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
388 388
389#ifdef CONFIG_GENERIC_TIME 389#ifdef CONFIG_GENERIC_TIME
390 /* TSC halts in C2, so notify users */ 390 /* TSC halts in C2, so notify users */
@@ -415,11 +415,11 @@ static void acpi_processor_idle(void)
415 } 415 }
416 416
417 /* Get start time (ticks) */ 417 /* Get start time (ticks) */
418 t1 = inl(acpi_fadt.xpm_timer_block.address); 418 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
419 /* Invoke C3 */ 419 /* Invoke C3 */
420 acpi_cstate_enter(cx); 420 acpi_cstate_enter(cx);
421 /* Get end time (ticks) */ 421 /* Get end time (ticks) */
422 t2 = inl(acpi_fadt.xpm_timer_block.address); 422 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
423 if (pr->flags.bm_check) { 423 if (pr->flags.bm_check) {
424 /* Enable bus master arbitration */ 424 /* Enable bus master arbitration */
425 atomic_dec(&c3_cpu_count); 425 atomic_dec(&c3_cpu_count);
@@ -451,7 +451,7 @@ static void acpi_processor_idle(void)
451#ifdef CONFIG_HOTPLUG_CPU 451#ifdef CONFIG_HOTPLUG_CPU
452 /* Don't do promotion/demotion */ 452 /* Don't do promotion/demotion */
453 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 453 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
454 !pr->flags.has_cst && !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) { 454 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
455 next_state = cx; 455 next_state = cx;
456 goto end; 456 goto end;
457 } 457 }
@@ -622,7 +622,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
622 * an SMP system. 622 * an SMP system.
623 */ 623 */
624 if ((num_online_cpus() > 1) && 624 if ((num_online_cpus() > 1) &&
625 !(acpi_fadt.flags & ACPI_FADT_C2_MP_SUPPORTED)) 625 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
626 return -ENODEV; 626 return -ENODEV;
627#endif 627#endif
628 628
@@ -631,8 +631,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
631 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 631 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
632 632
633 /* determine latencies from FADT */ 633 /* determine latencies from FADT */
634 pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.C2latency; 634 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
635 pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.C3latency; 635 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
636 636
637 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 637 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
638 "lvl2[0x%08x] lvl3[0x%08x]\n", 638 "lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -878,7 +878,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
878 * WBINVD should be set in fadt, for C3 state to be 878 * WBINVD should be set in fadt, for C3 state to be
879 * supported on when bm_check is not required. 879 * supported on when bm_check is not required.
880 */ 880 */
881 if (!(acpi_fadt.flags & ACPI_FADT_WBINVD)) { 881 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
882 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 882 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
883 "Cache invalidation should work properly" 883 "Cache invalidation should work properly"
884 " for C3 to be enabled on SMP systems\n")); 884 " for C3 to be enabled on SMP systems\n"));
@@ -1158,9 +1158,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1158 if (!pr) 1158 if (!pr)
1159 return -EINVAL; 1159 return -EINVAL;
1160 1160
1161 if (acpi_fadt.cst_control && !nocst) { 1161 if (acpi_gbl_FADT.cst_control && !nocst) {
1162 status = 1162 status =
1163 acpi_os_write_port(acpi_fadt.smi_command, acpi_fadt.cst_control, 8); 1163 acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1164 if (ACPI_FAILURE(status)) { 1164 if (ACPI_FAILURE(status)) {
1165 ACPI_EXCEPTION((AE_INFO, status, 1165 ACPI_EXCEPTION((AE_INFO, status,
1166 "Notifying BIOS of _CST ability failed")); 1166 "Notifying BIOS of _CST ability failed"));