author     Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com>    2009-05-21 20:09:10 -0400
committer  Len Brown <len.brown@intel.com>                         2009-05-27 21:57:30 -0400
commit     ee1ca48fae7e575d5e399d4fdcfe0afc1212a64c (patch)
tree       d2d8c9bd4a806e603ba272a7f71644af1fdb16a3
parent     cd86a536c81e9300d984327517548ca0652eebf9 (diff)
ACPI: Disable ARB_DISABLE on platforms where it is not needed
ARB_DISABLE is a NOP on all recent Intel platforms.
On such platforms, reduce contention on c3_lock
by skipping the fake ARB_DISABLE.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
-rw-r--r--   arch/x86/kernel/acpi/cstate.c   | 16
-rw-r--r--   drivers/acpi/processor_idle.c   |  7
2 files changed, 18 insertions, 5 deletions
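
The point of the patch is that the C3 entry path only needs the shared c3_lock when ARB_DISABLE is actually used, and ARB_DISABLE is only used when flags.bm_control is set. The following user-space sketch is illustrative only (it is not the kernel's idle code); the flags struct and enter_c3() helper are made up to show the decision that bm_control gates.

/*
 * Illustrative sketch, not kernel code: ARB_DISABLE and the shared
 * c3_lock protecting it are only touched when bm_control is set.
 * Forcing bm_control to 0 on CPUs where ARB_DISABLE is a NOP removes
 * the lock round-trip from every C3 entry.
 */
#include <stdio.h>

struct flags {
	int bm_check;    /* poll bus-master status instead of flushing caches */
	int bm_control;  /* ARB_DISABLE needed around C3 entry */
};

static void enter_c3(struct flags *f)
{
	if (f->bm_check && f->bm_control) {
		/* old path: take c3_lock, track CPUs in C3, toggle ARB_DISABLE */
		printf("take c3_lock, ARB_DISABLE around C3\n");
	} else {
		/* new path on recent Intel CPUs: no lock, no ARB_DISABLE */
		printf("enter C3 directly, no c3_lock contention\n");
	}
}

int main(void)
{
	struct flags older_smp  = { .bm_check = 1, .bm_control = 1 };
	struct flags p4_or_core = { .bm_check = 1, .bm_control = 0 };

	enter_c3(&older_smp);   /* lock + ARB_DISABLE */
	enter_c3(&p4_or_core);  /* straight into C3 */
	return 0;
}
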
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index bbbe4bbb6f34..8c44c232efcb 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -34,12 +34,22 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 		flags->bm_check = 1;
 	else if (c->x86_vendor == X86_VENDOR_INTEL) {
 		/*
-		 * Today all CPUs that support C3 share cache.
-		 * TBD: This needs to look at cache shared map, once
-		 * multi-core detection patch makes to the base.
+		 * Today all MP CPUs that support C3 share cache.
+		 * And caches should not be flushed by software while
+		 * entering C3 type state.
 		 */
 		flags->bm_check = 1;
 	}
+
+	/*
+	 * On all recent Intel platforms, ARB_DISABLE is a nop.
+	 * So, set bm_control to zero to indicate that ARB_DISABLE
+	 * is not required while entering C3 type state on
+	 * P4, Core and beyond CPUs
+	 */
+	if (c->x86_vendor == X86_VENDOR_INTEL &&
+	    (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)))
+		flags->bm_control = 0;
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
 
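
For a concrete feel of which CPUs the new check covers, here is a small stand-alone sketch of the same family/model predicate; arb_disable_not_needed() is a hypothetical user-space mirror of the condition above, and the sample values are only rough examples (family 0xF is the NetBurst/Pentium 4 line, family 6 model 14 is roughly the first Core generation).

/*
 * Hypothetical user-space mirror of the predicate added in cstate.c:
 * family > 6, or family 6 with model >= 14, means ARB_DISABLE is not
 * required and bm_control can be cleared.
 */
#include <stdio.h>

static int arb_disable_not_needed(int family, int model)
{
	return family > 0x6 || (family == 6 && model >= 14);
}

int main(void)
{
	printf("family 0xF, model 4  -> %d\n", arb_disable_not_needed(0xF, 4)); /* 1: P4-class */
	printf("family 6,   model 14 -> %d\n", arb_disable_not_needed(6, 14));  /* 1: Core and later */
	printf("family 6,   model 13 -> %d\n", arb_disable_not_needed(6, 13));  /* 0: older family-6 part */
	return 0;
}
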
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 72069ba5f1ed..4840c79fd8e0 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -512,7 +512,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 					   struct acpi_processor_cx *cx)
 {
-	static int bm_check_flag;
+	static int bm_check_flag = -1;
+	static int bm_control_flag = -1;
 
 
 	if (!cx->address)
@@ -542,12 +543,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	}
 
 	/* All the logic here assumes flags.bm_check is same across all CPUs */
-	if (!bm_check_flag) {
+	if (bm_check_flag == -1) {
 		/* Determine whether bm_check is needed based on CPU  */
 		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 		bm_check_flag = pr->flags.bm_check;
+		bm_control_flag = pr->flags.bm_control;
 	} else {
 		pr->flags.bm_check = bm_check_flag;
+		pr->flags.bm_control = bm_control_flag;
 	}
 
 	if (pr->flags.bm_check) {
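
The processor_idle.c hunks also switch the cached flag from "zero means not yet determined" to a -1 sentinel, so that a detected value of 0 (as the cstate.c change now produces for bm_control) is cached rather than triggering re-detection on every CPU. The sketch below is a minimal, stand-alone illustration of that pattern; detect_bm_control() and cached_bm_control() are hypothetical stand-ins, not kernel functions.

/*
 * Illustrative sketch of the run-once caching pattern: -1 marks
 * "not determined yet", so a legitimate cached value of 0 is
 * distinguishable from "never initialised", which a plain !flag
 * test cannot do.
 */
#include <stdio.h>

static int detect_bm_control(void)
{
	/* stand-in for the real detection; returns 0 here on purpose */
	return 0;
}

static int cached_bm_control(void)
{
	static int bm_control_flag = -1;	/* -1: not determined yet */

	if (bm_control_flag == -1) {
		bm_control_flag = detect_bm_control();
		printf("detection ran\n");
	}
	return bm_control_flag;
}

int main(void)
{
	cached_bm_control();	/* prints "detection ran" exactly once */
	cached_bm_control();	/* cached 0 is reused, detection not repeated */
	return 0;
}
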