| author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-25 12:14:37 -0400 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:12:07 -0400 |
| commit | e12f65f7a49905c013263ac522af224892aafc00 (patch) | |
| tree | 8dd3f687e468cbc93b6a6bfb0e37342096c84efd /drivers/acpi | |
| parent | 2d21a29fb62f142b8a62496700d8d82a6a8fd783 (diff) | |
locking, ACPI: Annotate c3_lock as raw
We cannot preempt this lock on -rt, as we are in an interrupt-disabled
region and about to go into deep sleep. In mainline this change documents
the low-level nature of the lock; otherwise there is no functional
difference. Lockdep and sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Len Brown <len.brown@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/acpi')
-rw-r--r-- | drivers/acpi/processor_idle.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1b..3e0531405997 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 }
 
 static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * without doing anything.
 	 */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	}
 	kt2 = ktime_get_real();
 	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
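
For context, a minimal sketch (not taken from the kernel tree) of the annotation pattern this patch applies: a counter updated from a non-preemptible, interrupts-off idle path must be protected by a raw_spinlock_t, because on PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock and cannot be taken there. The names demo_lock, demo_cpu_count, demo_enter_deep_idle and demo_exit_deep_idle are hypothetical.

#include <linux/spinlock.h>
#include <linux/cpumask.h>

static int demo_cpu_count;
static DEFINE_RAW_SPINLOCK(demo_lock);	/* raw: stays a spinning lock on -rt */

static void demo_enter_deep_idle(void)
{
	/* Called with interrupts disabled; must not sleep, so take the raw lock. */
	raw_spin_lock(&demo_lock);
	demo_cpu_count++;

	/*
	 * Hypothetical state-wide action once the last online CPU arrives
	 * here, mirroring the ARB_DISABLE write in acpi_idle_enter_bm().
	 */
	if (demo_cpu_count == num_online_cpus())
		;

	raw_spin_unlock(&demo_lock);
}

static void demo_exit_deep_idle(void)
{
	raw_spin_lock(&demo_lock);
	demo_cpu_count--;
	raw_spin_unlock(&demo_lock);
}

Lockdep and sparse treat raw_spinlock_t the same way they treat spinlock_t, which is why the commit message notes that checking keeps working as usual.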