author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-29 14:18:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-29 14:18:09 -0400
commit     f310642123e0d32d919c60ca3fab5acd130c4ba3 (patch)
tree       f3844152e2e8c0fdd01621a400f84c8a159252a0
parent     ef1d57599dc904fdb31b8e9b5336350d21a1fde1 (diff)
parent     5d4c47e0195b989f284907358bd5c268a44b91c7 (diff)
Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6
* 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6:
x86 idle: deprecate mwait_idle() and "idle=mwait" cmdline param
x86 idle: deprecate "no-hlt" cmdline param
x86 idle APM: deprecate CONFIG_APM_CPU_IDLE
x86 idle floppy: deprecate disable_hlt()
x86 idle: EXPORT_SYMBOL(default_idle, pm_idle) only when APM demands it
x86 idle: clarify AMD erratum 400 workaround
idle governor: Avoid lock acquisition to read pm_qos before entering idle
cpuidle: menu: fixed wrapping timers at 4.294 seconds
 Documentation/feature-removal-schedule.txt | 36
 arch/x86/include/asm/acpi.h                |  2
 arch/x86/include/asm/idle.h                |  2
 arch/x86/include/asm/processor.h           |  4
 arch/x86/kernel/apm_32.c                   |  2
 arch/x86/kernel/cpu/bugs.c                 |  1
 arch/x86/kernel/cpu/common.c               |  2
 arch/x86/kernel/process.c                  | 43
 arch/x86/kernel/smpboot.c                  |  2
 drivers/acpi/processor_idle.c              |  2
 drivers/block/floppy.c                     |  1
 drivers/cpuidle/governors/menu.c           |  4
 include/linux/pm_qos_params.h              |  4
 kernel/pm_qos_params.c                     | 37
 14 files changed, 102 insertions(+), 40 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ff31b1cc50aa..1a9446b59153 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,42 @@ be removed from this file.
 
 ---------------------------
 
+What: x86 floppy disable_hlt
+When: 2012
+Why: ancient workaround of dubious utility clutters the
+     code used by everybody else.
+Who: Len Brown <len.brown@intel.com>
+
+---------------------------
+
+What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
+When: 2012
+Why: This optional sub-feature of APM is of dubious reliability,
+     and ancient APM laptops are likely better served by calling HLT.
+     Deleting CONFIG_APM_CPU_IDLE allows x86 to stop exporting
+     the pm_idle function pointer to modules.
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
+What: x86_32 "no-hlt" cmdline param
+When: 2012
+Why: remove a branch from the idle path and simplify code used by everybody.
+     This option disables the use of HLT in idle and machine_halt()
+     for hardware that was flaky 15 years ago. Today we have
+     "idle=poll", which removes HLT from idle, so if such a machine
+     is still running the upstream kernel, "idle=poll" is likely sufficient.
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
+What: x86 "idle=mwait" cmdline param
+When: 2012
+Why: simplify x86 idle code
+Who: Len Brown <len.brown@intel.com>
+
+----------------------------
+
 What: PRISM54
 When: 2.6.34
 
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 416d865eae39..610001d385dd 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -139,7 +139,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
         boot_cpu_data.x86_model <= 0x05 &&
         boot_cpu_data.x86_mask < 0x0A)
         return 1;
-    else if (c1e_detected)
+    else if (amd_e400_c1e_detected)
         return 1;
     else
         return max_cstate;
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 38d87379e270..f49253d75710 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -16,6 +16,6 @@ static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
-void c1e_remove_cpu(int cpu);
+void amd_e400_remove_cpu(int cpu);
 
 #endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4c25ab48257b..219371546afd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -754,10 +754,10 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_c1e_mask(void);
+extern void init_amd_e400_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern bool c1e_detected;
+extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
              IDLE_POLL, IDLE_FORCE_MWAIT};
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 3bfa02235965..965a7666c283 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -361,6 +361,7 @@ struct apm_user {
  * idle percentage above which bios idle calls are done
  */
 #ifdef CONFIG_APM_CPU_IDLE
+#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
 #define DEFAULT_IDLE_THRESHOLD 95
 #else
 #define DEFAULT_IDLE_THRESHOLD 100
@@ -904,6 +905,7 @@ static void apm_cpu_idle(void)
     unsigned int jiffies_since_last_check = jiffies - last_jiffies;
     unsigned int bucket;
 
+    WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
 recalc:
     if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
         use_apm_idle = 0;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c39576cb3018..525514cf33c3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -19,6 +19,7 @@
 
 static int __init no_halt(char *s)
 {
+    WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
     boot_cpu_data.hlt_works_ok = 0;
     return 1;
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 53f02f5d8cce..22a073d7fbff 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -902,7 +902,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
     identify_cpu(&boot_cpu_data);
-    init_c1e_mask();
+    init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
     sysenter_setup();
     enable_sep_cpu();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 88a90a977f8e..426a5b66f7e4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,9 @@ EXPORT_SYMBOL(boot_option_idle_override);
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
+#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
 EXPORT_SYMBOL(pm_idle);
+#endif
 
 #ifdef CONFIG_X86_32
 /*
@@ -397,7 +399,7 @@ void default_idle(void)
         cpu_relax();
     }
 }
-#ifdef CONFIG_APM_MODULE
+#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
 EXPORT_SYMBOL(default_idle);
 #endif
 
@@ -535,45 +537,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
     return (edx & MWAIT_EDX_C1);
 }
 
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_c1e_detected;
+EXPORT_SYMBOL(amd_e400_c1e_detected);
 
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_c1e_mask;
 
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
 {
-    if (c1e_mask != NULL)
-        cpumask_clear_cpu(cpu, c1e_mask);
+    if (amd_e400_c1e_mask != NULL)
+        cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
 }
 
 /*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
  * way as C3 power states (local apic timer and TSC stop)
  */
-static void c1e_idle(void)
+static void amd_e400_idle(void)
 {
     if (need_resched())
         return;
 
-    if (!c1e_detected) {
+    if (!amd_e400_c1e_detected) {
         u32 lo, hi;
 
         rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 
         if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-            c1e_detected = true;
+            amd_e400_c1e_detected = true;
             if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                 mark_tsc_unstable("TSC halt in AMD C1E");
             printk(KERN_INFO "System has AMD C1E enabled\n");
         }
     }
 
-    if (c1e_detected) {
+    if (amd_e400_c1e_detected) {
         int cpu = smp_processor_id();
 
-        if (!cpumask_test_cpu(cpu, c1e_mask)) {
-            cpumask_set_cpu(cpu, c1e_mask);
+        if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
+            cpumask_set_cpu(cpu, amd_e400_c1e_mask);
             /*
              * Force broadcast so ACPI can not interfere.
              */
@@ -616,17 +618,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
         pm_idle = mwait_idle;
     } else if (cpu_has_amd_erratum(amd_erratum_400)) {
         /* E400: APIC timer interrupt does not wake up CPU from C1e */
-        printk(KERN_INFO "using C1E aware idle routine\n");
-        pm_idle = c1e_idle;
+        printk(KERN_INFO "using AMD E400 aware idle routine\n");
+        pm_idle = amd_e400_idle;
     } else
         pm_idle = default_idle;
 }
 
-void __init init_c1e_mask(void)
+void __init init_amd_e400_c1e_mask(void)
 {
-    /* If we're using c1e_idle, we need to allocate c1e_mask. */
-    if (pm_idle == c1e_idle)
-        zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+    /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
+    if (pm_idle == amd_e400_idle)
+        zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
@@ -640,6 +642,7 @@ static int __init idle_setup(char *str)
         boot_option_idle_override = IDLE_POLL;
     } else if (!strcmp(str, "mwait")) {
         boot_option_idle_override = IDLE_FORCE_MWAIT;
+        WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
     } else if (!strcmp(str, "halt")) {
         /*
          * When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a3c430bdfb60..eefd96765e79 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1307,7 +1307,7 @@ void play_dead_common(void)
 {
     idle_task_exit();
     reset_lazy_tlbstate();
-    c1e_remove_cpu(raw_smp_processor_id());
+    amd_e400_remove_cpu(raw_smp_processor_id());
 
     mb();
     /* Ack it */
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
     if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
         return;
 
-    if (c1e_detected)
+    if (amd_e400_c1e_detected)
         type = ACPI_STATE_C1;
 
     /*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
 {
     unsigned long flags;
 
+    WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
     spin_lock_irqsave(&floppy_hlt_lock, flags);
     if (!hlt_disabled) {
         hlt_disabled = 1;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
     unsigned int power_usage = -1;
     int i;
     int multiplier;
+    struct timespec t;
 
     if (data->needs_update) {
         menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
         return 0;
 
     /* determine the expected residency time, round up */
+    t = ktime_to_timespec(tick_nohz_get_sleep_length());
     data->expected_us =
-        DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+        t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
 
 
     data->bucket = which_bucket(data->expected_us);
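Why the old calculation wrapped at 4.294 seconds: tick_nohz_get_sleep_length() returns a 64-bit nanosecond count, and 2^32 ns is only about 4.295 s, so casting to u32 before the divide silently truncated any predicted sleep longer than that. A minimal standalone C sketch of the bug and the fix (user-space illustration, not kernel code; plain division stands in for DIV_ROUND_UP):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t sleep_ns = 5000000000LL;        /* a 5 s predicted sleep */

    /* old path: the (u32) cast wraps at 2^32 ns (~4.294 s) */
    uint32_t old_us = (uint32_t)sleep_ns / 1000;

    /* new path: split into whole seconds and leftover nanoseconds
     * first, as the timespec-based code in the hunk above does */
    int64_t sec  = sleep_ns / 1000000000LL;
    int64_t nsec = sleep_ns % 1000000000LL;
    uint32_t new_us = (uint32_t)(sec * 1000000 + nsec / 1000);

    printf("old: %u us\n", old_us);         /* 705032 us, ~0.7 s: wrong */
    printf("new: %u us\n", new_us);         /* 5000000 us: correct */
    return 0;
}
```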
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index 77cbddb3784c..a7d87f911cab 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -16,6 +16,10 @@
 #define PM_QOS_NUM_CLASSES 4
 #define PM_QOS_DEFAULT_VALUE -1
 
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE        (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE        (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+
 struct pm_qos_request_list {
     struct plist_node list;
     int pm_qos_class;
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index fd8d1e035df9..6824ca7d4d0c 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -54,11 +54,17 @@ enum pm_qos_type {
     PM_QOS_MIN      /* return the smallest value */
 };
 
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types Linux supports for 32-bit quantities.
+ */
 struct pm_qos_object {
     struct plist_head requests;
     struct blocking_notifier_head *notifiers;
     struct miscdevice pm_qos_power_miscdev;
     char *name;
+    s32 target_value;   /* Do not change to 64 bit */
     s32 default_value;
     enum pm_qos_type type;
 };
@@ -71,7 +77,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
     .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
     .notifiers = &cpu_dma_lat_notifier,
     .name = "cpu_dma_latency",
-    .default_value = 2000 * USEC_PER_SEC,
+    .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+    .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
     .type = PM_QOS_MIN,
 };
 
@@ -80,7 +87,8 @@ static struct pm_qos_object network_lat_pm_qos = {
     .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
     .notifiers = &network_lat_notifier,
     .name = "network_latency",
-    .default_value = 2000 * USEC_PER_SEC,
+    .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+    .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
     .type = PM_QOS_MIN
 };
 
@@ -90,7 +98,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
     .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
     .notifiers = &network_throughput_notifier,
     .name = "network_throughput",
-    .default_value = 0,
+    .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+    .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
     .type = PM_QOS_MAX,
 };
 
@@ -136,6 +145,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
     }
 }
 
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+    return o->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+    o->target_value = value;
+}
+
 static void update_target(struct pm_qos_object *o, struct plist_node *node,
         int del, int value)
 {
@@ -160,6 +179,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
         plist_add(node, &o->requests);
     }
     curr_value = pm_qos_get_value(o);
+    pm_qos_set_value(o, curr_value);
     spin_unlock_irqrestore(&pm_qos_lock, flags);
 
     if (prev_value != curr_value)
@@ -194,18 +214,11 @@ static int find_pm_qos_object_by_minor(int minor)
  * pm_qos_request - returns current system wide qos expectation
  * @pm_qos_class: identification of which qos value is requested
  *
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
  */
 int pm_qos_request(int pm_qos_class)
 {
-    unsigned long flags;
-    int value;
-
-    spin_lock_irqsave(&pm_qos_lock, flags);
-    value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
-    spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-    return value;
+    return pm_qos_read_value(pm_qos_array[pm_qos_class]);
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
