diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2012-11-29 15:46:14 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2012-11-29 15:46:14 -0500 |
commit | 9ee71f513c698b05f67e74f7ce66ba4f23f9073f (patch) | |
tree | bdbfad86c3473998a844e7cfc501aad688043a34 /drivers | |
parent | aa849506742212659f1ac3b7c3ea387478f81ae0 (diff) | |
parent | a474a515497ef3566cfc17a2cab3d54d6d50ff1c (diff) |
Merge branch 'pm-cpuidle'
* pm-cpuidle:
cpuidle: Measure idle state durations with monotonic clock
cpuidle: fix a suspicious RCU usage in menu governor
cpuidle: support multiple drivers
cpuidle: prepare the cpuidle core to handle multiple drivers
cpuidle: move driver checking within the lock section
cpuidle: move driver's refcount to cpuidle
cpuidle: fixup device.h header in cpuidle.h
cpuidle / sysfs: move structure declaration into the sysfs.c file
cpuidle: Get typical recent sleep interval
cpuidle: Set residency to 0 if target Cstate not enter
cpuidle: Quickly notice prediction failure in general case
cpuidle: Quickly notice prediction failure for repeat mode
cpuidle / sysfs: move kobj initialization in the syfs file
cpuidle / sysfs: change function parameter
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/acpi/processor_idle.c | 57 | ||||
-rw-r--r-- | drivers/cpuidle/Kconfig | 9 | ||||
-rw-r--r-- | drivers/cpuidle/cpuidle.c | 55 | ||||
-rw-r--r-- | drivers/cpuidle/cpuidle.h | 13 | ||||
-rw-r--r-- | drivers/cpuidle/driver.c | 209 | ||||
-rw-r--r-- | drivers/cpuidle/governors/menu.c | 168 | ||||
-rw-r--r-- | drivers/cpuidle/sysfs.c | 201 | ||||
-rw-r--r-- | drivers/idle/intel_idle.c | 14 |
8 files changed, 560 insertions(+), 166 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e8086c725305..f1a5da44591d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | 735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
736 | struct cpuidle_driver *drv, int index) | 736 | struct cpuidle_driver *drv, int index) |
737 | { | 737 | { |
738 | ktime_t kt1, kt2; | ||
739 | s64 idle_time; | ||
740 | struct acpi_processor *pr; | 738 | struct acpi_processor *pr; |
741 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 739 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
742 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 740 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
743 | 741 | ||
744 | pr = __this_cpu_read(processors); | 742 | pr = __this_cpu_read(processors); |
745 | dev->last_residency = 0; | ||
746 | 743 | ||
747 | if (unlikely(!pr)) | 744 | if (unlikely(!pr)) |
748 | return -EINVAL; | 745 | return -EINVAL; |
749 | 746 | ||
750 | local_irq_disable(); | ||
751 | |||
752 | |||
753 | lapic_timer_state_broadcast(pr, cx, 1); | 747 | lapic_timer_state_broadcast(pr, cx, 1); |
754 | kt1 = ktime_get_real(); | ||
755 | acpi_idle_do_entry(cx); | 748 | acpi_idle_do_entry(cx); |
756 | kt2 = ktime_get_real(); | ||
757 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | ||
758 | |||
759 | /* Update device last_residency*/ | ||
760 | dev->last_residency = (int)idle_time; | ||
761 | 749 | ||
762 | local_irq_enable(); | ||
763 | lapic_timer_state_broadcast(pr, cx, 0); | 750 | lapic_timer_state_broadcast(pr, cx, 0); |
764 | 751 | ||
765 | return index; | 752 | return index; |
@@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
806 | struct acpi_processor *pr; | 793 | struct acpi_processor *pr; |
807 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 794 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
808 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 795 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
809 | ktime_t kt1, kt2; | ||
810 | s64 idle_time_ns; | ||
811 | s64 idle_time; | ||
812 | 796 | ||
813 | pr = __this_cpu_read(processors); | 797 | pr = __this_cpu_read(processors); |
814 | dev->last_residency = 0; | ||
815 | 798 | ||
816 | if (unlikely(!pr)) | 799 | if (unlikely(!pr)) |
817 | return -EINVAL; | 800 | return -EINVAL; |
818 | 801 | ||
819 | local_irq_disable(); | ||
820 | |||
821 | |||
822 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 802 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
823 | current_thread_info()->status &= ~TS_POLLING; | 803 | current_thread_info()->status &= ~TS_POLLING; |
824 | /* | 804 | /* |
@@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
829 | 809 | ||
830 | if (unlikely(need_resched())) { | 810 | if (unlikely(need_resched())) { |
831 | current_thread_info()->status |= TS_POLLING; | 811 | current_thread_info()->status |= TS_POLLING; |
832 | local_irq_enable(); | ||
833 | return -EINVAL; | 812 | return -EINVAL; |
834 | } | 813 | } |
835 | } | 814 | } |
@@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
843 | if (cx->type == ACPI_STATE_C3) | 822 | if (cx->type == ACPI_STATE_C3) |
844 | ACPI_FLUSH_CPU_CACHE(); | 823 | ACPI_FLUSH_CPU_CACHE(); |
845 | 824 | ||
846 | kt1 = ktime_get_real(); | ||
847 | /* Tell the scheduler that we are going deep-idle: */ | 825 | /* Tell the scheduler that we are going deep-idle: */ |
848 | sched_clock_idle_sleep_event(); | 826 | sched_clock_idle_sleep_event(); |
849 | acpi_idle_do_entry(cx); | 827 | acpi_idle_do_entry(cx); |
850 | kt2 = ktime_get_real(); | ||
851 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
852 | idle_time = idle_time_ns; | ||
853 | do_div(idle_time, NSEC_PER_USEC); | ||
854 | 828 | ||
855 | /* Update device last_residency*/ | 829 | sched_clock_idle_wakeup_event(0); |
856 | dev->last_residency = (int)idle_time; | ||
857 | 830 | ||
858 | /* Tell the scheduler how much we idled: */ | ||
859 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
860 | |||
861 | local_irq_enable(); | ||
862 | if (cx->entry_method != ACPI_CSTATE_FFH) | 831 | if (cx->entry_method != ACPI_CSTATE_FFH) |
863 | current_thread_info()->status |= TS_POLLING; | 832 | current_thread_info()->status |= TS_POLLING; |
864 | 833 | ||
@@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
883 | struct acpi_processor *pr; | 852 | struct acpi_processor *pr; |
884 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 853 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
885 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 854 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
886 | ktime_t kt1, kt2; | ||
887 | s64 idle_time_ns; | ||
888 | s64 idle_time; | ||
889 | |||
890 | 855 | ||
891 | pr = __this_cpu_read(processors); | 856 | pr = __this_cpu_read(processors); |
892 | dev->last_residency = 0; | ||
893 | 857 | ||
894 | if (unlikely(!pr)) | 858 | if (unlikely(!pr)) |
895 | return -EINVAL; | 859 | return -EINVAL; |
@@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
899 | return drv->states[drv->safe_state_index].enter(dev, | 863 | return drv->states[drv->safe_state_index].enter(dev, |
900 | drv, drv->safe_state_index); | 864 | drv, drv->safe_state_index); |
901 | } else { | 865 | } else { |
902 | local_irq_disable(); | ||
903 | acpi_safe_halt(); | 866 | acpi_safe_halt(); |
904 | local_irq_enable(); | ||
905 | return -EBUSY; | 867 | return -EBUSY; |
906 | } | 868 | } |
907 | } | 869 | } |
908 | 870 | ||
909 | local_irq_disable(); | ||
910 | |||
911 | |||
912 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 871 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
913 | current_thread_info()->status &= ~TS_POLLING; | 872 | current_thread_info()->status &= ~TS_POLLING; |
914 | /* | 873 | /* |
@@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
919 | 878 | ||
920 | if (unlikely(need_resched())) { | 879 | if (unlikely(need_resched())) { |
921 | current_thread_info()->status |= TS_POLLING; | 880 | current_thread_info()->status |= TS_POLLING; |
922 | local_irq_enable(); | ||
923 | return -EINVAL; | 881 | return -EINVAL; |
924 | } | 882 | } |
925 | } | 883 | } |
@@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
934 | */ | 892 | */ |
935 | lapic_timer_state_broadcast(pr, cx, 1); | 893 | lapic_timer_state_broadcast(pr, cx, 1); |
936 | 894 | ||
937 | kt1 = ktime_get_real(); | ||
938 | /* | 895 | /* |
939 | * disable bus master | 896 | * disable bus master |
940 | * bm_check implies we need ARB_DIS | 897 | * bm_check implies we need ARB_DIS |
@@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
965 | c3_cpu_count--; | 922 | c3_cpu_count--; |
966 | raw_spin_unlock(&c3_lock); | 923 | raw_spin_unlock(&c3_lock); |
967 | } | 924 | } |
968 | kt2 = ktime_get_real(); | ||
969 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
970 | idle_time = idle_time_ns; | ||
971 | do_div(idle_time, NSEC_PER_USEC); | ||
972 | |||
973 | /* Update device last_residency*/ | ||
974 | dev->last_residency = (int)idle_time; | ||
975 | 925 | ||
976 | /* Tell the scheduler how much we idled: */ | 926 | sched_clock_idle_wakeup_event(0); |
977 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
978 | 927 | ||
979 | local_irq_enable(); | ||
980 | if (cx->entry_method != ACPI_CSTATE_FFH) | 928 | if (cx->entry_method != ACPI_CSTATE_FFH) |
981 | current_thread_info()->status |= TS_POLLING; | 929 | current_thread_info()->status |= TS_POLLING; |
982 | 930 | ||
@@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
987 | struct cpuidle_driver acpi_idle_driver = { | 935 | struct cpuidle_driver acpi_idle_driver = { |
988 | .name = "acpi_idle", | 936 | .name = "acpi_idle", |
989 | .owner = THIS_MODULE, | 937 | .owner = THIS_MODULE, |
938 | .en_core_tk_irqen = 1, | ||
990 | }; | 939 | }; |
991 | 940 | ||
992 | /** | 941 | /** |
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index a76b689e553b..234ae651b38f 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -9,6 +9,15 @@ config CPU_IDLE | |||
9 | 9 | ||
10 | If you're using an ACPI-enabled platform, you should say Y here. | 10 | If you're using an ACPI-enabled platform, you should say Y here. |
11 | 11 | ||
12 | config CPU_IDLE_MULTIPLE_DRIVERS | ||
13 | bool "Support multiple cpuidle drivers" | ||
14 | depends on CPU_IDLE | ||
15 | default n | ||
16 | help | ||
17 | Allows the cpuidle framework to use different drivers for each CPU. | ||
18 | This is useful if you have a system with different CPU latencies and | ||
19 | states. If unsure say N. | ||
20 | |||
12 | config CPU_IDLE_GOV_LADDER | 21 | config CPU_IDLE_GOV_LADDER |
13 | bool | 22 | bool |
14 | depends on CPU_IDLE | 23 | depends on CPU_IDLE |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7f15b8514a18..8df53dd8dbe1 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -68,7 +68,7 @@ static cpuidle_enter_t cpuidle_enter_ops; | |||
68 | int cpuidle_play_dead(void) | 68 | int cpuidle_play_dead(void) |
69 | { | 69 | { |
70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
71 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 71 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
72 | int i, dead_state = -1; | 72 | int i, dead_state = -1; |
73 | int power_usage = -1; | 73 | int power_usage = -1; |
74 | 74 | ||
@@ -109,8 +109,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
109 | /* This can be moved to within driver enter routine | 109 | /* This can be moved to within driver enter routine |
110 | * but that results in multiple copies of same code. | 110 | * but that results in multiple copies of same code. |
111 | */ | 111 | */ |
112 | dev->states_usage[entered_state].time += | 112 | dev->states_usage[entered_state].time += dev->last_residency; |
113 | (unsigned long long)dev->last_residency; | ||
114 | dev->states_usage[entered_state].usage++; | 113 | dev->states_usage[entered_state].usage++; |
115 | } else { | 114 | } else { |
116 | dev->last_residency = 0; | 115 | dev->last_residency = 0; |
@@ -128,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
128 | int cpuidle_idle_call(void) | 127 | int cpuidle_idle_call(void) |
129 | { | 128 | { |
130 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 129 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
131 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 130 | struct cpuidle_driver *drv; |
132 | int next_state, entered_state; | 131 | int next_state, entered_state; |
133 | 132 | ||
134 | if (off) | 133 | if (off) |
@@ -141,9 +140,15 @@ int cpuidle_idle_call(void) | |||
141 | if (!dev || !dev->enabled) | 140 | if (!dev || !dev->enabled) |
142 | return -EBUSY; | 141 | return -EBUSY; |
143 | 142 | ||
143 | drv = cpuidle_get_cpu_driver(dev); | ||
144 | |||
144 | /* ask the governor for the next state */ | 145 | /* ask the governor for the next state */ |
145 | next_state = cpuidle_curr_governor->select(drv, dev); | 146 | next_state = cpuidle_curr_governor->select(drv, dev); |
146 | if (need_resched()) { | 147 | if (need_resched()) { |
148 | dev->last_residency = 0; | ||
149 | /* give the governor an opportunity to reflect on the outcome */ | ||
150 | if (cpuidle_curr_governor->reflect) | ||
151 | cpuidle_curr_governor->reflect(dev, next_state); | ||
147 | local_irq_enable(); | 152 | local_irq_enable(); |
148 | return 0; | 153 | return 0; |
149 | } | 154 | } |
@@ -308,15 +313,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {} | |||
308 | int cpuidle_enable_device(struct cpuidle_device *dev) | 313 | int cpuidle_enable_device(struct cpuidle_device *dev) |
309 | { | 314 | { |
310 | int ret, i; | 315 | int ret, i; |
311 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 316 | struct cpuidle_driver *drv; |
312 | 317 | ||
313 | if (!dev) | 318 | if (!dev) |
314 | return -EINVAL; | 319 | return -EINVAL; |
315 | 320 | ||
316 | if (dev->enabled) | 321 | if (dev->enabled) |
317 | return 0; | 322 | return 0; |
323 | |||
324 | drv = cpuidle_get_cpu_driver(dev); | ||
325 | |||
318 | if (!drv || !cpuidle_curr_governor) | 326 | if (!drv || !cpuidle_curr_governor) |
319 | return -EIO; | 327 | return -EIO; |
328 | |||
320 | if (!dev->state_count) | 329 | if (!dev->state_count) |
321 | dev->state_count = drv->state_count; | 330 | dev->state_count = drv->state_count; |
322 | 331 | ||
@@ -331,7 +340,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
331 | 340 | ||
332 | poll_idle_init(drv); | 341 | poll_idle_init(drv); |
333 | 342 | ||
334 | if ((ret = cpuidle_add_state_sysfs(dev))) | 343 | ret = cpuidle_add_device_sysfs(dev); |
344 | if (ret) | ||
335 | return ret; | 345 | return ret; |
336 | 346 | ||
337 | if (cpuidle_curr_governor->enable && | 347 | if (cpuidle_curr_governor->enable && |
@@ -352,7 +362,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
352 | return 0; | 362 | return 0; |
353 | 363 | ||
354 | fail_sysfs: | 364 | fail_sysfs: |
355 | cpuidle_remove_state_sysfs(dev); | 365 | cpuidle_remove_device_sysfs(dev); |
356 | 366 | ||
357 | return ret; | 367 | return ret; |
358 | } | 368 | } |
@@ -368,17 +378,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device); | |||
368 | */ | 378 | */ |
369 | void cpuidle_disable_device(struct cpuidle_device *dev) | 379 | void cpuidle_disable_device(struct cpuidle_device *dev) |
370 | { | 380 | { |
381 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
382 | |||
371 | if (!dev || !dev->enabled) | 383 | if (!dev || !dev->enabled) |
372 | return; | 384 | return; |
373 | if (!cpuidle_get_driver() || !cpuidle_curr_governor) | 385 | |
386 | if (!drv || !cpuidle_curr_governor) | ||
374 | return; | 387 | return; |
375 | 388 | ||
376 | dev->enabled = 0; | 389 | dev->enabled = 0; |
377 | 390 | ||
378 | if (cpuidle_curr_governor->disable) | 391 | if (cpuidle_curr_governor->disable) |
379 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); | 392 | cpuidle_curr_governor->disable(drv, dev); |
380 | 393 | ||
381 | cpuidle_remove_state_sysfs(dev); | 394 | cpuidle_remove_device_sysfs(dev); |
382 | enabled_devices--; | 395 | enabled_devices--; |
383 | } | 396 | } |
384 | 397 | ||
@@ -394,17 +407,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device); | |||
394 | static int __cpuidle_register_device(struct cpuidle_device *dev) | 407 | static int __cpuidle_register_device(struct cpuidle_device *dev) |
395 | { | 408 | { |
396 | int ret; | 409 | int ret; |
397 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 410 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
398 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
399 | 411 | ||
400 | if (!try_module_get(cpuidle_driver->owner)) | 412 | if (!try_module_get(drv->owner)) |
401 | return -EINVAL; | 413 | return -EINVAL; |
402 | 414 | ||
403 | init_completion(&dev->kobj_unregister); | ||
404 | |||
405 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 415 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
406 | list_add(&dev->device_list, &cpuidle_detected_devices); | 416 | list_add(&dev->device_list, &cpuidle_detected_devices); |
407 | ret = cpuidle_add_sysfs(cpu_dev); | 417 | ret = cpuidle_add_sysfs(dev); |
408 | if (ret) | 418 | if (ret) |
409 | goto err_sysfs; | 419 | goto err_sysfs; |
410 | 420 | ||
@@ -416,12 +426,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
416 | return 0; | 426 | return 0; |
417 | 427 | ||
418 | err_coupled: | 428 | err_coupled: |
419 | cpuidle_remove_sysfs(cpu_dev); | 429 | cpuidle_remove_sysfs(dev); |
420 | wait_for_completion(&dev->kobj_unregister); | ||
421 | err_sysfs: | 430 | err_sysfs: |
422 | list_del(&dev->device_list); | 431 | list_del(&dev->device_list); |
423 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 432 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
424 | module_put(cpuidle_driver->owner); | 433 | module_put(drv->owner); |
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | 436 | ||
@@ -460,8 +469,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); | |||
460 | */ | 469 | */ |
461 | void cpuidle_unregister_device(struct cpuidle_device *dev) | 470 | void cpuidle_unregister_device(struct cpuidle_device *dev) |
462 | { | 471 | { |
463 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 472 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
464 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
465 | 473 | ||
466 | if (dev->registered == 0) | 474 | if (dev->registered == 0) |
467 | return; | 475 | return; |
@@ -470,16 +478,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) | |||
470 | 478 | ||
471 | cpuidle_disable_device(dev); | 479 | cpuidle_disable_device(dev); |
472 | 480 | ||
473 | cpuidle_remove_sysfs(cpu_dev); | 481 | cpuidle_remove_sysfs(dev); |
474 | list_del(&dev->device_list); | 482 | list_del(&dev->device_list); |
475 | wait_for_completion(&dev->kobj_unregister); | ||
476 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 483 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
477 | 484 | ||
478 | cpuidle_coupled_unregister_device(dev); | 485 | cpuidle_coupled_unregister_device(dev); |
479 | 486 | ||
480 | cpuidle_resume_and_unlock(); | 487 | cpuidle_resume_and_unlock(); |
481 | 488 | ||
482 | module_put(cpuidle_driver->owner); | 489 | module_put(drv->owner); |
483 | } | 490 | } |
484 | 491 | ||
485 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); | 492 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); |
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 76e7f696ad8c..ee97e9672ecf 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __DRIVER_CPUIDLE_H | 5 | #ifndef __DRIVER_CPUIDLE_H |
6 | #define __DRIVER_CPUIDLE_H | 6 | #define __DRIVER_CPUIDLE_H |
7 | 7 | ||
8 | #include <linux/device.h> | ||
9 | |||
10 | /* For internal use only */ | 8 | /* For internal use only */ |
11 | extern struct cpuidle_governor *cpuidle_curr_governor; | 9 | extern struct cpuidle_governor *cpuidle_curr_governor; |
12 | extern struct list_head cpuidle_governors; | 10 | extern struct list_head cpuidle_governors; |
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void); | |||
25 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); | 23 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); |
26 | 24 | ||
27 | /* sysfs */ | 25 | /* sysfs */ |
26 | |||
27 | struct device; | ||
28 | |||
28 | extern int cpuidle_add_interface(struct device *dev); | 29 | extern int cpuidle_add_interface(struct device *dev); |
29 | extern void cpuidle_remove_interface(struct device *dev); | 30 | extern void cpuidle_remove_interface(struct device *dev); |
30 | extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); | 31 | extern int cpuidle_add_device_sysfs(struct cpuidle_device *device); |
31 | extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); | 32 | extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device); |
32 | extern int cpuidle_add_sysfs(struct device *dev); | 33 | extern int cpuidle_add_sysfs(struct cpuidle_device *dev); |
33 | extern void cpuidle_remove_sysfs(struct device *dev); | 34 | extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); |
34 | 35 | ||
35 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 36 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
36 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, | 37 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 87db3877fead..3af841fb397a 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -14,9 +14,10 @@ | |||
14 | 14 | ||
15 | #include "cpuidle.h" | 15 | #include "cpuidle.h" |
16 | 16 | ||
17 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 17 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
19 | int cpuidle_driver_refcount; | 18 | |
19 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
20 | static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); | ||
20 | 21 | ||
21 | static void set_power_states(struct cpuidle_driver *drv) | 22 | static void set_power_states(struct cpuidle_driver *drv) |
22 | { | 23 | { |
@@ -40,11 +41,15 @@ static void set_power_states(struct cpuidle_driver *drv) | |||
40 | drv->states[i].power_usage = -1 - i; | 41 | drv->states[i].power_usage = -1 - i; |
41 | } | 42 | } |
42 | 43 | ||
43 | /** | 44 | static void __cpuidle_driver_init(struct cpuidle_driver *drv) |
44 | * cpuidle_register_driver - registers a driver | 45 | { |
45 | * @drv: the driver | 46 | drv->refcnt = 0; |
46 | */ | 47 | |
47 | int cpuidle_register_driver(struct cpuidle_driver *drv) | 48 | if (!drv->power_specified) |
49 | set_power_states(drv); | ||
50 | } | ||
51 | |||
52 | static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) | ||
48 | { | 53 | { |
49 | if (!drv || !drv->state_count) | 54 | if (!drv || !drv->state_count) |
50 | return -EINVAL; | 55 | return -EINVAL; |
@@ -52,31 +57,145 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
52 | if (cpuidle_disabled()) | 57 | if (cpuidle_disabled()) |
53 | return -ENODEV; | 58 | return -ENODEV; |
54 | 59 | ||
55 | spin_lock(&cpuidle_driver_lock); | 60 | if (__cpuidle_get_cpu_driver(cpu)) |
56 | if (cpuidle_curr_driver) { | ||
57 | spin_unlock(&cpuidle_driver_lock); | ||
58 | return -EBUSY; | 61 | return -EBUSY; |
62 | |||
63 | __cpuidle_driver_init(drv); | ||
64 | |||
65 | __cpuidle_set_cpu_driver(drv, cpu); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu) | ||
71 | { | ||
72 | if (drv != __cpuidle_get_cpu_driver(cpu)) | ||
73 | return; | ||
74 | |||
75 | if (!WARN_ON(drv->refcnt > 0)) | ||
76 | __cpuidle_set_cpu_driver(NULL, cpu); | ||
77 | } | ||
78 | |||
79 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
80 | |||
81 | static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers); | ||
82 | |||
83 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
84 | { | ||
85 | per_cpu(cpuidle_drivers, cpu) = drv; | ||
86 | } | ||
87 | |||
88 | static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
89 | { | ||
90 | return per_cpu(cpuidle_drivers, cpu); | ||
91 | } | ||
92 | |||
93 | static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv) | ||
94 | { | ||
95 | int cpu; | ||
96 | for_each_present_cpu(cpu) | ||
97 | __cpuidle_unregister_driver(drv, cpu); | ||
98 | } | ||
99 | |||
100 | static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv) | ||
101 | { | ||
102 | int ret = 0; | ||
103 | int i, cpu; | ||
104 | |||
105 | for_each_present_cpu(cpu) { | ||
106 | ret = __cpuidle_register_driver(drv, cpu); | ||
107 | if (ret) | ||
108 | break; | ||
59 | } | 109 | } |
60 | 110 | ||
61 | if (!drv->power_specified) | 111 | if (ret) |
62 | set_power_states(drv); | 112 | for_each_present_cpu(i) { |
113 | if (i == cpu) | ||
114 | break; | ||
115 | __cpuidle_unregister_driver(drv, i); | ||
116 | } | ||
63 | 117 | ||
64 | cpuidle_curr_driver = drv; | ||
65 | 118 | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
123 | { | ||
124 | int ret; | ||
125 | |||
126 | spin_lock(&cpuidle_driver_lock); | ||
127 | ret = __cpuidle_register_driver(drv, cpu); | ||
66 | spin_unlock(&cpuidle_driver_lock); | 128 | spin_unlock(&cpuidle_driver_lock); |
67 | 129 | ||
68 | return 0; | 130 | return ret; |
131 | } | ||
132 | |||
133 | void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
134 | { | ||
135 | spin_lock(&cpuidle_driver_lock); | ||
136 | __cpuidle_unregister_driver(drv, cpu); | ||
137 | spin_unlock(&cpuidle_driver_lock); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * cpuidle_register_driver - registers a driver | ||
142 | * @drv: the driver | ||
143 | */ | ||
144 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | spin_lock(&cpuidle_driver_lock); | ||
149 | ret = __cpuidle_register_all_cpu_driver(drv); | ||
150 | spin_unlock(&cpuidle_driver_lock); | ||
151 | |||
152 | return ret; | ||
69 | } | 153 | } |
70 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | 154 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); |
71 | 155 | ||
72 | /** | 156 | /** |
73 | * cpuidle_get_driver - return the current driver | 157 | * cpuidle_unregister_driver - unregisters a driver |
158 | * @drv: the driver | ||
74 | */ | 159 | */ |
75 | struct cpuidle_driver *cpuidle_get_driver(void) | 160 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
161 | { | ||
162 | spin_lock(&cpuidle_driver_lock); | ||
163 | __cpuidle_unregister_all_cpu_driver(drv); | ||
164 | spin_unlock(&cpuidle_driver_lock); | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
167 | |||
168 | #else | ||
169 | |||
170 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
171 | |||
172 | static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
173 | { | ||
174 | cpuidle_curr_driver = drv; | ||
175 | } | ||
176 | |||
177 | static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
76 | { | 178 | { |
77 | return cpuidle_curr_driver; | 179 | return cpuidle_curr_driver; |
78 | } | 180 | } |
79 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | 181 | |
182 | /** | ||
183 | * cpuidle_register_driver - registers a driver | ||
184 | * @drv: the driver | ||
185 | */ | ||
186 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
187 | { | ||
188 | int ret, cpu; | ||
189 | |||
190 | cpu = get_cpu(); | ||
191 | spin_lock(&cpuidle_driver_lock); | ||
192 | ret = __cpuidle_register_driver(drv, cpu); | ||
193 | spin_unlock(&cpuidle_driver_lock); | ||
194 | put_cpu(); | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | ||
80 | 199 | ||
81 | /** | 200 | /** |
82 | * cpuidle_unregister_driver - unregisters a driver | 201 | * cpuidle_unregister_driver - unregisters a driver |
@@ -84,20 +203,50 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver); | |||
84 | */ | 203 | */ |
85 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) | 204 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
86 | { | 205 | { |
87 | if (drv != cpuidle_curr_driver) { | 206 | int cpu; |
88 | WARN(1, "invalid cpuidle_unregister_driver(%s)\n", | ||
89 | drv->name); | ||
90 | return; | ||
91 | } | ||
92 | 207 | ||
208 | cpu = get_cpu(); | ||
93 | spin_lock(&cpuidle_driver_lock); | 209 | spin_lock(&cpuidle_driver_lock); |
210 | __cpuidle_unregister_driver(drv, cpu); | ||
211 | spin_unlock(&cpuidle_driver_lock); | ||
212 | put_cpu(); | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
215 | #endif | ||
216 | |||
217 | /** | ||
218 | * cpuidle_get_driver - return the current driver | ||
219 | */ | ||
220 | struct cpuidle_driver *cpuidle_get_driver(void) | ||
221 | { | ||
222 | struct cpuidle_driver *drv; | ||
223 | int cpu; | ||
94 | 224 | ||
95 | if (!WARN_ON(cpuidle_driver_refcount > 0)) | 225 | cpu = get_cpu(); |
96 | cpuidle_curr_driver = NULL; | 226 | drv = __cpuidle_get_cpu_driver(cpu); |
227 | put_cpu(); | ||
97 | 228 | ||
229 | return drv; | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | ||
232 | |||
233 | /** | ||
234 | * cpuidle_get_cpu_driver - return the driver tied with a cpu | ||
235 | */ | ||
236 | struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) | ||
237 | { | ||
238 | struct cpuidle_driver *drv; | ||
239 | |||
240 | if (!dev) | ||
241 | return NULL; | ||
242 | |||
243 | spin_lock(&cpuidle_driver_lock); | ||
244 | drv = __cpuidle_get_cpu_driver(dev->cpu); | ||
98 | spin_unlock(&cpuidle_driver_lock); | 245 | spin_unlock(&cpuidle_driver_lock); |
246 | |||
247 | return drv; | ||
99 | } | 248 | } |
100 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | 249 | EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); |
101 | 250 | ||
102 | struct cpuidle_driver *cpuidle_driver_ref(void) | 251 | struct cpuidle_driver *cpuidle_driver_ref(void) |
103 | { | 252 | { |
@@ -105,8 +254,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
105 | 254 | ||
106 | spin_lock(&cpuidle_driver_lock); | 255 | spin_lock(&cpuidle_driver_lock); |
107 | 256 | ||
108 | drv = cpuidle_curr_driver; | 257 | drv = cpuidle_get_driver(); |
109 | cpuidle_driver_refcount++; | 258 | drv->refcnt++; |
110 | 259 | ||
111 | spin_unlock(&cpuidle_driver_lock); | 260 | spin_unlock(&cpuidle_driver_lock); |
112 | return drv; | 261 | return drv; |
@@ -114,10 +263,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
114 | 263 | ||
115 | void cpuidle_driver_unref(void) | 264 | void cpuidle_driver_unref(void) |
116 | { | 265 | { |
266 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
267 | |||
117 | spin_lock(&cpuidle_driver_lock); | 268 | spin_lock(&cpuidle_driver_lock); |
118 | 269 | ||
119 | if (!WARN_ON(cpuidle_driver_refcount <= 0)) | 270 | if (drv && !WARN_ON(drv->refcnt <= 0)) |
120 | cpuidle_driver_refcount--; | 271 | drv->refcnt--; |
121 | 272 | ||
122 | spin_unlock(&cpuidle_driver_lock); | 273 | spin_unlock(&cpuidle_driver_lock); |
123 | } | 274 | } |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 5b1f2c372c1f..bd40b943b6db 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -28,6 +28,13 @@ | |||
28 | #define MAX_INTERESTING 50000 | 28 | #define MAX_INTERESTING 50000 |
29 | #define STDDEV_THRESH 400 | 29 | #define STDDEV_THRESH 400 |
30 | 30 | ||
31 | /* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */ | ||
32 | #define MAX_DEVIATION 60 | ||
33 | |||
34 | static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer); | ||
35 | static DEFINE_PER_CPU(int, hrtimer_status); | ||
36 | /* menu hrtimer mode */ | ||
37 | enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; | ||
31 | 38 | ||
32 | /* | 39 | /* |
33 | * Concepts and ideas behind the menu governor | 40 | * Concepts and ideas behind the menu governor |
@@ -109,6 +116,13 @@ | |||
109 | * | 116 | * |
110 | */ | 117 | */ |
111 | 118 | ||
119 | /* | ||
120 | * The C-state residency is so long that it is worthwhile to exit | ||
121 | * from the shallow C-state and re-enter into a deeper C-state. | ||
122 | */ | ||
123 | static unsigned int perfect_cstate_ms __read_mostly = 30; | ||
124 | module_param(perfect_cstate_ms, uint, 0000); | ||
125 | |||
112 | struct menu_device { | 126 | struct menu_device { |
113 | int last_state_idx; | 127 | int last_state_idx; |
114 | int needs_update; | 128 | int needs_update; |
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor) | |||
191 | return div_u64(dividend + (divisor / 2), divisor); | 205 | return div_u64(dividend + (divisor / 2), divisor); |
192 | } | 206 | } |
193 | 207 | ||
208 | /* Cancel the hrtimer if it is not triggered yet */ | ||
209 | void menu_hrtimer_cancel(void) | ||
210 | { | ||
211 | int cpu = smp_processor_id(); | ||
212 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
213 | |||
214 | /* The timer has not timed out yet */ | ||
215 | if (per_cpu(hrtimer_status, cpu)) { | ||
216 | hrtimer_cancel(hrtmr); | ||
217 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
218 | } | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(menu_hrtimer_cancel); | ||
221 | |||
222 | /* Call back for hrtimer is triggered */ | ||
223 | static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer) | ||
224 | { | ||
225 | int cpu = smp_processor_id(); | ||
226 | struct menu_device *data = &per_cpu(menu_devices, cpu); | ||
227 | |||
228 | /* In general case, the expected residency is much larger than | ||
229 | * deepest C-state target residency, but prediction logic still | ||
230 | * predicts a small predicted residency, so the prediction | ||
231 | * history is totally broken if the timer is triggered. | ||
232 | * So reset the correction factor. | ||
233 | */ | ||
234 | if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL) | ||
235 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
236 | |||
237 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
238 | |||
239 | return HRTIMER_NORESTART; | ||
240 | } | ||
241 | |||
194 | /* | 242 | /* |
195 | * Try detecting repeating patterns by keeping track of the last 8 | 243 | * Try detecting repeating patterns by keeping track of the last 8 |
196 | * intervals, and checking if the standard deviation of that set | 244 | * intervals, and checking if the standard deviation of that set |
197 | * of points is below a threshold. If it is... then use the | 245 | * of points is below a threshold. If it is... then use the |
198 | * average of these 8 points as the estimated value. | 246 | * average of these 8 points as the estimated value. |
199 | */ | 247 | */ |
200 | static void detect_repeating_patterns(struct menu_device *data) | 248 | static u32 get_typical_interval(struct menu_device *data) |
201 | { | 249 | { |
202 | int i; | 250 | int i = 0, divisor = 0; |
203 | uint64_t avg = 0; | 251 | uint64_t max = 0, avg = 0, stddev = 0; |
204 | uint64_t stddev = 0; /* contains the square of the std deviation */ | 252 | int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ |
205 | 253 | unsigned int ret = 0; | |
206 | /* first calculate average and standard deviation of the past */ | ||
207 | for (i = 0; i < INTERVALS; i++) | ||
208 | avg += data->intervals[i]; | ||
209 | avg = avg / INTERVALS; | ||
210 | 254 | ||
211 | /* if the avg is beyond the known next tick, it's worthless */ | 255 | again: |
212 | if (avg > data->expected_us) | ||
213 | return; | ||
214 | 256 | ||
215 | for (i = 0; i < INTERVALS; i++) | 257 | /* first calculate average and standard deviation of the past */ |
216 | stddev += (data->intervals[i] - avg) * | 258 | max = avg = divisor = stddev = 0; |
217 | (data->intervals[i] - avg); | 259 | for (i = 0; i < INTERVALS; i++) { |
218 | 260 | int64_t value = data->intervals[i]; | |
219 | stddev = stddev / INTERVALS; | 261 | if (value <= thresh) { |
262 | avg += value; | ||
263 | divisor++; | ||
264 | if (value > max) | ||
265 | max = value; | ||
266 | } | ||
267 | } | ||
268 | do_div(avg, divisor); | ||
220 | 269 | ||
270 | for (i = 0; i < INTERVALS; i++) { | ||
271 | int64_t value = data->intervals[i]; | ||
272 | if (value <= thresh) { | ||
273 | int64_t diff = value - avg; | ||
274 | stddev += diff * diff; | ||
275 | } | ||
276 | } | ||
277 | do_div(stddev, divisor); | ||
278 | stddev = int_sqrt(stddev); | ||
221 | /* | 279 | /* |
222 | * now.. if stddev is small.. then assume we have a | 280 | * If we have outliers to the upside in our distribution, discard |
223 | * repeating pattern and predict we keep doing this. | 281 | * those by setting the threshold to exclude these outliers, then |
282 | * calculate the average and standard deviation again. Once we get | ||
283 | * down to the bottom 3/4 of our samples, stop excluding samples. | ||
284 | * | ||
285 | * This can deal with workloads that have long pauses interspersed | ||
286 | * with sporadic activity with a bunch of short pauses. | ||
287 | * | ||
288 | * The typical interval is obtained when standard deviation is small | ||
289 | * or standard deviation is small compared to the average interval. | ||
224 | */ | 290 | */ |
225 | 291 | if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) | |
226 | if (avg && stddev < STDDEV_THRESH) | 292 | || stddev <= 20) { |
227 | data->predicted_us = avg; | 293 | data->predicted_us = avg; |
294 | ret = 1; | ||
295 | return ret; | ||
296 | |||
297 | } else if ((divisor * 4) > INTERVALS * 3) { | ||
298 | /* Exclude the max interval */ | ||
299 | thresh = max - 1; | ||
300 | goto again; | ||
301 | } | ||
302 | |||
303 | return ret; | ||
228 | } | 304 | } |
229 | 305 | ||
230 | /** | 306 | /** |
@@ -240,6 +316,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
240 | int i; | 316 | int i; |
241 | int multiplier; | 317 | int multiplier; |
242 | struct timespec t; | 318 | struct timespec t; |
319 | int repeat = 0, low_predicted = 0; | ||
320 | int cpu = smp_processor_id(); | ||
321 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
243 | 322 | ||
244 | if (data->needs_update) { | 323 | if (data->needs_update) { |
245 | menu_update(drv, dev); | 324 | menu_update(drv, dev); |
@@ -274,7 +353,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
274 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], | 353 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], |
275 | RESOLUTION * DECAY); | 354 | RESOLUTION * DECAY); |
276 | 355 | ||
277 | detect_repeating_patterns(data); | 356 | repeat = get_typical_interval(data); |
278 | 357 | ||
279 | /* | 358 | /* |
280 | * We want to default to C1 (hlt), not to busy polling | 359 | * We want to default to C1 (hlt), not to busy polling |
@@ -295,8 +374,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
295 | 374 | ||
296 | if (s->disabled || su->disable) | 375 | if (s->disabled || su->disable) |
297 | continue; | 376 | continue; |
298 | if (s->target_residency > data->predicted_us) | 377 | if (s->target_residency > data->predicted_us) { |
378 | low_predicted = 1; | ||
299 | continue; | 379 | continue; |
380 | } | ||
300 | if (s->exit_latency > latency_req) | 381 | if (s->exit_latency > latency_req) |
301 | continue; | 382 | continue; |
302 | if (s->exit_latency * multiplier > data->predicted_us) | 383 | if (s->exit_latency * multiplier > data->predicted_us) |
@@ -309,6 +390,44 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
309 | } | 390 | } |
310 | } | 391 | } |
311 | 392 | ||
393 | /* not deepest C-state chosen for low predicted residency */ | ||
394 | if (low_predicted) { | ||
395 | unsigned int timer_us = 0; | ||
396 | unsigned int perfect_us = 0; | ||
397 | |||
398 | /* | ||
399 | * Set a timer to detect whether this sleep is much | ||
400 | * longer than repeat mode predicted. If the timer | ||
401 | * triggers, the code will evaluate whether to put | ||
402 | * the CPU into a deeper C-state. | ||
403 | * The timer is cancelled on CPU wakeup. | ||
404 | */ | ||
405 | timer_us = 2 * (data->predicted_us + MAX_DEVIATION); | ||
406 | |||
407 | perfect_us = perfect_cstate_ms * 1000; | ||
408 | |||
409 | if (repeat && (4 * timer_us < data->expected_us)) { | ||
410 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
411 | ns_to_ktime(1000 * timer_us), | ||
412 | HRTIMER_MODE_REL_PINNED)); | ||
413 | /* In repeat case, menu hrtimer is started */ | ||
414 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT; | ||
415 | } else if (perfect_us < data->expected_us) { | ||
416 | /* | ||
417 | * The next timer is long. This could be because | ||
418 | * we did not make a useful prediction. | ||
419 | * In that case, it makes sense to re-enter | ||
420 | * into a deeper C-state after some time. | ||
421 | */ | ||
422 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
423 | ns_to_ktime(1000 * timer_us), | ||
424 | HRTIMER_MODE_REL_PINNED)); | ||
425 | /* In general case, menu hrtimer is started */ | ||
426 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL; | ||
427 | } | ||
428 | |||
429 | } | ||
430 | |||
312 | return data->last_state_idx; | 431 | return data->last_state_idx; |
313 | } | 432 | } |
314 | 433 | ||
@@ -399,6 +518,9 @@ static int menu_enable_device(struct cpuidle_driver *drv, | |||
399 | struct cpuidle_device *dev) | 518 | struct cpuidle_device *dev) |
400 | { | 519 | { |
401 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 520 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
521 | struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu); | ||
522 | hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
523 | t->function = menu_hrtimer_notify; | ||
402 | 524 | ||
403 | memset(data, 0, sizeof(struct menu_device)); | 525 | memset(data, 0, sizeof(struct menu_device)); |
404 | 526 | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 5f809e337b89..340942946106 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
15 | #include <linux/device.h> | ||
15 | 16 | ||
16 | #include "cpuidle.h" | 17 | #include "cpuidle.h" |
17 | 18 | ||
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = { | |||
297 | NULL | 298 | NULL |
298 | }; | 299 | }; |
299 | 300 | ||
301 | struct cpuidle_state_kobj { | ||
302 | struct cpuidle_state *state; | ||
303 | struct cpuidle_state_usage *state_usage; | ||
304 | struct completion kobj_unregister; | ||
305 | struct kobject kobj; | ||
306 | }; | ||
307 | |||
300 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | 308 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) |
301 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | 309 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) |
302 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) | 310 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) |
@@ -356,17 +364,17 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | |||
356 | } | 364 | } |
357 | 365 | ||
358 | /** | 366 | /** |
359 | * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes | 367 | * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes |
360 | * @device: the target device | 368 | * @device: the target device |
361 | */ | 369 | */ |
362 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | 370 | static int cpuidle_add_state_sysfs(struct cpuidle_device *device) |
363 | { | 371 | { |
364 | int i, ret = -ENOMEM; | 372 | int i, ret = -ENOMEM; |
365 | struct cpuidle_state_kobj *kobj; | 373 | struct cpuidle_state_kobj *kobj; |
366 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 374 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); |
367 | 375 | ||
368 | /* state statistics */ | 376 | /* state statistics */ |
369 | for (i = 0; i < device->state_count; i++) { | 377 | for (i = 0; i < drv->state_count; i++) { |
370 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 378 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
371 | if (!kobj) | 379 | if (!kobj) |
372 | goto error_state; | 380 | goto error_state; |
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
374 | kobj->state_usage = &device->states_usage[i]; | 382 | kobj->state_usage = &device->states_usage[i]; |
375 | init_completion(&kobj->kobj_unregister); | 383 | init_completion(&kobj->kobj_unregister); |
376 | 384 | ||
377 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, | 385 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, |
378 | "state%d", i); | 386 | &device->kobj, "state%d", i); |
379 | if (ret) { | 387 | if (ret) { |
380 | kfree(kobj); | 388 | kfree(kobj); |
381 | goto error_state; | 389 | goto error_state; |
@@ -393,10 +401,10 @@ error_state: | |||
393 | } | 401 | } |
394 | 402 | ||
395 | /** | 403 | /** |
396 | * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes | 404 | * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes |
397 | * @device: the target device | 405 | * @device: the target device |
398 | */ | 406 | */ |
399 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | 407 | static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) |
400 | { | 408 | { |
401 | int i; | 409 | int i; |
402 | 410 | ||
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | |||
404 | cpuidle_free_state_kobj(device, i); | 412 | cpuidle_free_state_kobj(device, i); |
405 | } | 413 | } |
406 | 414 | ||
415 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
416 | #define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj) | ||
417 | #define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr) | ||
418 | |||
419 | #define define_one_driver_ro(_name, show) \ | ||
420 | static struct cpuidle_driver_attr attr_driver_##_name = \ | ||
421 | __ATTR(_name, 0644, show, NULL) | ||
422 | |||
423 | struct cpuidle_driver_kobj { | ||
424 | struct cpuidle_driver *drv; | ||
425 | struct completion kobj_unregister; | ||
426 | struct kobject kobj; | ||
427 | }; | ||
428 | |||
429 | struct cpuidle_driver_attr { | ||
430 | struct attribute attr; | ||
431 | ssize_t (*show)(struct cpuidle_driver *, char *); | ||
432 | ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); | ||
433 | }; | ||
434 | |||
435 | static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) | ||
436 | { | ||
437 | ssize_t ret; | ||
438 | |||
439 | spin_lock(&cpuidle_driver_lock); | ||
440 | ret = sprintf(buf, "%s\n", drv ? drv->name : "none"); | ||
441 | spin_unlock(&cpuidle_driver_lock); | ||
442 | |||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | static void cpuidle_driver_sysfs_release(struct kobject *kobj) | ||
447 | { | ||
448 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
449 | complete(&driver_kobj->kobj_unregister); | ||
450 | } | ||
451 | |||
452 | static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr, | ||
453 | char * buf) | ||
454 | { | ||
455 | int ret = -EIO; | ||
456 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
457 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
458 | |||
459 | if (dattr->show) | ||
460 | ret = dattr->show(driver_kobj->drv, buf); | ||
461 | |||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr, | ||
466 | const char *buf, size_t size) | ||
467 | { | ||
468 | int ret = -EIO; | ||
469 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
470 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
471 | |||
472 | if (dattr->store) | ||
473 | ret = dattr->store(driver_kobj->drv, buf, size); | ||
474 | |||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | define_one_driver_ro(name, show_driver_name); | ||
479 | |||
480 | static const struct sysfs_ops cpuidle_driver_sysfs_ops = { | ||
481 | .show = cpuidle_driver_show, | ||
482 | .store = cpuidle_driver_store, | ||
483 | }; | ||
484 | |||
485 | static struct attribute *cpuidle_driver_default_attrs[] = { | ||
486 | &attr_driver_name.attr, | ||
487 | NULL | ||
488 | }; | ||
489 | |||
490 | static struct kobj_type ktype_driver_cpuidle = { | ||
491 | .sysfs_ops = &cpuidle_driver_sysfs_ops, | ||
492 | .default_attrs = cpuidle_driver_default_attrs, | ||
493 | .release = cpuidle_driver_sysfs_release, | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute | ||
498 | * @device: the target device | ||
499 | */ | ||
500 | static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
501 | { | ||
502 | struct cpuidle_driver_kobj *kdrv; | ||
503 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
504 | int ret; | ||
505 | |||
506 | kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL); | ||
507 | if (!kdrv) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | kdrv->drv = drv; | ||
511 | init_completion(&kdrv->kobj_unregister); | ||
512 | |||
513 | ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, | ||
514 | &dev->kobj, "driver"); | ||
515 | if (ret) { | ||
516 | kfree(kdrv); | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | kobject_uevent(&kdrv->kobj, KOBJ_ADD); | ||
521 | dev->kobj_driver = kdrv; | ||
522 | |||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute | ||
528 | * @device: the target device | ||
529 | */ | ||
530 | static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
531 | { | ||
532 | struct cpuidle_driver_kobj *kdrv = dev->kobj_driver; | ||
533 | kobject_put(&kdrv->kobj); | ||
534 | wait_for_completion(&kdrv->kobj_unregister); | ||
535 | kfree(kdrv); | ||
536 | } | ||
537 | #else | ||
538 | static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
539 | { | ||
540 | return 0; | ||
541 | } | ||
542 | |||
543 | static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
544 | { | ||
545 | ; | ||
546 | } | ||
547 | #endif | ||
548 | |||
549 | /** | ||
550 | * cpuidle_add_device_sysfs - adds device specific sysfs attributes | ||
551 | * @device: the target device | ||
552 | */ | ||
553 | int cpuidle_add_device_sysfs(struct cpuidle_device *device) | ||
554 | { | ||
555 | int ret; | ||
556 | |||
557 | ret = cpuidle_add_state_sysfs(device); | ||
558 | if (ret) | ||
559 | return ret; | ||
560 | |||
561 | ret = cpuidle_add_driver_sysfs(device); | ||
562 | if (ret) | ||
563 | cpuidle_remove_state_sysfs(device); | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * cpuidle_remove_device_sysfs : removes device specific sysfs attributes | ||
569 | * @device : the target device | ||
570 | */ | ||
571 | void cpuidle_remove_device_sysfs(struct cpuidle_device *device) | ||
572 | { | ||
573 | cpuidle_remove_driver_sysfs(device); | ||
574 | cpuidle_remove_state_sysfs(device); | ||
575 | } | ||
576 | |||
407 | /** | 577 | /** |
408 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | 578 | * cpuidle_add_sysfs - creates a sysfs instance for the target device |
409 | * @dev: the target device | 579 | * @dev: the target device |
410 | */ | 580 | */ |
411 | int cpuidle_add_sysfs(struct device *cpu_dev) | 581 | int cpuidle_add_sysfs(struct cpuidle_device *dev) |
412 | { | 582 | { |
413 | int cpu = cpu_dev->id; | 583 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); |
414 | struct cpuidle_device *dev; | ||
415 | int error; | 584 | int error; |
416 | 585 | ||
417 | dev = per_cpu(cpuidle_devices, cpu); | 586 | init_completion(&dev->kobj_unregister); |
587 | |||
418 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, | 588 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, |
419 | "cpuidle"); | 589 | "cpuidle"); |
420 | if (!error) | 590 | if (!error) |
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev) | |||
426 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | 596 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device |
427 | * @dev: the target device | 597 | * @dev: the target device |
428 | */ | 598 | */ |
429 | void cpuidle_remove_sysfs(struct device *cpu_dev) | 599 | void cpuidle_remove_sysfs(struct cpuidle_device *dev) |
430 | { | 600 | { |
431 | int cpu = cpu_dev->id; | ||
432 | struct cpuidle_device *dev; | ||
433 | |||
434 | dev = per_cpu(cpuidle_devices, cpu); | ||
435 | kobject_put(&dev->kobj); | 601 | kobject_put(&dev->kobj); |
602 | wait_for_completion(&dev->kobj_unregister); | ||
436 | } | 603 | } |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b0f6b4c8ee14..c49c04d9c2b0 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/cpuidle.h> | 57 | #include <linux/cpuidle.h> |
58 | #include <linux/clockchips.h> | 58 | #include <linux/clockchips.h> |
59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | ||
60 | #include <trace/events/power.h> | 59 | #include <trace/events/power.h> |
61 | #include <linux/sched.h> | 60 | #include <linux/sched.h> |
62 | #include <linux/notifier.h> | 61 | #include <linux/notifier.h> |
@@ -72,6 +71,7 @@ | |||
72 | static struct cpuidle_driver intel_idle_driver = { | 71 | static struct cpuidle_driver intel_idle_driver = { |
73 | .name = "intel_idle", | 72 | .name = "intel_idle", |
74 | .owner = THIS_MODULE, | 73 | .owner = THIS_MODULE, |
74 | .en_core_tk_irqen = 1, | ||
75 | }; | 75 | }; |
76 | /* intel_idle.max_cstate=0 disables driver */ | 76 | /* intel_idle.max_cstate=0 disables driver */ |
77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | 77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; |
@@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | 282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); |
283 | unsigned int cstate; | 283 | unsigned int cstate; |
284 | ktime_t kt_before, kt_after; | ||
285 | s64 usec_delta; | ||
286 | int cpu = smp_processor_id(); | 284 | int cpu = smp_processor_id(); |
287 | 285 | ||
288 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | 286 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; |
@@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
297 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 295 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
298 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 296 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
299 | 297 | ||
300 | kt_before = ktime_get_real(); | ||
301 | |||
302 | stop_critical_timings(); | 298 | stop_critical_timings(); |
303 | if (!need_resched()) { | 299 | if (!need_resched()) { |
304 | 300 | ||
@@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev, | |||
310 | 306 | ||
311 | start_critical_timings(); | 307 | start_critical_timings(); |
312 | 308 | ||
313 | kt_after = ktime_get_real(); | ||
314 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | ||
315 | |||
316 | local_irq_enable(); | ||
317 | |||
318 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 309 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
319 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 310 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
320 | 311 | ||
321 | /* Update cpuidle counters */ | ||
322 | dev->last_residency = (int)usec_delta; | ||
323 | |||
324 | return index; | 312 | return index; |
325 | } | 313 | } |
326 | 314 | ||