Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c  266
1 file changed, 200 insertions(+), 66 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1b..0e8e2de2ed3e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -37,7 +37,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 #include <linux/irqflags.h>
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
-static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
-	if (acpi_idle_suspend == 1)
-		return 0;
-
 	acpi_idle_bm_rld_save();
-	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
-	if (acpi_idle_suspend == 0)
-		return 0;
-
 	acpi_idle_bm_rld_restore();
-	acpi_idle_suspend = 0;
 	return 0;
 }
 
@@ -741,66 +732,65 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t kt1, kt2;
 	s64 idle_time;
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
 	local_irq_disable();
 
-	/* Do not access any ACPI IO ports in suspend path */
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return 0;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
 	kt2 = ktime_get_real();
 	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	local_irq_enable();
 	cx->usage++;
 	lapic_timer_state_broadcast(pr, cx, 0);
 
-	return idle_time;
+	return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver with cpuidle state information
+ * @index: the index of suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-				  struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+		return -EINVAL;
 
 	local_irq_disable();
 
@@ -815,7 +805,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -837,6 +827,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -848,46 +841,46 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing state data
+ * @index: the index of suggested state
  *
  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+		return -EINVAL;
 
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (dev->safe_state) {
-			dev->last_state = dev->safe_state;
-			return dev->safe_state->enter(dev, dev->safe_state);
+		if (drv->safe_state_index >= 0) {
+			return drv->states[drv->safe_state_index].enter(dev,
+				drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
 			acpi_safe_halt();
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -904,7 +897,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -930,12 +923,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * without doing anything.
 	 */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
@@ -944,16 +937,19 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	}
 	kt2 = ktime_get_real();
 	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency*/
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -965,7 +961,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
@@ -974,14 +970,16 @@ struct cpuidle_driver acpi_idle_driver = {
 };
 
 /**
- * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
+ * device i.e. per-cpu data
+ *
  * @pr: the ACPI processor
  */
-static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 {
 	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state *state;
+	struct cpuidle_state_usage *state_usage;
 	struct cpuidle_device *dev = &pr->power.dev;
 
 	if (!pr->flags.power_setup_done)
@@ -992,9 +990,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	}
 
 	dev->cpu = pr->id;
+
+	if (max_cstate == 0)
+		max_cstate = 1;
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+		state_usage = &dev->states_usage[count];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
+#endif
+
+		cpuidle_set_statedata(state_usage, cx);
+
+		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
+ * global state data i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+	int i, count = CPUIDLE_DRIVER_STATE_START;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_driver *drv = &acpi_idle_driver;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0)
+		return -EINVAL;
+
+	drv->safe_state_index = -1;
 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		dev->states[i].name[0] = '\0';
-		dev->states[i].desc[0] = '\0';
+		drv->states[i].name[0] = '\0';
+		drv->states[i].desc[0] = '\0';
 	}
 
 	if (max_cstate == 0)
@@ -1002,7 +1053,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
-		state = &dev->states[count];
 
 		if (!cx->valid)
 			continue;
@@ -1013,8 +1063,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 			continue;
 #endif
-		cpuidle_set_statedata(state, cx);
 
+		state = &drv->states[count];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
@@ -1027,13 +1077,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
 			state->enter = acpi_idle_enter_c1;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 		case ACPI_STATE_C3:
@@ -1049,7 +1099,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			break;
 	}
 
-	dev->state_count = count;
+	drv->state_count = count;
 
 	if (!count)
 		return -EINVAL;
@@ -1057,7 +1107,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_hotplug(struct acpi_processor *pr)
 {
 	int ret = 0;
 
@@ -1078,7 +1128,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	cpuidle_disable_device(&pr->power.dev);
 	acpi_processor_get_power_info(pr);
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
+		acpi_processor_setup_cpuidle_cx(pr);
 		ret = cpuidle_enable_device(&pr->power.dev);
 	}
 	cpuidle_resume_and_unlock();
@@ -1086,10 +1136,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	return ret;
 }
 
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int cpu;
+	struct acpi_processor *_pr;
+
+	if (disabled_by_idle_boot_param())
+		return 0;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst)
+		return -ENODEV;
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/*
+	 * FIXME: Design the ACPI notification to make it once per
+	 * system instead of once per-cpu. This condition is a hack
+	 * to make the code that updates C-States be called once.
+	 */
+
+	if (smp_processor_id() == 0 &&
+			cpuidle_get_driver() == &acpi_idle_driver) {
+
+		cpuidle_pause_and_lock();
+		/* Protect against cpu-hotplug */
+		get_online_cpus();
+
+		/* Disable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			cpuidle_disable_device(&_pr->power.dev);
+		}
+
+		/* Populate Updated C-state information */
+		acpi_processor_setup_cpuidle_states(pr);
+
+		/* Enable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			acpi_processor_get_power_info(_pr);
+			if (_pr->flags.power) {
+				acpi_processor_setup_cpuidle_cx(_pr);
+				cpuidle_enable_device(&_pr->power.dev);
+			}
+		}
+		put_online_cpus();
+		cpuidle_resume_and_unlock();
+	}
+
+	return 0;
+}
+
+static int acpi_processor_registered;
+
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			struct acpi_device *device)
 {
 	acpi_status status = 0;
+	int retval;
 	static int first_run;
 
 	if (disabled_by_idle_boot_param())
@@ -1126,9 +1238,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	 * platforms that only support C1.
 	 */
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
-		if (cpuidle_register_device(&pr->power.dev))
-			return -EIO;
+		/* Register acpi_idle_driver if not already registered */
+		if (!acpi_processor_registered) {
+			acpi_processor_setup_cpuidle_states(pr);
+			retval = cpuidle_register_driver(&acpi_idle_driver);
+			if (retval)
+				return retval;
+			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+					acpi_idle_driver.name);
+		}
+		/* Register per-cpu cpuidle_device. Cpuidle driver
+		 * must already be registered before registering device
+		 */
+		acpi_processor_setup_cpuidle_cx(pr);
+		retval = cpuidle_register_device(&pr->power.dev);
+		if (retval) {
+			if (acpi_processor_registered == 0)
+				cpuidle_unregister_driver(&acpi_idle_driver);
+			return retval;
+		}
+		acpi_processor_registered++;
 	}
 	return 0;
 }
@@ -1139,8 +1268,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 	if (disabled_by_idle_boot_param())
 		return 0;
 
-	cpuidle_unregister_device(&pr->power.dev);
-	pr->flags.power_setup_done = 0;
+	if (pr->flags.power) {
+		cpuidle_unregister_device(&pr->power.dev);
+		acpi_processor_registered--;
+		if (acpi_processor_registered == 0)
+			cpuidle_unregister_driver(&acpi_idle_driver);
+	}
 
+	pr->flags.power_setup_done = 0;
 	return 0;
 }
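
The hunks above track the cpuidle rework that moves the state table (name, latency, enter callback) into the single, globally registered struct cpuidle_driver, while struct cpuidle_device keeps only per-cpu bookkeeping (states_usage, last_residency), and .enter() callbacks now take a state index and report residency through dev->last_residency. As a reference, below is a minimal sketch of a driver written against that same post-patch contract; it is not part of this patch, and all names prefixed "my_" are hypothetical.

/*
 * Minimal sketch of the cpuidle contract this conversion targets.
 * Names prefixed "my_" are invented for illustration only.
 */
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/percpu.h>

static int my_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;

	dev->last_residency = 0;	/* report zero residency if we bail out early */

	t1 = ktime_get_real();
	/* ... platform-specific idle entry for drv->states[index] goes here ... */
	t2 = ktime_get_real();

	/* the governor reads residency from the device, not the return value */
	dev->last_residency = (int)ktime_to_us(ktime_sub(t2, t1));

	return index;			/* index of the state actually entered */
}

static struct cpuidle_driver my_idle_driver = {
	.name			= "my_idle",
	.owner			= THIS_MODULE,
	.states[0]		= {
		.name			= "C1",
		.desc			= "hypothetical halt state",
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.enter			= my_enter,
	},
	.state_count		= 1,
	.safe_state_index	= 0,
};

static DEFINE_PER_CPU(struct cpuidle_device, my_idle_device);
static int my_registered_count;

/* Mirrors the registration split in acpi_processor_power_init() above:
 * the global driver (idle routines) is registered exactly once, then one
 * cpuidle_device per cpu, with the driver unregistered again if the very
 * first device registration fails.
 */
static int my_processor_init(int cpu)
{
	struct cpuidle_device *dev = &per_cpu(my_idle_device, cpu);
	int ret;

	if (!my_registered_count) {
		ret = cpuidle_register_driver(&my_idle_driver);
		if (ret)
			return ret;
	}

	dev->cpu = cpu;
	dev->state_count = my_idle_driver.state_count;
	ret = cpuidle_register_device(dev);
	if (ret) {
		if (my_registered_count == 0)
			cpuidle_unregister_driver(&my_idle_driver);
		return ret;
	}
	my_registered_count++;
	return 0;
}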