Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	127
1 file changed, 35 insertions(+), 92 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bc22a471fe3..6fe121434ffb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -64,7 +64,6 @@
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
-#define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS	(1000000000ULL/PM_TIMER_FREQUENCY)
 #define C2_OVERHEAD		1	/* 1us */
 #define C3_OVERHEAD		1	/* 1us */
@@ -78,6 +77,10 @@ module_param(nocst, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static s64 us_to_pm_timer_ticks(s64 t)
+{
+	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
+}
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
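
The new helper replaces the US_TO_PM_TIMER_TICKS() macro removed above: the macro truncated PM_TIMER_FREQUENCY/1000 to an integer before multiplying, while the function multiplies first and divides once, going through div64_u64() so the 64-by-64 division also links on 32-bit kernels. A standalone userspace sketch of the same arithmetic, assuming the standard 3.579545 MHz ACPI PM timer rate for PM_TIMER_FREQUENCY:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545ULL	/* Hz; the ACPI PM timer rate */

/* Microseconds of measured idle time -> PM timer ticks, one division. */
static int64_t us_to_pm_timer_ticks(int64_t t)
{
	return (int64_t)(((uint64_t)t * PM_TIMER_FREQUENCY) / 1000000ULL);
}

int main(void)
{
	/* For t = 10000 us the old macro computes 10000*3579/1000 = 35790
	   ticks; multiplying before dividing yields the exact 35795. */
	printf("%lld\n", (long long)us_to_pm_timer_ticks(10000));
	return 0;
}
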
@@ -101,57 +104,6 @@ static int set_max_cstate(const struct dmi_system_id *id)
 /* Actually this shouldn't be __cpuinitdata, would be better to fix the
    callers to only run once -AK */
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
-	{ set_max_cstate, "IBM ThinkPad R40e", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
-	{ set_max_cstate, "Medion 41700", {
-	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
-	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
 	{ set_max_cstate, "Clevo 5600D", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
@@ -159,25 +111,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	{},
 };
 
-static inline u32 ticks_elapsed(u32 t1, u32 t2)
-{
-	if (t2 >= t1)
-		return (t2 - t1);
-	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-	else
-		return ((0xFFFFFFFF - t1) + t2);
-}
-
-static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
-{
-	if (t2 >= t1)
-		return PM_TIMER_TICKS_TO_US(t2 - t1);
-	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-	else
-		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
-}
 
 /*
  * Callers should disable interrupts before the call and enable
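
The helpers deleted above existed because the PM timer counter wraps at 24 bits unless the FADT sets ACPI_FADT_32BIT_TIMER, so a read taken after the wrap can be numerically smaller than one taken before it; ktime_get_real() timestamps make the modular arithmetic unnecessary. For reference, a minimal standalone rendering of the retired logic (pm_timer_is_32bit stands in for the FADT flag test):

#include <stdint.h>
#include <stdio.h>

static int pm_timer_is_32bit;	/* mirrors acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER */

/* Elapsed ticks between two PM timer reads, tolerating one wraparound. */
static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2)
{
	if (t2 >= t1)
		return t2 - t1;
	else if (!pm_timer_is_32bit)
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;	/* 24-bit counter */
	else
		return (0xFFFFFFFF - t1) + t2;
}

int main(void)
{
	/* A 24-bit read pair straddling the wrap: 0xFFFFF0 then 0x10. */
	printf("0x%x\n", ticks_elapsed(0xFFFFF0, 0x10));	/* prints 0x1f */
	return 0;
}
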
@@ -212,6 +145,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	struct acpi_processor_power *pwr = &pr->power;
 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 
+	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
+		return;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable
@@ -630,7 +566,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	 * In either case, the proper way to
 	 * handle BM_RLD is to set it and leave it set.
 	 */
-	acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 
 	return;
 }
@@ -800,9 +736,9 @@ static int acpi_idle_bm_check(void)
 {
 	u32 bm_status = 0;
 
-	acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 	if (bm_status)
-		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 	/*
 	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 	 * the true state of bus mastering activity; forcing us to
@@ -853,7 +789,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 			      struct cpuidle_state *state)
 {
-	u32 t1, t2;
+	ktime_t kt1, kt2;
+	s64 idle_time;
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
@@ -871,14 +808,15 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		return 0;
 	}
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
 	local_irq_enable();
 	cx->usage++;
 
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 /**
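
Each idle-entry path in this patch gets the same bracketing: a ktime_get_real() timestamp on either side of acpi_idle_do_entry(), with ktime_sub()/ktime_to_us() yielding the residency in microseconds, in place of two inl() reads of the PM timer port. A rough userspace analogue of the pattern, with clock_gettime() standing in for ktime_get_real() and nanosleep() for the idle entry:

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_us(void)	/* stand-in for ktime_get_real() */
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	struct timespec nap = { 0, 1500 * 1000 };	/* ~1.5 ms of "idle" */
	int64_t kt1, kt2, idle_time;

	kt1 = now_us();
	nanosleep(&nap, NULL);	/* stand-in for acpi_idle_do_entry(cx) */
	kt2 = now_us();
	idle_time = kt2 - kt1;	/* what ktime_to_us(ktime_sub(kt2, kt1)) yields */

	printf("idle_time = %lld us\n", (long long)idle_time);
	return 0;
}
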
@@ -891,8 +829,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 {
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-	u32 t1, t2;
-	int sleep_ticks = 0;
+	ktime_t kt1, kt2;
+	s64 idle_time;
+	s64 sleep_ticks = 0;
 
 	pr = __get_cpu_var(processors);
 
@@ -925,18 +864,19 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt1 = ktime_get_real();
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 	/* TSC could halt in idle, so notify users */
 	if (tsc_halts_in_c(cx->type))
 		mark_tsc_unstable("TSC halts in idle");;
 #endif
-	sleep_ticks = ticks_elapsed(t1, t2);
+	sleep_ticks = us_to_pm_timer_ticks(idle_time);
 
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
@@ -948,7 +888,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	acpi_state_timer_broadcast(pr, cx, 0);
 	cx->time += sleep_ticks;
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 static int c3_cpu_count;
@@ -966,8 +906,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 {
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-	u32 t1, t2;
-	int sleep_ticks = 0;
+	ktime_t kt1, kt2;
+	s64 idle_time;
+	s64 sleep_ticks = 0;
+
 
 	pr = __get_cpu_var(processors);
 
@@ -1028,20 +970,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 		spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
 
-	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
-	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	kt2 = ktime_get_real();
+	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		spin_unlock(&c3_lock);
 	}
@@ -1051,7 +994,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (tsc_halts_in_c(ACPI_STATE_C3))
 		mark_tsc_unstable("TSC halts in idle");
 #endif
-	sleep_ticks = ticks_elapsed(t1, t2);
+	sleep_ticks = us_to_pm_timer_ticks(idle_time);
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
@@ -1062,7 +1005,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	acpi_state_timer_broadcast(pr, cx, 0);
 	cx->time += sleep_ticks;
-	return ticks_elapsed_in_us(t1, t2);
+	return idle_time;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
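
One consequence of keeping sleep_ticks: sched_clock_idle_wakeup_event() still takes nanoseconds computed as sleep_ticks * PM_TIMER_TICK_NS, so the measured microseconds make a round trip through PM timer ticks. A quick standalone check, using the constants defined at the top of the file, that the round trip preserves the value to within tick-granularity truncation:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545ULL				/* Hz */
#define PM_TIMER_TICK_NS (1000000000ULL / PM_TIMER_FREQUENCY)	/* 279 ns */

int main(void)
{
	int64_t idle_time = 2000;	/* us, as ktime_to_us() would report */
	int64_t sleep_ticks = (int64_t)(idle_time * PM_TIMER_FREQUENCY / 1000000);
	uint64_t ns = (uint64_t)sleep_ticks * PM_TIMER_TICK_NS;

	/* 7159 ticks -> 1997361 ns, i.e. ~2000000 ns minus truncation error */
	printf("%lld ticks -> %llu ns\n",
	       (long long)sleep_ticks, (unsigned long long)ns);
	return 0;
}
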