Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig                    |  18
-rw-r--r--  drivers/acpi/Makefile                   |   2
-rw-r--r--  drivers/acpi/ac.c                       |  20
-rw-r--r--  drivers/acpi/battery.c                  |  22
-rw-r--r--  drivers/acpi/ec.c                       |  43
-rw-r--r--  drivers/acpi/processor_idle.c           | 111
-rw-r--r--  drivers/acpi/sbs.c                      |  43
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  | 153
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c          |  24
9 files changed, 263 insertions, 173 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 087a7028ae84..b9f923ef173d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -50,7 +50,6 @@ config ACPI_SLEEP
 config ACPI_PROCFS
 	bool "Deprecated /proc/acpi files"
 	depends on PROC_FS
-	default y
 	---help---
 	  For backwards compatibility, this option allows
 	  deprecated /proc/acpi/ files to exist, even when
@@ -61,7 +60,6 @@ config ACPI_PROCFS
 	  /proc/acpi/info (/sys/modules/acpi/parameters/acpica_version)
 	  /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT)
 	  /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
-	  /proc/acpi/battery (/sys/class/power_supply)
 	  /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
 	  /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
 
@@ -69,7 +67,21 @@ config ACPI_PROCFS
 	  and functions which do not yet exist in /sys.
 
 	  Say N to delete /proc/acpi/ files that have moved to /sys/
-
+config ACPI_PROCFS_POWER
+	bool "Deprecated power /proc/acpi folders"
+	depends on PROC_FS
+	default y
+	---help---
+	  For backwards compatibility, this option allows
+	  deprecated power /proc/acpi/ folders to exist, even when
+	  they have been replaced by functions in /sys.
+	  The deprecated folders (and their replacements) include:
+	  /proc/acpi/battery/* (/sys/class/power_supply/*)
+	  /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
+	  This option has no effect on /proc/acpi/ folders
+	  and functions, which do not yet exist in /sys
+
+	  Say N to delete power /proc/acpi/ folders that have moved to /sys/
 config ACPI_PROC_EVENT
 	bool "Deprecated /proc/acpi/event support"
 	depends on PROC_FS
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 54e3ab0e5fc0..456446f90077 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -58,6 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
 obj-$(CONFIG_ACPI_ASUS)		+= asus_acpi.o
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY)	+= acpi_memhotplug.o
-obj-y				+= cm_sbs.o
+obj-$(CONFIG_ACPI_PROCFS_POWER)	+= cm_sbs.o
 obj-$(CONFIG_ACPI_SBS)		+= sbs.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 30238f6ff232..76ed4f52bebd 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -27,7 +27,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #endif
@@ -51,7 +51,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 extern struct proc_dir_entry *acpi_lock_ac_dir(void);
 extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
 static int acpi_ac_open_fs(struct inode *inode, struct file *file);
@@ -86,7 +86,7 @@ struct acpi_ac {
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 static const struct file_operations acpi_ac_fops = {
 	.open = acpi_ac_open_fs,
 	.read = seq_read,
@@ -136,7 +136,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
 	return 0;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -275,7 +275,7 @@ static int acpi_ac_add(struct acpi_device *device)
 	if (result)
 		goto end;
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_ac_add_fs(device);
 #endif
 	if (result)
@@ -300,7 +300,7 @@ static int acpi_ac_add(struct acpi_device *device)
 
       end:
 	if (result) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_ac_remove_fs(device);
 #endif
 		kfree(ac);
@@ -339,7 +339,7 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
 					    ACPI_ALL_NOTIFY, acpi_ac_notify);
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_ac_remove_fs(device);
 #endif
 
@@ -355,7 +355,7 @@ static int __init acpi_ac_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_ac_dir = acpi_lock_ac_dir();
 	if (!acpi_ac_dir)
 		return -ENODEV;
@@ -363,7 +363,7 @@ static int __init acpi_ac_init(void)
 
 	result = acpi_bus_register_driver(&acpi_ac_driver);
 	if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_unlock_ac_dir(acpi_ac_dir);
 #endif
 		return -ENODEV;
@@ -377,7 +377,7 @@ static void __exit acpi_ac_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_ac_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_unlock_ac_dir(acpi_ac_dir);
 #endif
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 192c244f6190..7d6be23eff89 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -31,7 +31,7 @@
 #include <linux/types.h>
 #include <linux/jiffies.h>
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <asm/uaccess.h>
@@ -63,7 +63,7 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 extern struct proc_dir_entry *acpi_lock_battery_dir(void);
 extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 
@@ -153,6 +153,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
 			val->intval = POWER_SUPPLY_STATUS_CHARGING;
 		else if (battery->state == 0)
 			val->intval = POWER_SUPPLY_STATUS_FULL;
+		else
+			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
 		break;
 	case POWER_SUPPLY_PROP_PRESENT:
 		val->intval = acpi_battery_present(battery);
@@ -221,7 +223,7 @@ static enum power_supply_property energy_battery_props[] = {
 	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 inline char *acpi_battery_units(struct acpi_battery *battery)
 {
 	return (battery->power_unit)?"mA":"mW";
@@ -479,7 +481,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 static struct proc_dir_entry *acpi_battery_dir;
 
 static int acpi_battery_print_info(struct seq_file *seq, int result)
@@ -786,7 +788,7 @@ static int acpi_battery_add(struct acpi_device *device)
 	acpi_driver_data(device) = battery;
 	mutex_init(&battery->lock);
 	acpi_battery_update(battery);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_battery_add_fs(device);
 	if (result)
 		goto end;
@@ -804,7 +806,7 @@ static int acpi_battery_add(struct acpi_device *device)
 	       device->status.battery_present ? "present" : "absent");
       end:
 	if (result) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_battery_remove_fs(device);
 #endif
 		kfree(battery);
@@ -823,7 +825,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
 	status = acpi_remove_notify_handler(device->handle,
 					    ACPI_ALL_NOTIFY,
 					    acpi_battery_notify);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_battery_remove_fs(device);
 #endif
 	sysfs_remove_battery(battery);
@@ -859,13 +861,13 @@ static int __init acpi_battery_init(void)
 {
 	if (acpi_disabled)
 		return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_battery_dir = acpi_lock_battery_dir();
 	if (!acpi_battery_dir)
 		return -ENODEV;
 #endif
 	if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_unlock_battery_dir(acpi_battery_dir);
 #endif
 		return -ENODEV;
@@ -876,7 +878,7 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
 	acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_unlock_battery_dir(acpi_battery_dir);
 #endif
 }
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 06b78e5e33a1..d6ddb547f2d9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -75,7 +75,8 @@ enum {
 	EC_FLAGS_WAIT_GPE = 0,	/* Don't check status until GPE arrives */
 	EC_FLAGS_QUERY_PENDING,	/* Query is pending */
 	EC_FLAGS_GPE_MODE,	/* Expect GPE to be sent for status change */
-	EC_FLAGS_ONLY_IBF_GPE,	/* Expect GPE only for IBF = 0 event */
+	EC_FLAGS_NO_ADDRESS_GPE, /* Expect GPE only for non-address event */
+	EC_FLAGS_ADDRESS,	/* Address is being written */
 };
 
 static int acpi_ec_remove(struct acpi_device *device, int type);
@@ -166,38 +167,45 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 
 static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 {
+	int ret = 0;
+	if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
+		     test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
+		force_poll = 1;
 	if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
 	    likely(!force_poll)) {
 		if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
 				       msecs_to_jiffies(ACPI_EC_DELAY)))
-			return 0;
+			goto end;
 		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		if (acpi_ec_check_status(ec, event)) {
-			if (event == ACPI_EC_EVENT_OBF_1) {
-				/* miss OBF = 1 GPE, don't expect it anymore */
-				printk(KERN_INFO PREFIX "missing OBF_1 confirmation,"
-					"switching to degraded mode.\n");
-				set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags);
+			if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
+				/* miss address GPE, don't expect it anymore */
+				printk(KERN_INFO PREFIX "missing address confirmation,"
+					"don't expect it any longer.\n");
+				set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
 			} else {
 				/* missing GPEs, switch back to poll mode */
-				printk(KERN_INFO PREFIX "missing IBF_1 confirmations,"
+				printk(KERN_INFO PREFIX "missing confirmations,"
 					"switch off interrupt mode.\n");
 				clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
 			}
-			return 0;
+			goto end;
 		}
 	} else {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
 		clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		while (time_before(jiffies, delay)) {
 			if (acpi_ec_check_status(ec, event))
-				return 0;
+				goto end;
 		}
 	}
 	printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
 	       " status = %d, expect_event = %d\n",
 	       acpi_ec_read_status(ec), event);
-	return -ETIME;
+	ret = -ETIME;
+      end:
+	clear_bit(EC_FLAGS_ADDRESS, &ec->flags);
+	return ret;
 }
 
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
@@ -216,6 +224,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 			       "write_cmd timeout, command = %d\n", command);
 			goto end;
 		}
+		/* mark the address byte written to EC */
+		if (rdata_len + wdata_len > 1)
+			set_bit(EC_FLAGS_ADDRESS, &ec->flags);
 		set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
 		acpi_ec_write_data(ec, *(wdata++));
 	}
@@ -231,8 +242,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
 	for (; rdata_len > 0; --rdata_len) {
-		if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags))
-			force_poll = 1;
 		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
@@ -881,12 +890,20 @@ int __init acpi_ec_ecdt_probe(void)
 		boot_ec->gpe = ecdt_ptr->gpe;
 		boot_ec->handle = ACPI_ROOT_OBJECT;
 	} else {
+		/* This workaround is needed only on some broken machines,
+		 * which require early EC, but fail to provide ECDT */
+		acpi_handle x;
 		printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
 		status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
 					  boot_ec, NULL);
 		/* Check that acpi_get_devices actually find something */
 		if (ACPI_FAILURE(status) || !boot_ec->handle)
 			goto error;
+		/* We really need to limit this workaround, the only ASUS,
+		 * which needs it, has fake EC._INI method, so use it as flag.
+		 */
+		if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
+			goto error;
 	}
 
 	ret = ec_install_handlers(boot_ec);
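
A note on the "rdata_len + wdata_len > 1" test above: any EC transaction that moves more than one payload byte necessarily begins by writing a register address, and that address write is the step whose confirmation GPE can go missing on the affected machines. Below is a minimal sketch of how the usual transaction shapes map onto those lengths. It assumes the acpi_ec_transaction() helper keeps the (command, wdata, wdata_len, rdata, rdata_len, force_poll) parameter order used in this file; the wrapper names are illustrative and not part of the patch.

/* Illustrative only: how payload lengths identify an address write. */
static int sketch_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	/* 1 byte written (the address) + 1 byte read: 1 + 1 > 1,
	 * so EC_FLAGS_ADDRESS is set before the address byte goes out. */
	return acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
				   &address, 1, data, 1, 0);
}

static int sketch_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	/* address + value written, nothing read: 2 + 0 > 1 */
	return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
				   wdata, 2, NULL, 0, 0);
}

static int sketch_ec_query(struct acpi_ec *ec, u8 *data)
{
	/* a query moves one status byte and no address: 0 + 1 is not > 1,
	 * so the flag (and the degraded-GPE workaround) never applies here */
	return acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY,
				   NULL, 0, data, 1, 0);
}
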
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f996d0e37689..7b6c20eeeaff 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 	return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 
 static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 	return;
 }
 
-static void acpi_safe_halt(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1373,15 +1373,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
+	acpi_safe_halt();
 
 	cx->usage++;
 
@@ -1399,6 +1391,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	u32 t1, t2;
+	int sleep_ticks = 0;
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
@@ -1428,6 +1422,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	ACPI_FLUSH_CPU_CACHE();
 
 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
 	acpi_state_timer_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1436,6 +1432,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	/* TSC could halt in idle, so notify users */
 	mark_tsc_unstable("TSC halts in idle");;
 #endif
+	sleep_ticks = ticks_elapsed(t1, t2);
+
+	/* Tell the scheduler how much we idled: */
+	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
@@ -1443,7 +1443,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	cx->usage++;
 
 	acpi_state_timer_broadcast(pr, cx, 0);
-	cx->time += ticks_elapsed(t1, t2);
+	cx->time += sleep_ticks;
 	return ticks_elapsed_in_us(t1, t2);
 }
 
@@ -1463,6 +1463,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	u32 t1, t2;
+	int sleep_ticks = 0;
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
@@ -1471,6 +1473,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
+	if (acpi_idle_bm_check()) {
+		if (dev->safe_state) {
+			return dev->safe_state->enter(dev, dev->safe_state);
+		} else {
+			acpi_safe_halt();
+			return 0;
+		}
+	}
+
 	local_irq_disable();
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
@@ -1485,38 +1496,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		return 0;
 	}
 
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
 	acpi_state_timer_broadcast(pr, cx, 1);
 
-	if (acpi_idle_bm_check()) {
-		cx = pr->power.bm_state;
-
-		acpi_idle_update_bm_rld(pr, cx);
-
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-	} else {
-		acpi_idle_update_bm_rld(pr, cx);
+	acpi_idle_update_bm_rld(pr, cx);
 
+	/*
+	 * disable bus master
+	 * bm_check implies we need ARB_DIS
+	 * !bm_check implies we need cache flush
+	 * bm_control implies whether we can do ARB_DIS
+	 *
+	 * That leaves a case where bm_check is set and bm_control is
+	 * not set. In that case we cannot do much, we enter C3
+	 * without doing anything.
+	 */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
 		spin_unlock(&c3_lock);
+	} else if (!pr->flags.bm_check) {
+		ACPI_FLUSH_CPU_CACHE();
+	}
 
 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
+	/* Re-enable bus master arbitration */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
-		/* Re-enable bus master arbitration */
-		if (c3_cpu_count == num_online_cpus())
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		spin_unlock(&c3_lock);
 	}
@@ -1525,6 +1543,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	/* TSC could halt in idle, so notify users */
 	mark_tsc_unstable("TSC halts in idle");
 #endif
+	sleep_ticks = ticks_elapsed(t1, t2);
+	/* Tell the scheduler how much we idled: */
+	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
@@ -1532,7 +1553,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	cx->usage++;
 
 	acpi_state_timer_broadcast(pr, cx, 0);
-	cx->time += ticks_elapsed(t1, t2);
+	cx->time += sleep_ticks;
 	return ticks_elapsed_in_us(t1, t2);
 }
 
@@ -1584,12 +1605,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			state->enter = acpi_idle_enter_c1;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C3:
@@ -1610,14 +1633,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	if (!count)
 		return -EINVAL;
 
-	/* find the deepest state that can handle active BM */
-	if (pr->flags.bm_check) {
-		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-			if (pr->power.states[i].type == ACPI_STATE_C3)
-				break;
-		pr->power.bm_state = &pr->power.states[i-1];
-	}
-
 	return 0;
 }
 
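
The sleep_ticks bookkeeping added above exists to feed the scheduler's idle hooks: sched_clock_idle_sleep_event() is called before the CPU enters a deep C-state (where sched_clock()/TSC may stop), and sched_clock_idle_wakeup_event() reports afterwards how long the CPU actually slept, in nanoseconds derived from the ACPI PM timer. The condensed sketch below shows that pattern using the helpers already present in processor_idle.c (ticks_elapsed(), acpi_idle_do_entry(), PM_TIMER_TICK_NS); it is a simplified stand-in, not the patched function itself.

/* Condensed sketch of the idle-time accounting the hunks above introduce. */
static void sketch_deep_idle(struct acpi_processor_cx *cx)
{
	u32 t1, t2;
	int sleep_ticks;

	/* sched_clock() may stand still in deep idle; warn the scheduler */
	sched_clock_idle_sleep_event();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);			/* HLT/MWAIT/P_LVLx read */
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* PM timer ticks -> ns for the scheduler, ticks for C-state stats */
	sleep_ticks = ticks_elapsed(t1, t2);
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);
	cx->time += sleep_ticks;
}
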
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 90fd09c65f95..6045cdbe176b 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -29,7 +29,7 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <asm/uaccess.h>
@@ -88,7 +88,7 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
 struct acpi_battery {
 	struct power_supply bat;
 	struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	struct proc_dir_entry *proc_entry;
 #endif
 	unsigned long update_time;
@@ -113,6 +113,7 @@ struct acpi_battery {
 	u16 spec;
 	u8 id;
 	u8 present:1;
+	u8 have_sysfs_alarm:1;
 };
 
 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
@@ -122,7 +123,7 @@ struct acpi_sbs {
 	struct acpi_device *device;
 	struct acpi_smb_hc *hc;
 	struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	struct proc_dir_entry *charger_entry;
 #endif
 	struct acpi_battery battery[MAX_SBS_BAT];
@@ -468,7 +469,7 @@ static struct device_attribute alarm_attr = {
                               FS Interface (/proc/acpi)
    -------------------------------------------------------------------------- */
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 /* Generic Routines */
 static int
 acpi_sbs_add_fs(struct proc_dir_entry **dir,
@@ -789,7 +790,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 		return result;
 
 	sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
 			battery->name, &acpi_battery_info_fops,
 			&acpi_battery_state_fops, &acpi_battery_alarm_fops,
@@ -808,7 +809,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 	}
 	battery->bat.get_property = acpi_sbs_battery_get_property;
 	result = power_supply_register(&sbs->device->dev, &battery->bat);
-	device_create_file(battery->bat.dev, &alarm_attr);
+	if (result)
+		goto end;
+	result = device_create_file(battery->bat.dev, &alarm_attr);
+	if (result)
+		goto end;
+	battery->have_sysfs_alarm = 1;
+      end:
 	printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
 	       ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
 	       battery->name, sbs->battery->present ? "present" : "absent");
@@ -817,14 +824,16 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 
 static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
 {
-	if (sbs->battery[id].bat.dev)
-		device_remove_file(sbs->battery[id].bat.dev, &alarm_attr);
-	power_supply_unregister(&sbs->battery[id].bat);
-#ifdef CONFIG_ACPI_PROCFS
-	if (sbs->battery[id].proc_entry) {
-		acpi_sbs_remove_fs(&(sbs->battery[id].proc_entry),
-				   acpi_battery_dir);
+	struct acpi_battery *battery = &sbs->battery[id];
+
+	if (battery->bat.dev) {
+		if (battery->have_sysfs_alarm)
+			device_remove_file(battery->bat.dev, &alarm_attr);
+		power_supply_unregister(&battery->bat);
 	}
+#ifdef CONFIG_ACPI_PROCFS_POWER
+	if (battery->proc_entry)
+		acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
 #endif
 }
 
@@ -835,7 +844,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
 	result = acpi_ac_get_present(sbs);
 	if (result)
 		goto end;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
 				 ACPI_AC_DIR_NAME, NULL,
 				 &acpi_ac_state_fops, NULL, sbs);
@@ -859,7 +868,7 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
 {
 	if (sbs->charger.dev)
 		power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	if (sbs->charger_entry)
 		acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
 #endif
@@ -965,7 +974,7 @@ static int acpi_sbs_remove(struct acpi_device *device, int type)
 
 static void acpi_sbs_rmdirs(void)
 {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	if (acpi_ac_dir) {
 		acpi_unlock_ac_dir(acpi_ac_dir);
 		acpi_ac_dir = NULL;
@@ -1004,7 +1013,7 @@ static int __init acpi_sbs_init(void)
 
 	if (acpi_disabled)
 		return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_ac_dir = acpi_lock_ac_dir();
 	if (!acpi_ac_dir)
 		return -ENODEV;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4bd33ce8a6f3..1bba99747f5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,17 +37,17 @@
 #define DEF_FREQUENCY_UP_THRESHOLD (80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD (20)
 
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers
  * with CPUFREQ_ETERNAL), this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE \
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -82,24 +82,24 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int ignore_nice;
 	unsigned int freq_step;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.ignore_nice = 0,
 	.freq_step = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -109,13 +109,34 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 	if (dbs_tuners_ins.ignore_nice)
 		add_nice = kstat_cpu(cpu).cpustat.nice;
 
 	ret = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait +
 		add_nice;
 
 	return ret;
 }
 
+/* keep track of frequency transitions */
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		     void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+							freq->cpu);
+
+	if (!this_dbs_info->enable)
+		return 0;
+
+	this_dbs_info->requested_freq = freq->new;
+
+	return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier
+};
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
@@ -127,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -148,7 +169,7 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -164,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -183,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -202,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -228,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;
 
 	unsigned int j;
 
-	ret = sscanf (buf, "%u", &input);
-	if ( ret != 1 )
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -261,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 	unsigned int input;
 	int ret;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 100 )
+	if (input > 100)
 		input = 100;
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
 	mutex_lock(&dbs_mutex);
@@ -322,18 +343,18 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
 	 * - If current idle time is less than 20%, then we try to
 	 *   increase frequency
 	 * Every sampling_rate*sampling_down_factor, we check
 	 * - If current idle time is more than 80%, then we try to
 	 *   decrease frequency
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
 	 * 5% (default) of max_frequency
 	 */
 
 	/* Check for frequency increase */
@@ -361,13 +382,13 @@ static void dbs_check_cpu(int cpu)
 		/* if we are already at full speed then break out early */
 		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
 		this_dbs_info->requested_freq += freq_step;
 		if (this_dbs_info->requested_freq > policy->max)
 			this_dbs_info->requested_freq = policy->max;
@@ -427,15 +448,15 @@ static void dbs_check_cpu(int cpu)
 }
 
 static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
@@ -462,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-		    (!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
 		mutex_lock(&dbs_mutex);
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -481,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
@@ -511,8 +531,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 
 			dbs_timer_init();
+			cpufreq_register_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
 		}
 
 		mutex_unlock(&dbs_mutex);
 		break;
 
@@ -525,9 +548,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0)
+		if (dbs_enable == 0) {
 			dbs_timer_exit();
-
+			cpufreq_unregister_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+
 		mutex_unlock(&dbs_mutex);
 
 		break;
@@ -537,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
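
The conservative governor computes its next target from this_dbs_info->requested_freq rather than from policy->cur, so any frequency change it did not initiate (the driver clamping a request, or another actor on a shared policy) would leave requested_freq stale; the CPUFREQ_TRANSITION_NOTIFIER added above resynchronises it on every switch. For reference, a minimal self-contained sketch of that notifier API as a standalone module; the names here are illustrative, not part of the patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Log every completed frequency transition (illustrative sketch). */
static int sketch_transition(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* the callback fires for CPUFREQ_PRECHANGE and CPUFREQ_POSTCHANGE */
	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu%u: %u kHz -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return 0;
}

static struct notifier_block sketch_nb = {
	.notifier_call = sketch_transition,
};

static int __init sketch_init(void)
{
	return cpufreq_register_notifier(&sketch_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit sketch_exit(void)
{
	cpufreq_unregister_notifier(&sketch_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
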
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index cd0a204d96d1..11adab13f2b7 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -75,6 +75,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
 {
 	int i = 0;
 	int irq;
+	int p, t;
 
 	if (!valid_IRQ(gsi))
 		return;
@@ -85,15 +86,22 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
 	if (i >= PNP_MAX_IRQ)
 		return;
 
-#ifdef CONFIG_X86
-	if (gsi < 16 && (triggering != ACPI_EDGE_SENSITIVE ||
-			polarity != ACPI_ACTIVE_HIGH)) {
-		pnp_warn("BIOS BUG: legacy PNP IRQ %d should be edge trigger, "
-			"active high", gsi);
-		triggering = ACPI_EDGE_SENSITIVE;
-		polarity = ACPI_ACTIVE_HIGH;
+	/*
+	 * in IO-APIC mode, use overrided attribute. Two reasons:
+	 * 1. BIOS bug in DSDT
+	 * 2. BIOS uses IO-APIC mode Interrupt Source Override
+	 */
+	if (!acpi_get_override_irq(gsi, &t, &p)) {
+		t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+		p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+		if (triggering != t || polarity != p) {
+			pnp_warn("IRQ %d override to %s, %s",
+				gsi, t ? "edge":"level", p ? "low":"high");
+			triggering = t;
+			polarity = p;
+		}
 	}
-#endif
 
 	res->irq_resource[i].flags = IORESOURCE_IRQ;	// Also clears _UNSET flag
 	res->irq_resource[i].flags |= irq_flags(triggering, polarity);
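
For clarity, the mapping the new code performs: acpi_get_override_irq() returns 0 when the MADT carries an Interrupt Source Override for the GSI and fills two ints that behave as booleans (non-zero trigger means level-sensitive, non-zero polarity means active-low); those are translated to the ACPI_* constants and then folded into the resource flags by the existing irq_flags() helper. A compressed sketch of that path, with the function name and parameter types chosen only for illustration:

/* Illustrative sketch: fold a possible MADT override into IRQ resource flags. */
static int sketch_gsi_irq_flags(u32 gsi, int triggering, int polarity)
{
	int t, p;

	if (!acpi_get_override_irq(gsi, &t, &p)) {
		/* an Interrupt Source Override exists: trust it over _CRS */
		triggering = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
		polarity = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
	}
	return IORESOURCE_IRQ | irq_flags(triggering, polarity);
}
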