Diffstat (limited to 'drivers')
241 files changed, 6291 insertions, 6076 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8f451449abd3..b3138fbb46a4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -168,4 +168,6 @@ source "drivers/fmc/Kconfig"
168 | 168 | ||
169 | source "drivers/phy/Kconfig" | 169 | source "drivers/phy/Kconfig" |
170 | 170 | ||
171 | source "drivers/powercap/Kconfig" | ||
172 | |||
171 | endmenu | 173 | endmenu |
diff --git a/drivers/Makefile b/drivers/Makefile
index 687da899cadb..3cc8214f9b26 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -154,3 +154,4 @@ obj-$(CONFIG_VME_BUS) += vme/
154 | obj-$(CONFIG_IPACK_BUS) += ipack/ | 154 | obj-$(CONFIG_IPACK_BUS) += ipack/ |
155 | obj-$(CONFIG_NTB) += ntb/ | 155 | obj-$(CONFIG_NTB) += ntb/ |
156 | obj-$(CONFIG_FMC) += fmc/ | 156 | obj-$(CONFIG_FMC) += fmc/ |
157 | obj-$(CONFIG_POWERCAP) += powercap/ | ||
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e11faae81ed9..c95df0b8c880 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -56,23 +56,6 @@ config ACPI_PROCFS
56 | 56 | ||
57 | Say N to delete /proc/acpi/ files that have moved to /sys/ | 57 | Say N to delete /proc/acpi/ files that have moved to /sys/ |
58 | 58 | ||
59 | config ACPI_PROCFS_POWER | ||
60 | bool "Deprecated power /proc/acpi directories" | ||
61 | depends on PROC_FS | ||
62 | help | ||
63 | For backwards compatibility, this option allows | ||
64 | deprecated power /proc/acpi/ directories to exist, even when | ||
65 | they have been replaced by functions in /sys. | ||
66 | The deprecated directories (and their replacements) include: | ||
67 | /proc/acpi/battery/* (/sys/class/power_supply/*) | ||
68 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) | ||
69 | This option has no effect on /proc/acpi/ directories | ||
70 | and functions, which do not yet exist in /sys | ||
71 | This option, together with the proc directories, will be | ||
72 | deleted in 2.6.39. | ||
73 | |||
74 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ | ||
75 | |||
76 | config ACPI_EC_DEBUGFS | 59 | config ACPI_EC_DEBUGFS |
77 | tristate "EC read/write access through /sys/kernel/debug/ec" | 60 | tristate "EC read/write access through /sys/kernel/debug/ec" |
78 | default n | 61 | default n |
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
175 | 158 | ||
176 | To compile this driver as a module, choose M here: | 159 | To compile this driver as a module, choose M here: |
177 | the module will be called processor. | 160 | the module will be called processor. |
161 | |||
178 | config ACPI_IPMI | 162 | config ACPI_IPMI |
179 | tristate "IPMI" | 163 | tristate "IPMI" |
180 | depends on IPMI_SI && IPMI_HANDLER | 164 | depends on IPMI_SI |
181 | default n | 165 | default n |
182 | help | 166 | help |
183 | This driver enables the ACPI to access the BMC controller. And it | 167 | This driver enables the ACPI to access the BMC controller. And it |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce34afadcd0..0331f91d56e6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,7 +47,6 @@ acpi-y += sysfs.o
47 | acpi-$(CONFIG_X86) += acpi_cmos_rtc.o | 47 | acpi-$(CONFIG_X86) += acpi_cmos_rtc.o |
48 | acpi-$(CONFIG_DEBUG_FS) += debugfs.o | 48 | acpi-$(CONFIG_DEBUG_FS) += debugfs.o |
49 | acpi-$(CONFIG_ACPI_NUMA) += numa.o | 49 | acpi-$(CONFIG_ACPI_NUMA) += numa.o |
50 | acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o | ||
51 | ifdef CONFIG_ACPI_VIDEO | 50 | ifdef CONFIG_ACPI_VIDEO |
52 | acpi-y += video_detect.o | 51 | acpi-y += video_detect.o |
53 | endif | 52 | endif |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index f37beaa32750..b9f0d5f4bba5 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,10 +30,7 @@
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/dmi.h> | 31 | #include <linux/dmi.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #ifdef CONFIG_ACPI_PROCFS_POWER | 33 | #include <linux/platform_device.h> |
34 | #include <linux/proc_fs.h> | ||
35 | #include <linux/seq_file.h> | ||
36 | #endif | ||
37 | #include <linux/power_supply.h> | 34 | #include <linux/power_supply.h> |
38 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
39 | #include <acpi/acpi_drivers.h> | 36 | #include <acpi/acpi_drivers.h> |
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
55 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); | 52 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); |
56 | MODULE_LICENSE("GPL"); | 53 | MODULE_LICENSE("GPL"); |
57 | 54 | ||
58 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
59 | extern struct proc_dir_entry *acpi_lock_ac_dir(void); | ||
60 | extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); | ||
61 | static int acpi_ac_open_fs(struct inode *inode, struct file *file); | ||
62 | #endif | ||
63 | |||
64 | static int acpi_ac_add(struct acpi_device *device); | ||
65 | static int acpi_ac_remove(struct acpi_device *device); | ||
66 | static void acpi_ac_notify(struct acpi_device *device, u32 event); | ||
67 | |||
68 | static const struct acpi_device_id ac_device_ids[] = { | ||
69 | {"ACPI0003", 0}, | ||
70 | {"", 0}, | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(acpi, ac_device_ids); | ||
73 | |||
74 | #ifdef CONFIG_PM_SLEEP | ||
75 | static int acpi_ac_resume(struct device *dev); | ||
76 | #endif | ||
77 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); | ||
78 | |||
79 | static int ac_sleep_before_get_state_ms; | 55 | static int ac_sleep_before_get_state_ms; |
80 | 56 | ||
81 | static struct acpi_driver acpi_ac_driver = { | ||
82 | .name = "ac", | ||
83 | .class = ACPI_AC_CLASS, | ||
84 | .ids = ac_device_ids, | ||
85 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
86 | .ops = { | ||
87 | .add = acpi_ac_add, | ||
88 | .remove = acpi_ac_remove, | ||
89 | .notify = acpi_ac_notify, | ||
90 | }, | ||
91 | .drv.pm = &acpi_ac_pm, | ||
92 | }; | ||
93 | |||
94 | struct acpi_ac { | 57 | struct acpi_ac { |
95 | struct power_supply charger; | 58 | struct power_supply charger; |
96 | struct acpi_device * device; | 59 | struct acpi_device *adev; |
60 | struct platform_device *pdev; | ||
97 | unsigned long long state; | 61 | unsigned long long state; |
98 | }; | 62 | }; |
99 | 63 | ||
100 | #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) | 64 | #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) |
101 | 65 | ||
102 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
103 | static const struct file_operations acpi_ac_fops = { | ||
104 | .owner = THIS_MODULE, | ||
105 | .open = acpi_ac_open_fs, | ||
106 | .read = seq_read, | ||
107 | .llseek = seq_lseek, | ||
108 | .release = single_release, | ||
109 | }; | ||
110 | #endif | ||
111 | |||
112 | /* -------------------------------------------------------------------------- | 66 | /* -------------------------------------------------------------------------- |
113 | AC Adapter Management | 67 | AC Adapter Management |
114 | -------------------------------------------------------------------------- */ | 68 | -------------------------------------------------------------------------- */ |
115 | 69 | ||
116 | static int acpi_ac_get_state(struct acpi_ac *ac) | 70 | static int acpi_ac_get_state(struct acpi_ac *ac) |
117 | { | 71 | { |
118 | acpi_status status = AE_OK; | 72 | acpi_status status; |
119 | |||
120 | |||
121 | if (!ac) | ||
122 | return -EINVAL; | ||
123 | 73 | ||
124 | status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state); | 74 | status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL, |
75 | &ac->state); | ||
125 | if (ACPI_FAILURE(status)) { | 76 | if (ACPI_FAILURE(status)) { |
126 | ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state")); | 77 | ACPI_EXCEPTION((AE_INFO, status, |
78 | "Error reading AC Adapter state")); | ||
127 | ac->state = ACPI_AC_STATUS_UNKNOWN; | 79 | ac->state = ACPI_AC_STATUS_UNKNOWN; |
128 | return -ENODEV; | 80 | return -ENODEV; |
129 | } | 81 | } |
@@ -160,91 +112,13 @@ static enum power_supply_property ac_props[] = {
160 | POWER_SUPPLY_PROP_ONLINE, | 112 | POWER_SUPPLY_PROP_ONLINE, |
161 | }; | 113 | }; |
162 | 114 | ||
163 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
164 | /* -------------------------------------------------------------------------- | ||
165 | FS Interface (/proc) | ||
166 | -------------------------------------------------------------------------- */ | ||
167 | |||
168 | static struct proc_dir_entry *acpi_ac_dir; | ||
169 | |||
170 | static int acpi_ac_seq_show(struct seq_file *seq, void *offset) | ||
171 | { | ||
172 | struct acpi_ac *ac = seq->private; | ||
173 | |||
174 | |||
175 | if (!ac) | ||
176 | return 0; | ||
177 | |||
178 | if (acpi_ac_get_state(ac)) { | ||
179 | seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | seq_puts(seq, "state: "); | ||
184 | switch (ac->state) { | ||
185 | case ACPI_AC_STATUS_OFFLINE: | ||
186 | seq_puts(seq, "off-line\n"); | ||
187 | break; | ||
188 | case ACPI_AC_STATUS_ONLINE: | ||
189 | seq_puts(seq, "on-line\n"); | ||
190 | break; | ||
191 | default: | ||
192 | seq_puts(seq, "unknown\n"); | ||
193 | break; | ||
194 | } | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static int acpi_ac_open_fs(struct inode *inode, struct file *file) | ||
200 | { | ||
201 | return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); | ||
202 | } | ||
203 | |||
204 | static int acpi_ac_add_fs(struct acpi_device *device) | ||
205 | { | ||
206 | struct proc_dir_entry *entry = NULL; | ||
207 | |||
208 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," | ||
209 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
210 | if (!acpi_device_dir(device)) { | ||
211 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | ||
212 | acpi_ac_dir); | ||
213 | if (!acpi_device_dir(device)) | ||
214 | return -ENODEV; | ||
215 | } | ||
216 | |||
217 | /* 'state' [R] */ | ||
218 | entry = proc_create_data(ACPI_AC_FILE_STATE, | ||
219 | S_IRUGO, acpi_device_dir(device), | ||
220 | &acpi_ac_fops, acpi_driver_data(device)); | ||
221 | if (!entry) | ||
222 | return -ENODEV; | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int acpi_ac_remove_fs(struct acpi_device *device) | ||
227 | { | ||
228 | |||
229 | if (acpi_device_dir(device)) { | ||
230 | remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device)); | ||
231 | |||
232 | remove_proc_entry(acpi_device_bid(device), acpi_ac_dir); | ||
233 | acpi_device_dir(device) = NULL; | ||
234 | } | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | #endif | ||
239 | |||
240 | /* -------------------------------------------------------------------------- | 115 | /* -------------------------------------------------------------------------- |
241 | Driver Model | 116 | Driver Model |
242 | -------------------------------------------------------------------------- */ | 117 | -------------------------------------------------------------------------- */ |
243 | 118 | ||
244 | static void acpi_ac_notify(struct acpi_device *device, u32 event) | 119 | static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) |
245 | { | 120 | { |
246 | struct acpi_ac *ac = acpi_driver_data(device); | 121 | struct acpi_ac *ac = data; |
247 | |||
248 | 122 | ||
249 | if (!ac) | 123 | if (!ac) |
250 | return; | 124 | return; |
@@ -267,10 +141,10 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
267 | msleep(ac_sleep_before_get_state_ms); | 141 | msleep(ac_sleep_before_get_state_ms); |
268 | 142 | ||
269 | acpi_ac_get_state(ac); | 143 | acpi_ac_get_state(ac); |
270 | acpi_bus_generate_netlink_event(device->pnp.device_class, | 144 | acpi_bus_generate_netlink_event(ac->adev->pnp.device_class, |
271 | dev_name(&device->dev), event, | 145 | dev_name(&ac->pdev->dev), |
272 | (u32) ac->state); | 146 | event, (u32) ac->state); |
273 | acpi_notifier_call_chain(device, event, (u32) ac->state); | 147 | acpi_notifier_call_chain(ac->adev, event, (u32) ac->state); |
274 | kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); | 148 | kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); |
275 | } | 149 | } |
276 | 150 | ||
@@ -295,53 +169,55 @@ static struct dmi_system_id ac_dmi_table[] = {
295 | {}, | 169 | {}, |
296 | }; | 170 | }; |
297 | 171 | ||
298 | static int acpi_ac_add(struct acpi_device *device) | 172 | static int acpi_ac_probe(struct platform_device *pdev) |
299 | { | 173 | { |
300 | int result = 0; | 174 | int result = 0; |
301 | struct acpi_ac *ac = NULL; | 175 | struct acpi_ac *ac = NULL; |
176 | struct acpi_device *adev; | ||
302 | 177 | ||
303 | 178 | if (!pdev) | |
304 | if (!device) | ||
305 | return -EINVAL; | 179 | return -EINVAL; |
306 | 180 | ||
181 | result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); | ||
182 | if (result) | ||
183 | return -ENODEV; | ||
184 | |||
307 | ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); | 185 | ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); |
308 | if (!ac) | 186 | if (!ac) |
309 | return -ENOMEM; | 187 | return -ENOMEM; |
310 | 188 | ||
311 | ac->device = device; | 189 | strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); |
312 | strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); | 190 | strcpy(acpi_device_class(adev), ACPI_AC_CLASS); |
313 | strcpy(acpi_device_class(device), ACPI_AC_CLASS); | 191 | ac->adev = adev; |
314 | device->driver_data = ac; | 192 | ac->pdev = pdev; |
193 | platform_set_drvdata(pdev, ac); | ||
315 | 194 | ||
316 | result = acpi_ac_get_state(ac); | 195 | result = acpi_ac_get_state(ac); |
317 | if (result) | 196 | if (result) |
318 | goto end; | 197 | goto end; |
319 | 198 | ||
320 | #ifdef CONFIG_ACPI_PROCFS_POWER | 199 | ac->charger.name = acpi_device_bid(adev); |
321 | result = acpi_ac_add_fs(device); | ||
322 | #endif | ||
323 | if (result) | ||
324 | goto end; | ||
325 | ac->charger.name = acpi_device_bid(device); | ||
326 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; | 200 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; |
327 | ac->charger.properties = ac_props; | 201 | ac->charger.properties = ac_props; |
328 | ac->charger.num_properties = ARRAY_SIZE(ac_props); | 202 | ac->charger.num_properties = ARRAY_SIZE(ac_props); |
329 | ac->charger.get_property = get_ac_property; | 203 | ac->charger.get_property = get_ac_property; |
330 | result = power_supply_register(&ac->device->dev, &ac->charger); | 204 | result = power_supply_register(&pdev->dev, &ac->charger); |
331 | if (result) | 205 | if (result) |
332 | goto end; | 206 | goto end; |
333 | 207 | ||
208 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), | ||
209 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); | ||
210 | if (result) { | ||
211 | power_supply_unregister(&ac->charger); | ||
212 | goto end; | ||
213 | } | ||
334 | printk(KERN_INFO PREFIX "%s [%s] (%s)\n", | 214 | printk(KERN_INFO PREFIX "%s [%s] (%s)\n", |
335 | acpi_device_name(device), acpi_device_bid(device), | 215 | acpi_device_name(adev), acpi_device_bid(adev), |
336 | ac->state ? "on-line" : "off-line"); | 216 | ac->state ? "on-line" : "off-line"); |
337 | 217 | ||
338 | end: | 218 | end: |
339 | if (result) { | 219 | if (result) |
340 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
341 | acpi_ac_remove_fs(device); | ||
342 | #endif | ||
343 | kfree(ac); | 220 | kfree(ac); |
344 | } | ||
345 | 221 | ||
346 | dmi_check_system(ac_dmi_table); | 222 | dmi_check_system(ac_dmi_table); |
347 | return result; | 223 | return result; |
@@ -356,7 +232,7 @@ static int acpi_ac_resume(struct device *dev)
356 | if (!dev) | 232 | if (!dev) |
357 | return -EINVAL; | 233 | return -EINVAL; |
358 | 234 | ||
359 | ac = acpi_driver_data(to_acpi_device(dev)); | 235 | ac = platform_get_drvdata(to_platform_device(dev)); |
360 | if (!ac) | 236 | if (!ac) |
361 | return -EINVAL; | 237 | return -EINVAL; |
362 | 238 | ||
@@ -368,28 +244,44 @@ static int acpi_ac_resume(struct device *dev)
368 | return 0; | 244 | return 0; |
369 | } | 245 | } |
370 | #endif | 246 | #endif |
247 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); | ||
371 | 248 | ||
372 | static int acpi_ac_remove(struct acpi_device *device) | 249 | static int acpi_ac_remove(struct platform_device *pdev) |
373 | { | 250 | { |
374 | struct acpi_ac *ac = NULL; | 251 | struct acpi_ac *ac; |
375 | |||
376 | 252 | ||
377 | if (!device || !acpi_driver_data(device)) | 253 | if (!pdev) |
378 | return -EINVAL; | 254 | return -EINVAL; |
379 | 255 | ||
380 | ac = acpi_driver_data(device); | 256 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), |
257 | ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); | ||
381 | 258 | ||
259 | ac = platform_get_drvdata(pdev); | ||
382 | if (ac->charger.dev) | 260 | if (ac->charger.dev) |
383 | power_supply_unregister(&ac->charger); | 261 | power_supply_unregister(&ac->charger); |
384 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
385 | acpi_ac_remove_fs(device); | ||
386 | #endif | ||
387 | 262 | ||
388 | kfree(ac); | 263 | kfree(ac); |
389 | 264 | ||
390 | return 0; | 265 | return 0; |
391 | } | 266 | } |
392 | 267 | ||
268 | static const struct acpi_device_id acpi_ac_match[] = { | ||
269 | { "ACPI0003", 0 }, | ||
270 | { } | ||
271 | }; | ||
272 | MODULE_DEVICE_TABLE(acpi, acpi_ac_match); | ||
273 | |||
274 | static struct platform_driver acpi_ac_driver = { | ||
275 | .probe = acpi_ac_probe, | ||
276 | .remove = acpi_ac_remove, | ||
277 | .driver = { | ||
278 | .name = "acpi-ac", | ||
279 | .owner = THIS_MODULE, | ||
280 | .pm = &acpi_ac_pm_ops, | ||
281 | .acpi_match_table = ACPI_PTR(acpi_ac_match), | ||
282 | }, | ||
283 | }; | ||
284 | |||
393 | static int __init acpi_ac_init(void) | 285 | static int __init acpi_ac_init(void) |
394 | { | 286 | { |
395 | int result; | 287 | int result; |
@@ -397,34 +289,16 @@ static int __init acpi_ac_init(void)
397 | if (acpi_disabled) | 289 | if (acpi_disabled) |
398 | return -ENODEV; | 290 | return -ENODEV; |
399 | 291 | ||
400 | #ifdef CONFIG_ACPI_PROCFS_POWER | 292 | result = platform_driver_register(&acpi_ac_driver); |
401 | acpi_ac_dir = acpi_lock_ac_dir(); | 293 | if (result < 0) |
402 | if (!acpi_ac_dir) | ||
403 | return -ENODEV; | 294 | return -ENODEV; |
404 | #endif | ||
405 | |||
406 | result = acpi_bus_register_driver(&acpi_ac_driver); | ||
407 | if (result < 0) { | ||
408 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
409 | acpi_unlock_ac_dir(acpi_ac_dir); | ||
410 | #endif | ||
411 | return -ENODEV; | ||
412 | } | ||
413 | 295 | ||
414 | return 0; | 296 | return 0; |
415 | } | 297 | } |
416 | 298 | ||
417 | static void __exit acpi_ac_exit(void) | 299 | static void __exit acpi_ac_exit(void) |
418 | { | 300 | { |
419 | 301 | platform_driver_unregister(&acpi_ac_driver); | |
420 | acpi_bus_unregister_driver(&acpi_ac_driver); | ||
421 | |||
422 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
423 | acpi_unlock_ac_dir(acpi_ac_dir); | ||
424 | #endif | ||
425 | |||
426 | return; | ||
427 | } | 302 | } |
428 | |||
429 | module_init(acpi_ac_init); | 303 | module_init(acpi_ac_init); |
430 | module_exit(acpi_ac_exit); | 304 | module_exit(acpi_ac_exit); |
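
The ac.c changes above convert the AC adapter driver from an ACPI bus driver (a struct acpi_driver with .add/.remove/.notify callbacks) into a platform driver that is matched by ACPI ID and installs its notify handler with acpi_install_notify_handler(). As a rough sketch of that binding pattern only -- not code from the patch, and with made-up example_* names -- an ACPI-enumerated platform driver looks roughly like this:

	/* Minimal sketch, assuming the same 3.12-era APIs used above;
	 * all example_* identifiers are hypothetical. */
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/acpi.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* ACPI handle of the device the ACPI core enumerated for us */
		acpi_handle handle = ACPI_HANDLE(&pdev->dev);

		return handle ? 0 : -ENODEV;
	}

	static int example_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static const struct acpi_device_id example_acpi_match[] = {
		{ "ACPI0003", 0 },	/* AC adapter _HID, as in the patch above */
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, example_acpi_match);

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name = "example-ac",
			.owner = THIS_MODULE,
			.acpi_match_table = ACPI_PTR(example_acpi_match),
		},
	};
	module_platform_driver(example_driver);

	MODULE_LICENSE("GPL");

The match table keyed on the "ACPI0003" _HID is what ties this to the acpi_platform.c change later in this diff, which adds that ID to the list of ACPI devices for which a platform device is created.
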
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index a6977e12d574..ac0f52f6df2b 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
1 | /* | 1 | /* |
2 | * acpi_ipmi.c - ACPI IPMI opregion | 2 | * acpi_ipmi.c - ACPI IPMI opregion |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Intel Corporation | 4 | * Copyright (C) 2010, 2013 Intel Corporation |
5 | * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com> | 5 | * Author: Zhao Yakui <yakui.zhao@intel.com> |
6 | * Lv Zheng <lv.zheng@intel.com> | ||
6 | * | 7 | * |
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
8 | * | 9 | * |
@@ -23,60 +24,58 @@
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 24 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
24 | */ | 25 | */ |
25 | 26 | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/types.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/proc_fs.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/list.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <acpi/acpi_bus.h> | ||
38 | #include <acpi/acpi_drivers.h> | ||
39 | #include <linux/ipmi.h> | 29 | #include <linux/ipmi.h> |
40 | #include <linux/device.h> | ||
41 | #include <linux/pnp.h> | ||
42 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
43 | 31 | ||
44 | MODULE_AUTHOR("Zhao Yakui"); | 32 | MODULE_AUTHOR("Zhao Yakui"); |
45 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); | 33 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); |
46 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
47 | 35 | ||
48 | #define IPMI_FLAGS_HANDLER_INSTALL 0 | ||
49 | |||
50 | #define ACPI_IPMI_OK 0 | 36 | #define ACPI_IPMI_OK 0 |
51 | #define ACPI_IPMI_TIMEOUT 0x10 | 37 | #define ACPI_IPMI_TIMEOUT 0x10 |
52 | #define ACPI_IPMI_UNKNOWN 0x07 | 38 | #define ACPI_IPMI_UNKNOWN 0x07 |
53 | /* the IPMI timeout is 5s */ | 39 | /* the IPMI timeout is 5s */ |
54 | #define IPMI_TIMEOUT (5 * HZ) | 40 | #define IPMI_TIMEOUT (5000) |
41 | #define ACPI_IPMI_MAX_MSG_LENGTH 64 | ||
55 | 42 | ||
56 | struct acpi_ipmi_device { | 43 | struct acpi_ipmi_device { |
57 | /* the device list attached to driver_data.ipmi_devices */ | 44 | /* the device list attached to driver_data.ipmi_devices */ |
58 | struct list_head head; | 45 | struct list_head head; |
46 | |||
59 | /* the IPMI request message list */ | 47 | /* the IPMI request message list */ |
60 | struct list_head tx_msg_list; | 48 | struct list_head tx_msg_list; |
61 | spinlock_t tx_msg_lock; | 49 | |
50 | spinlock_t tx_msg_lock; | ||
62 | acpi_handle handle; | 51 | acpi_handle handle; |
63 | struct pnp_dev *pnp_dev; | 52 | struct device *dev; |
64 | ipmi_user_t user_interface; | 53 | ipmi_user_t user_interface; |
65 | int ipmi_ifnum; /* IPMI interface number */ | 54 | int ipmi_ifnum; /* IPMI interface number */ |
66 | long curr_msgid; | 55 | long curr_msgid; |
67 | unsigned long flags; | 56 | bool dead; |
68 | struct ipmi_smi_info smi_data; | 57 | struct kref kref; |
69 | }; | 58 | }; |
70 | 59 | ||
71 | struct ipmi_driver_data { | 60 | struct ipmi_driver_data { |
72 | struct list_head ipmi_devices; | 61 | struct list_head ipmi_devices; |
73 | struct ipmi_smi_watcher bmc_events; | 62 | struct ipmi_smi_watcher bmc_events; |
74 | struct ipmi_user_hndl ipmi_hndlrs; | 63 | struct ipmi_user_hndl ipmi_hndlrs; |
75 | struct mutex ipmi_lock; | 64 | struct mutex ipmi_lock; |
65 | |||
66 | /* | ||
67 | * NOTE: IPMI System Interface Selection | ||
68 | * There is no system interface specified by the IPMI operation | ||
69 | * region access. We try to select one system interface with ACPI | ||
70 | * handle set. IPMI messages passed from the ACPI codes are sent | ||
71 | * to this selected global IPMI system interface. | ||
72 | */ | ||
73 | struct acpi_ipmi_device *selected_smi; | ||
76 | }; | 74 | }; |
77 | 75 | ||
78 | struct acpi_ipmi_msg { | 76 | struct acpi_ipmi_msg { |
79 | struct list_head head; | 77 | struct list_head head; |
78 | |||
80 | /* | 79 | /* |
81 | * General speaking the addr type should be SI_ADDR_TYPE. And | 80 | * General speaking the addr type should be SI_ADDR_TYPE. And |
82 | * the addr channel should be BMC. | 81 | * the addr channel should be BMC. |
@@ -86,30 +85,31 @@ struct acpi_ipmi_msg {
86 | */ | 85 | */ |
87 | struct ipmi_addr addr; | 86 | struct ipmi_addr addr; |
88 | long tx_msgid; | 87 | long tx_msgid; |
88 | |||
89 | /* it is used to track whether the IPMI message is finished */ | 89 | /* it is used to track whether the IPMI message is finished */ |
90 | struct completion tx_complete; | 90 | struct completion tx_complete; |
91 | |||
91 | struct kernel_ipmi_msg tx_message; | 92 | struct kernel_ipmi_msg tx_message; |
92 | int msg_done; | 93 | int msg_done; |
93 | /* tx data . And copy it from ACPI object buffer */ | 94 | |
94 | u8 tx_data[64]; | 95 | /* tx/rx data . And copy it from/to ACPI object buffer */ |
95 | int tx_len; | 96 | u8 data[ACPI_IPMI_MAX_MSG_LENGTH]; |
96 | u8 rx_data[64]; | 97 | u8 rx_len; |
97 | int rx_len; | 98 | |
98 | struct acpi_ipmi_device *device; | 99 | struct acpi_ipmi_device *device; |
100 | struct kref kref; | ||
99 | }; | 101 | }; |
100 | 102 | ||
101 | /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ | 103 | /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ |
102 | struct acpi_ipmi_buffer { | 104 | struct acpi_ipmi_buffer { |
103 | u8 status; | 105 | u8 status; |
104 | u8 length; | 106 | u8 length; |
105 | u8 data[64]; | 107 | u8 data[ACPI_IPMI_MAX_MSG_LENGTH]; |
106 | }; | 108 | }; |
107 | 109 | ||
108 | static void ipmi_register_bmc(int iface, struct device *dev); | 110 | static void ipmi_register_bmc(int iface, struct device *dev); |
109 | static void ipmi_bmc_gone(int iface); | 111 | static void ipmi_bmc_gone(int iface); |
110 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); | 112 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); |
111 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
112 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
113 | 113 | ||
114 | static struct ipmi_driver_data driver_data = { | 114 | static struct ipmi_driver_data driver_data = { |
115 | .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), | 115 | .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), |
@@ -121,29 +121,142 @@ static struct ipmi_driver_data driver_data = {
121 | .ipmi_hndlrs = { | 121 | .ipmi_hndlrs = { |
122 | .ipmi_recv_hndl = ipmi_msg_handler, | 122 | .ipmi_recv_hndl = ipmi_msg_handler, |
123 | }, | 123 | }, |
124 | .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock) | ||
124 | }; | 125 | }; |
125 | 126 | ||
126 | static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) | 127 | static struct acpi_ipmi_device * |
128 | ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle) | ||
129 | { | ||
130 | struct acpi_ipmi_device *ipmi_device; | ||
131 | int err; | ||
132 | ipmi_user_t user; | ||
133 | |||
134 | ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); | ||
135 | if (!ipmi_device) | ||
136 | return NULL; | ||
137 | |||
138 | kref_init(&ipmi_device->kref); | ||
139 | INIT_LIST_HEAD(&ipmi_device->head); | ||
140 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | ||
141 | spin_lock_init(&ipmi_device->tx_msg_lock); | ||
142 | ipmi_device->handle = handle; | ||
143 | ipmi_device->dev = get_device(dev); | ||
144 | ipmi_device->ipmi_ifnum = iface; | ||
145 | |||
146 | err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, | ||
147 | ipmi_device, &user); | ||
148 | if (err) { | ||
149 | put_device(dev); | ||
150 | kfree(ipmi_device); | ||
151 | return NULL; | ||
152 | } | ||
153 | ipmi_device->user_interface = user; | ||
154 | |||
155 | return ipmi_device; | ||
156 | } | ||
157 | |||
158 | static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device) | ||
159 | { | ||
160 | ipmi_destroy_user(ipmi_device->user_interface); | ||
161 | put_device(ipmi_device->dev); | ||
162 | kfree(ipmi_device); | ||
163 | } | ||
164 | |||
165 | static void ipmi_dev_release_kref(struct kref *kref) | ||
166 | { | ||
167 | struct acpi_ipmi_device *ipmi = | ||
168 | container_of(kref, struct acpi_ipmi_device, kref); | ||
169 | |||
170 | ipmi_dev_release(ipmi); | ||
171 | } | ||
172 | |||
173 | static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device) | ||
174 | { | ||
175 | list_del(&ipmi_device->head); | ||
176 | if (driver_data.selected_smi == ipmi_device) | ||
177 | driver_data.selected_smi = NULL; | ||
178 | |||
179 | /* | ||
180 | * Always setting dead flag after deleting from the list or | ||
181 | * list_for_each_entry() codes must get changed. | ||
182 | */ | ||
183 | ipmi_device->dead = true; | ||
184 | } | ||
185 | |||
186 | static struct acpi_ipmi_device *acpi_ipmi_dev_get(void) | ||
187 | { | ||
188 | struct acpi_ipmi_device *ipmi_device = NULL; | ||
189 | |||
190 | mutex_lock(&driver_data.ipmi_lock); | ||
191 | if (driver_data.selected_smi) { | ||
192 | ipmi_device = driver_data.selected_smi; | ||
193 | kref_get(&ipmi_device->kref); | ||
194 | } | ||
195 | mutex_unlock(&driver_data.ipmi_lock); | ||
196 | |||
197 | return ipmi_device; | ||
198 | } | ||
199 | |||
200 | static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device) | ||
201 | { | ||
202 | kref_put(&ipmi_device->kref, ipmi_dev_release_kref); | ||
203 | } | ||
204 | |||
205 | static struct acpi_ipmi_msg *ipmi_msg_alloc(void) | ||
127 | { | 206 | { |
207 | struct acpi_ipmi_device *ipmi; | ||
128 | struct acpi_ipmi_msg *ipmi_msg; | 208 | struct acpi_ipmi_msg *ipmi_msg; |
129 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 209 | |
210 | ipmi = acpi_ipmi_dev_get(); | ||
211 | if (!ipmi) | ||
212 | return NULL; | ||
130 | 213 | ||
131 | ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); | 214 | ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); |
132 | if (!ipmi_msg) { | 215 | if (!ipmi_msg) { |
133 | dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n"); | 216 | acpi_ipmi_dev_put(ipmi); |
134 | return NULL; | 217 | return NULL; |
135 | } | 218 | } |
219 | |||
220 | kref_init(&ipmi_msg->kref); | ||
136 | init_completion(&ipmi_msg->tx_complete); | 221 | init_completion(&ipmi_msg->tx_complete); |
137 | INIT_LIST_HEAD(&ipmi_msg->head); | 222 | INIT_LIST_HEAD(&ipmi_msg->head); |
138 | ipmi_msg->device = ipmi; | 223 | ipmi_msg->device = ipmi; |
224 | ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN; | ||
225 | |||
139 | return ipmi_msg; | 226 | return ipmi_msg; |
140 | } | 227 | } |
141 | 228 | ||
142 | #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) | 229 | static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg) |
143 | #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) | 230 | { |
144 | static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, | 231 | acpi_ipmi_dev_put(tx_msg->device); |
145 | acpi_physical_address address, | 232 | kfree(tx_msg); |
146 | acpi_integer *value) | 233 | } |
234 | |||
235 | static void ipmi_msg_release_kref(struct kref *kref) | ||
236 | { | ||
237 | struct acpi_ipmi_msg *tx_msg = | ||
238 | container_of(kref, struct acpi_ipmi_msg, kref); | ||
239 | |||
240 | ipmi_msg_release(tx_msg); | ||
241 | } | ||
242 | |||
243 | static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg) | ||
244 | { | ||
245 | kref_get(&tx_msg->kref); | ||
246 | |||
247 | return tx_msg; | ||
248 | } | ||
249 | |||
250 | static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg) | ||
251 | { | ||
252 | kref_put(&tx_msg->kref, ipmi_msg_release_kref); | ||
253 | } | ||
254 | |||
255 | #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) | ||
256 | #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) | ||
257 | static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg, | ||
258 | acpi_physical_address address, | ||
259 | acpi_integer *value) | ||
147 | { | 260 | { |
148 | struct kernel_ipmi_msg *msg; | 261 | struct kernel_ipmi_msg *msg; |
149 | struct acpi_ipmi_buffer *buffer; | 262 | struct acpi_ipmi_buffer *buffer; |
@@ -151,21 +264,31 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
151 | unsigned long flags; | 264 | unsigned long flags; |
152 | 265 | ||
153 | msg = &tx_msg->tx_message; | 266 | msg = &tx_msg->tx_message; |
267 | |||
154 | /* | 268 | /* |
155 | * IPMI network function and command are encoded in the address | 269 | * IPMI network function and command are encoded in the address |
156 | * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. | 270 | * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. |
157 | */ | 271 | */ |
158 | msg->netfn = IPMI_OP_RGN_NETFN(address); | 272 | msg->netfn = IPMI_OP_RGN_NETFN(address); |
159 | msg->cmd = IPMI_OP_RGN_CMD(address); | 273 | msg->cmd = IPMI_OP_RGN_CMD(address); |
160 | msg->data = tx_msg->tx_data; | 274 | msg->data = tx_msg->data; |
275 | |||
161 | /* | 276 | /* |
162 | * value is the parameter passed by the IPMI opregion space handler. | 277 | * value is the parameter passed by the IPMI opregion space handler. |
163 | * It points to the IPMI request message buffer | 278 | * It points to the IPMI request message buffer |
164 | */ | 279 | */ |
165 | buffer = (struct acpi_ipmi_buffer *)value; | 280 | buffer = (struct acpi_ipmi_buffer *)value; |
281 | |||
166 | /* copy the tx message data */ | 282 | /* copy the tx message data */ |
283 | if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) { | ||
284 | dev_WARN_ONCE(tx_msg->device->dev, true, | ||
285 | "Unexpected request (msg len %d).\n", | ||
286 | buffer->length); | ||
287 | return -EINVAL; | ||
288 | } | ||
167 | msg->data_len = buffer->length; | 289 | msg->data_len = buffer->length; |
168 | memcpy(tx_msg->tx_data, buffer->data, msg->data_len); | 290 | memcpy(tx_msg->data, buffer->data, msg->data_len); |
291 | |||
169 | /* | 292 | /* |
170 | * now the default type is SYSTEM_INTERFACE and channel type is BMC. | 293 | * now the default type is SYSTEM_INTERFACE and channel type is BMC. |
171 | * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, | 294 | * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, |
@@ -179,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
179 | 302 | ||
180 | /* Get the msgid */ | 303 | /* Get the msgid */ |
181 | device = tx_msg->device; | 304 | device = tx_msg->device; |
305 | |||
182 | spin_lock_irqsave(&device->tx_msg_lock, flags); | 306 | spin_lock_irqsave(&device->tx_msg_lock, flags); |
183 | device->curr_msgid++; | 307 | device->curr_msgid++; |
184 | tx_msg->tx_msgid = device->curr_msgid; | 308 | tx_msg->tx_msgid = device->curr_msgid; |
185 | spin_unlock_irqrestore(&device->tx_msg_lock, flags); | 309 | spin_unlock_irqrestore(&device->tx_msg_lock, flags); |
310 | |||
311 | return 0; | ||
186 | } | 312 | } |
187 | 313 | ||
188 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, | 314 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, |
189 | acpi_integer *value, int rem_time) | 315 | acpi_integer *value) |
190 | { | 316 | { |
191 | struct acpi_ipmi_buffer *buffer; | 317 | struct acpi_ipmi_buffer *buffer; |
192 | 318 | ||
@@ -195,110 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
195 | * IPMI message returned by IPMI command. | 321 | * IPMI message returned by IPMI command. |
196 | */ | 322 | */ |
197 | buffer = (struct acpi_ipmi_buffer *)value; | 323 | buffer = (struct acpi_ipmi_buffer *)value; |
198 | if (!rem_time && !msg->msg_done) { | 324 | |
199 | buffer->status = ACPI_IPMI_TIMEOUT; | ||
200 | return; | ||
201 | } | ||
202 | /* | 325 | /* |
203 | * If the flag of msg_done is not set or the recv length is zero, it | 326 | * If the flag of msg_done is not set, it means that the IPMI command is |
204 | * means that the IPMI command is not executed correctly. | 327 | * not executed correctly. |
205 | * The status code will be ACPI_IPMI_UNKNOWN. | ||
206 | */ | 328 | */ |
207 | if (!msg->msg_done || !msg->rx_len) { | 329 | buffer->status = msg->msg_done; |
208 | buffer->status = ACPI_IPMI_UNKNOWN; | 330 | if (msg->msg_done != ACPI_IPMI_OK) |
209 | return; | 331 | return; |
210 | } | 332 | |
211 | /* | 333 | /* |
212 | * If the IPMI response message is obtained correctly, the status code | 334 | * If the IPMI response message is obtained correctly, the status code |
213 | * will be ACPI_IPMI_OK | 335 | * will be ACPI_IPMI_OK |
214 | */ | 336 | */ |
215 | buffer->status = ACPI_IPMI_OK; | ||
216 | buffer->length = msg->rx_len; | 337 | buffer->length = msg->rx_len; |
217 | memcpy(buffer->data, msg->rx_data, msg->rx_len); | 338 | memcpy(buffer->data, msg->data, msg->rx_len); |
218 | } | 339 | } |
219 | 340 | ||
220 | static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) | 341 | static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) |
221 | { | 342 | { |
222 | struct acpi_ipmi_msg *tx_msg, *temp; | 343 | struct acpi_ipmi_msg *tx_msg; |
223 | int count = HZ / 10; | 344 | unsigned long flags; |
224 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 345 | |
346 | /* | ||
347 | * NOTE: On-going ipmi_recv_msg | ||
348 | * ipmi_msg_handler() may still be invoked by ipmi_si after | ||
349 | * flushing. But it is safe to do a fast flushing on module_exit() | ||
350 | * without waiting for all ipmi_recv_msg(s) to complete from | ||
351 | * ipmi_msg_handler() as it is ensured by ipmi_si that all | ||
352 | * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user(). | ||
353 | */ | ||
354 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); | ||
355 | while (!list_empty(&ipmi->tx_msg_list)) { | ||
356 | tx_msg = list_first_entry(&ipmi->tx_msg_list, | ||
357 | struct acpi_ipmi_msg, | ||
358 | head); | ||
359 | list_del(&tx_msg->head); | ||
360 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); | ||
225 | 361 | ||
226 | list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { | ||
227 | /* wake up the sleep thread on the Tx msg */ | 362 | /* wake up the sleep thread on the Tx msg */ |
228 | complete(&tx_msg->tx_complete); | 363 | complete(&tx_msg->tx_complete); |
364 | acpi_ipmi_msg_put(tx_msg); | ||
365 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); | ||
229 | } | 366 | } |
367 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); | ||
368 | } | ||
369 | |||
370 | static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi, | ||
371 | struct acpi_ipmi_msg *msg) | ||
372 | { | ||
373 | struct acpi_ipmi_msg *tx_msg, *temp; | ||
374 | bool msg_found = false; | ||
375 | unsigned long flags; | ||
230 | 376 | ||
231 | /* wait for about 100ms to flush the tx message list */ | 377 | spin_lock_irqsave(&ipmi->tx_msg_lock, flags); |
232 | while (count--) { | 378 | list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { |
233 | if (list_empty(&ipmi->tx_msg_list)) | 379 | if (msg == tx_msg) { |
380 | msg_found = true; | ||
381 | list_del(&tx_msg->head); | ||
234 | break; | 382 | break; |
235 | schedule_timeout(1); | 383 | } |
236 | } | 384 | } |
237 | if (!list_empty(&ipmi->tx_msg_list)) | 385 | spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); |
238 | dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n"); | 386 | |
387 | if (msg_found) | ||
388 | acpi_ipmi_msg_put(tx_msg); | ||
239 | } | 389 | } |
240 | 390 | ||
241 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | 391 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) |
242 | { | 392 | { |
243 | struct acpi_ipmi_device *ipmi_device = user_msg_data; | 393 | struct acpi_ipmi_device *ipmi_device = user_msg_data; |
244 | int msg_found = 0; | 394 | bool msg_found = false; |
245 | struct acpi_ipmi_msg *tx_msg; | 395 | struct acpi_ipmi_msg *tx_msg, *temp; |
246 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; | 396 | struct device *dev = ipmi_device->dev; |
247 | unsigned long flags; | 397 | unsigned long flags; |
248 | 398 | ||
249 | if (msg->user != ipmi_device->user_interface) { | 399 | if (msg->user != ipmi_device->user_interface) { |
250 | dev_warn(&pnp_dev->dev, "Unexpected response is returned. " | 400 | dev_warn(dev, |
251 | "returned user %p, expected user %p\n", | 401 | "Unexpected response is returned. returned user %p, expected user %p\n", |
252 | msg->user, ipmi_device->user_interface); | 402 | msg->user, ipmi_device->user_interface); |
253 | ipmi_free_recv_msg(msg); | 403 | goto out_msg; |
254 | return; | ||
255 | } | 404 | } |
405 | |||
256 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); | 406 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
257 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { | 407 | list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) { |
258 | if (msg->msgid == tx_msg->tx_msgid) { | 408 | if (msg->msgid == tx_msg->tx_msgid) { |
259 | msg_found = 1; | 409 | msg_found = true; |
410 | list_del(&tx_msg->head); | ||
260 | break; | 411 | break; |
261 | } | 412 | } |
262 | } | 413 | } |
263 | |||
264 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); | 414 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
415 | |||
265 | if (!msg_found) { | 416 | if (!msg_found) { |
266 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " | 417 | dev_warn(dev, |
267 | "returned.\n", msg->msgid); | 418 | "Unexpected response (msg id %ld) is returned.\n", |
268 | ipmi_free_recv_msg(msg); | 419 | msg->msgid); |
269 | return; | 420 | goto out_msg; |
270 | } | 421 | } |
271 | 422 | ||
272 | if (msg->msg.data_len) { | 423 | /* copy the response data to Rx_data buffer */ |
273 | /* copy the response data to Rx_data buffer */ | 424 | if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) { |
274 | memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len); | 425 | dev_WARN_ONCE(dev, true, |
275 | tx_msg->rx_len = msg->msg.data_len; | 426 | "Unexpected response (msg len %d).\n", |
276 | tx_msg->msg_done = 1; | 427 | msg->msg.data_len); |
428 | goto out_comp; | ||
277 | } | 429 | } |
430 | |||
431 | /* response msg is an error msg */ | ||
432 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | ||
433 | if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE && | ||
434 | msg->msg.data_len == 1) { | ||
435 | if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) { | ||
436 | dev_WARN_ONCE(dev, true, | ||
437 | "Unexpected response (timeout).\n"); | ||
438 | tx_msg->msg_done = ACPI_IPMI_TIMEOUT; | ||
439 | } | ||
440 | goto out_comp; | ||
441 | } | ||
442 | |||
443 | tx_msg->rx_len = msg->msg.data_len; | ||
444 | memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len); | ||
445 | tx_msg->msg_done = ACPI_IPMI_OK; | ||
446 | |||
447 | out_comp: | ||
278 | complete(&tx_msg->tx_complete); | 448 | complete(&tx_msg->tx_complete); |
449 | acpi_ipmi_msg_put(tx_msg); | ||
450 | out_msg: | ||
279 | ipmi_free_recv_msg(msg); | 451 | ipmi_free_recv_msg(msg); |
280 | }; | 452 | } |
281 | 453 | ||
282 | static void ipmi_register_bmc(int iface, struct device *dev) | 454 | static void ipmi_register_bmc(int iface, struct device *dev) |
283 | { | 455 | { |
284 | struct acpi_ipmi_device *ipmi_device, *temp; | 456 | struct acpi_ipmi_device *ipmi_device, *temp; |
285 | struct pnp_dev *pnp_dev; | ||
286 | ipmi_user_t user; | ||
287 | int err; | 457 | int err; |
288 | struct ipmi_smi_info smi_data; | 458 | struct ipmi_smi_info smi_data; |
289 | acpi_handle handle; | 459 | acpi_handle handle; |
290 | 460 | ||
291 | err = ipmi_get_smi_info(iface, &smi_data); | 461 | err = ipmi_get_smi_info(iface, &smi_data); |
292 | |||
293 | if (err) | 462 | if (err) |
294 | return; | 463 | return; |
295 | 464 | ||
296 | if (smi_data.addr_src != SI_ACPI) { | 465 | if (smi_data.addr_src != SI_ACPI) |
297 | put_device(smi_data.dev); | 466 | goto err_ref; |
298 | return; | ||
299 | } | ||
300 | |||
301 | handle = smi_data.addr_info.acpi_info.acpi_handle; | 467 | handle = smi_data.addr_info.acpi_info.acpi_handle; |
468 | if (!handle) | ||
469 | goto err_ref; | ||
470 | |||
471 | ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle); | ||
472 | if (!ipmi_device) { | ||
473 | dev_warn(smi_data.dev, "Can't create IPMI user interface\n"); | ||
474 | goto err_ref; | ||
475 | } | ||
302 | 476 | ||
303 | mutex_lock(&driver_data.ipmi_lock); | 477 | mutex_lock(&driver_data.ipmi_lock); |
304 | list_for_each_entry(temp, &driver_data.ipmi_devices, head) { | 478 | list_for_each_entry(temp, &driver_data.ipmi_devices, head) { |
@@ -307,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
307 | * to the device list, don't add it again. | 481 | * to the device list, don't add it again. |
308 | */ | 482 | */ |
309 | if (temp->handle == handle) | 483 | if (temp->handle == handle) |
310 | goto out; | 484 | goto err_lock; |
311 | } | 485 | } |
312 | 486 | if (!driver_data.selected_smi) | |
313 | ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); | 487 | driver_data.selected_smi = ipmi_device; |
314 | 488 | list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); | |
315 | if (!ipmi_device) | ||
316 | goto out; | ||
317 | |||
318 | pnp_dev = to_pnp_dev(smi_data.dev); | ||
319 | ipmi_device->handle = handle; | ||
320 | ipmi_device->pnp_dev = pnp_dev; | ||
321 | |||
322 | err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, | ||
323 | ipmi_device, &user); | ||
324 | if (err) { | ||
325 | dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n"); | ||
326 | kfree(ipmi_device); | ||
327 | goto out; | ||
328 | } | ||
329 | acpi_add_ipmi_device(ipmi_device); | ||
330 | ipmi_device->user_interface = user; | ||
331 | ipmi_device->ipmi_ifnum = iface; | ||
332 | mutex_unlock(&driver_data.ipmi_lock); | 489 | mutex_unlock(&driver_data.ipmi_lock); |
333 | memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); | 490 | |
491 | put_device(smi_data.dev); | ||
334 | return; | 492 | return; |
335 | 493 | ||
336 | out: | 494 | err_lock: |
337 | mutex_unlock(&driver_data.ipmi_lock); | 495 | mutex_unlock(&driver_data.ipmi_lock); |
496 | ipmi_dev_release(ipmi_device); | ||
497 | err_ref: | ||
338 | put_device(smi_data.dev); | 498 | put_device(smi_data.dev); |
339 | return; | 499 | return; |
340 | } | 500 | } |
@@ -342,23 +502,29 @@ out:
342 | static void ipmi_bmc_gone(int iface) | 502 | static void ipmi_bmc_gone(int iface) |
343 | { | 503 | { |
344 | struct acpi_ipmi_device *ipmi_device, *temp; | 504 | struct acpi_ipmi_device *ipmi_device, *temp; |
505 | bool dev_found = false; | ||
345 | 506 | ||
346 | mutex_lock(&driver_data.ipmi_lock); | 507 | mutex_lock(&driver_data.ipmi_lock); |
347 | list_for_each_entry_safe(ipmi_device, temp, | 508 | list_for_each_entry_safe(ipmi_device, temp, |
348 | &driver_data.ipmi_devices, head) { | 509 | &driver_data.ipmi_devices, head) { |
349 | if (ipmi_device->ipmi_ifnum != iface) | 510 | if (ipmi_device->ipmi_ifnum != iface) { |
350 | continue; | 511 | dev_found = true; |
351 | 512 | __ipmi_dev_kill(ipmi_device); | |
352 | acpi_remove_ipmi_device(ipmi_device); | 513 | break; |
353 | put_device(ipmi_device->smi_data.dev); | 514 | } |
354 | kfree(ipmi_device); | ||
355 | break; | ||
356 | } | 515 | } |
516 | if (!driver_data.selected_smi) | ||
517 | driver_data.selected_smi = list_first_entry_or_null( | ||
518 | &driver_data.ipmi_devices, | ||
519 | struct acpi_ipmi_device, head); | ||
357 | mutex_unlock(&driver_data.ipmi_lock); | 520 | mutex_unlock(&driver_data.ipmi_lock); |
521 | |||
522 | if (dev_found) { | ||
523 | ipmi_flush_tx_msg(ipmi_device); | ||
524 | acpi_ipmi_dev_put(ipmi_device); | ||
525 | } | ||
358 | } | 526 | } |
359 | /* -------------------------------------------------------------------------- | 527 | |
360 | * Address Space Management | ||
361 | * -------------------------------------------------------------------------- */ | ||
362 | /* | 528 | /* |
363 | * This is the IPMI opregion space handler. | 529 | * This is the IPMI opregion space handler. |
364 | * @function: indicates the read/write. In fact as the IPMI message is driven | 530 | * @function: indicates the read/write. In fact as the IPMI message is driven |
@@ -371,17 +537,17 @@ static void ipmi_bmc_gone(int iface)
371 | * the response IPMI message returned by IPMI command. | 537 | * the response IPMI message returned by IPMI command. |
372 | * @handler_context: IPMI device context. | 538 | * @handler_context: IPMI device context. |
373 | */ | 539 | */ |
374 | |||
375 | static acpi_status | 540 | static acpi_status |
376 | acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | 541 | acpi_ipmi_space_handler(u32 function, acpi_physical_address address, |
377 | u32 bits, acpi_integer *value, | 542 | u32 bits, acpi_integer *value, |
378 | void *handler_context, void *region_context) | 543 | void *handler_context, void *region_context) |
379 | { | 544 | { |
380 | struct acpi_ipmi_msg *tx_msg; | 545 | struct acpi_ipmi_msg *tx_msg; |
381 | struct acpi_ipmi_device *ipmi_device = handler_context; | 546 | struct acpi_ipmi_device *ipmi_device; |
382 | int err, rem_time; | 547 | int err; |
383 | acpi_status status; | 548 | acpi_status status; |
384 | unsigned long flags; | 549 | unsigned long flags; |
550 | |||
385 | /* | 551 | /* |
386 | * IPMI opregion message. | 552 | * IPMI opregion message. |
387 | * IPMI message is firstly written to the BMC and system software | 553 | * IPMI message is firstly written to the BMC and system software |
@@ -391,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
391 | if ((function & ACPI_IO_MASK) == ACPI_READ) | 557 | if ((function & ACPI_IO_MASK) == ACPI_READ) |
392 | return AE_TYPE; | 558 | return AE_TYPE; |
393 | 559 | ||
394 | if (!ipmi_device->user_interface) | 560 | tx_msg = ipmi_msg_alloc(); |
561 | if (!tx_msg) | ||
395 | return AE_NOT_EXIST; | 562 | return AE_NOT_EXIST; |
563 | ipmi_device = tx_msg->device; | ||
396 | 564 | ||
397 | tx_msg = acpi_alloc_ipmi_msg(ipmi_device); | 565 | if (acpi_format_ipmi_request(tx_msg, address, value) != 0) { |
398 | if (!tx_msg) | 566 | ipmi_msg_release(tx_msg); |
399 | return AE_NO_MEMORY; | 567 | return AE_TYPE; |
568 | } | ||
400 | 569 | ||
401 | acpi_format_ipmi_msg(tx_msg, address, value); | 570 | acpi_ipmi_msg_get(tx_msg); |
571 | mutex_lock(&driver_data.ipmi_lock); | ||
572 | /* Do not add a tx_msg that can not be flushed. */ | ||
573 | if (ipmi_device->dead) { | ||
574 | mutex_unlock(&driver_data.ipmi_lock); | ||
575 | ipmi_msg_release(tx_msg); | ||
576 | return AE_NOT_EXIST; | ||
577 | } | ||
402 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); | 578 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
403 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); | 579 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); |
404 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); | 580 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
581 | mutex_unlock(&driver_data.ipmi_lock); | ||
582 | |||
405 | err = ipmi_request_settime(ipmi_device->user_interface, | 583 | err = ipmi_request_settime(ipmi_device->user_interface, |
406 | &tx_msg->addr, | 584 | &tx_msg->addr, |
407 | tx_msg->tx_msgid, | 585 | tx_msg->tx_msgid, |
408 | &tx_msg->tx_message, | 586 | &tx_msg->tx_message, |
409 | NULL, 0, 0, 0); | 587 | NULL, 0, 0, IPMI_TIMEOUT); |
410 | if (err) { | 588 | if (err) { |
411 | status = AE_ERROR; | 589 | status = AE_ERROR; |
412 | goto end_label; | 590 | goto out_msg; |
413 | } | 591 | } |
414 | rem_time = wait_for_completion_timeout(&tx_msg->tx_complete, | 592 | wait_for_completion(&tx_msg->tx_complete); |
415 | IPMI_TIMEOUT); | 593 | |
416 | acpi_format_ipmi_response(tx_msg, value, rem_time); | 594 | acpi_format_ipmi_response(tx_msg, value); |
417 | status = AE_OK; | 595 | status = AE_OK; |
418 | 596 | ||
419 | end_label: | 597 | out_msg: |
420 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); | 598 | ipmi_cancel_tx_msg(ipmi_device, tx_msg); |
421 | list_del(&tx_msg->head); | 599 | acpi_ipmi_msg_put(tx_msg); |
422 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); | ||
423 | kfree(tx_msg); | ||
424 | return status; | 600 | return status; |
425 | } | 601 | } |
426 | 602 | ||
427 | static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi) | 603 | static int __init acpi_ipmi_init(void) |
428 | { | ||
429 | if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | ||
430 | return; | ||
431 | |||
432 | acpi_remove_address_space_handler(ipmi->handle, | ||
433 | ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler); | ||
434 | |||
435 | clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
436 | } | ||
437 | |||
438 | static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi) | ||
439 | { | 604 | { |
605 | int result; | ||
440 | acpi_status status; | 606 | acpi_status status; |
441 | 607 | ||
442 | if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | 608 | if (acpi_disabled) |
443 | return 0; | 609 | return 0; |
444 | 610 | ||
445 | status = acpi_install_address_space_handler(ipmi->handle, | 611 | status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, |
446 | ACPI_ADR_SPACE_IPMI, | 612 | ACPI_ADR_SPACE_IPMI, |
447 | &acpi_ipmi_space_handler, | 613 | &acpi_ipmi_space_handler, |
448 | NULL, ipmi); | 614 | NULL, NULL); |
449 | if (ACPI_FAILURE(status)) { | 615 | if (ACPI_FAILURE(status)) { |
450 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | 616 | pr_warn("Can't register IPMI opregion space handle\n"); |
451 | dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " | ||
452 | "handle\n"); | ||
453 | return -EINVAL; | 617 | return -EINVAL; |
454 | } | 618 | } |
455 | set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
460 | { | ||
461 | |||
462 | INIT_LIST_HEAD(&ipmi_device->head); | ||
463 | |||
464 | spin_lock_init(&ipmi_device->tx_msg_lock); | ||
465 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | ||
466 | ipmi_install_space_handler(ipmi_device); | ||
467 | |||
468 | list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); | ||
469 | } | ||
470 | |||
471 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
472 | { | ||
473 | /* | ||
474 | * If the IPMI user interface is created, it should be | ||
475 | * destroyed. | ||
476 | */ | ||
477 | if (ipmi_device->user_interface) { | ||
478 | ipmi_destroy_user(ipmi_device->user_interface); | ||
479 | ipmi_device->user_interface = NULL; | ||
480 | } | ||
481 | /* flush the Tx_msg list */ | ||
482 | if (!list_empty(&ipmi_device->tx_msg_list)) | ||
483 | ipmi_flush_tx_msg(ipmi_device); | ||
484 | |||
485 | list_del(&ipmi_device->head); | ||
486 | ipmi_remove_space_handler(ipmi_device); | ||
487 | } | ||
488 | |||
489 | static int __init acpi_ipmi_init(void) | ||
490 | { | ||
491 | int result = 0; | ||
492 | |||
493 | if (acpi_disabled) | ||
494 | return result; | ||
495 | |||
496 | mutex_init(&driver_data.ipmi_lock); | ||
497 | |||
498 | result = ipmi_smi_watcher_register(&driver_data.bmc_events); | 619 | result = ipmi_smi_watcher_register(&driver_data.bmc_events); |
620 | if (result) | ||
621 | pr_err("Can't register IPMI system interface watcher\n"); | ||
499 | 622 | ||
500 | return result; | 623 | return result; |
501 | } | 624 | } |
502 | 625 | ||
503 | static void __exit acpi_ipmi_exit(void) | 626 | static void __exit acpi_ipmi_exit(void) |
504 | { | 627 | { |
505 | struct acpi_ipmi_device *ipmi_device, *temp; | 628 | struct acpi_ipmi_device *ipmi_device; |
506 | 629 | ||
507 | if (acpi_disabled) | 630 | if (acpi_disabled) |
508 | return; | 631 | return; |
@@ -516,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
516 | * handler and free it. | 639 | * handler and free it. |
517 | */ | 640 | */ |
518 | mutex_lock(&driver_data.ipmi_lock); | 641 | mutex_lock(&driver_data.ipmi_lock); |
519 | list_for_each_entry_safe(ipmi_device, temp, | 642 | while (!list_empty(&driver_data.ipmi_devices)) { |
520 | &driver_data.ipmi_devices, head) { | 643 | ipmi_device = list_first_entry(&driver_data.ipmi_devices, |
521 | acpi_remove_ipmi_device(ipmi_device); | 644 | struct acpi_ipmi_device, |
522 | put_device(ipmi_device->smi_data.dev); | 645 | head); |
523 | kfree(ipmi_device); | 646 | __ipmi_dev_kill(ipmi_device); |
647 | mutex_unlock(&driver_data.ipmi_lock); | ||
648 | |||
649 | ipmi_flush_tx_msg(ipmi_device); | ||
650 | acpi_ipmi_dev_put(ipmi_device); | ||
651 | |||
652 | mutex_lock(&driver_data.ipmi_lock); | ||
524 | } | 653 | } |
525 | mutex_unlock(&driver_data.ipmi_lock); | 654 | mutex_unlock(&driver_data.ipmi_lock); |
655 | acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, | ||
656 | ACPI_ADR_SPACE_IPMI, | ||
657 | &acpi_ipmi_space_handler); | ||
526 | } | 658 | } |
527 | 659 | ||
528 | module_init(acpi_ipmi_init); | 660 | module_init(acpi_ipmi_init); |
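
The acpi_ipmi.c rework above drops the per-device IPMI operation region handlers and the roughly 100 ms flush loop in favour of a single handler installed at ACPI_ROOT_OBJECT plus reference-counted device and message objects, so a BMC that goes away can have its in-flight requests cancelled safely. The fragment below only illustrates the generic kref get/put lifetime pattern that the acpi_ipmi_dev_get()/acpi_ipmi_dev_put() and acpi_ipmi_msg_get()/acpi_ipmi_msg_put() helpers follow; the example_* type and functions are hypothetical:

	/* Generic kref lifetime pattern (illustration only). */
	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_object {
		struct kref kref;
		/* ... payload ... */
	};

	static struct example_object *example_alloc(void)
	{
		struct example_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			kref_init(&obj->kref);	/* refcount starts at 1 */
		return obj;
	}

	static void example_release(struct kref *kref)
	{
		struct example_object *obj =
			container_of(kref, struct example_object, kref);

		kfree(obj);	/* last reference dropped: free the object */
	}

	static struct example_object *example_get(struct example_object *obj)
	{
		kref_get(&obj->kref);	/* take a reference before handing it out */
		return obj;
	}

	static void example_put(struct example_object *obj)
	{
		kref_put(&obj->kref, example_release);
	}
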
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fb78bb9ad8f6..d3961014aad7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
30 | /* Offsets relative to LPSS_PRIVATE_OFFSET */ | 30 | /* Offsets relative to LPSS_PRIVATE_OFFSET */ |
31 | #define LPSS_GENERAL 0x08 | 31 | #define LPSS_GENERAL 0x08 |
32 | #define LPSS_GENERAL_LTR_MODE_SW BIT(2) | 32 | #define LPSS_GENERAL_LTR_MODE_SW BIT(2) |
33 | #define LPSS_GENERAL_UART_RTS_OVRD BIT(3) | ||
33 | #define LPSS_SW_LTR 0x10 | 34 | #define LPSS_SW_LTR 0x10 |
34 | #define LPSS_AUTO_LTR 0x14 | 35 | #define LPSS_AUTO_LTR 0x14 |
35 | #define LPSS_TX_INT 0x20 | 36 | #define LPSS_TX_INT 0x20 |
@@ -68,11 +69,16 @@ struct lpss_private_data { | |||
68 | 69 | ||
69 | static void lpss_uart_setup(struct lpss_private_data *pdata) | 70 | static void lpss_uart_setup(struct lpss_private_data *pdata) |
70 | { | 71 | { |
71 | unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; | 72 | unsigned int offset; |
72 | u32 reg; | 73 | u32 reg; |
73 | 74 | ||
74 | reg = readl(pdata->mmio_base + tx_int_offset); | 75 | offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; |
75 | writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset); | 76 | reg = readl(pdata->mmio_base + offset); |
77 | writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset); | ||
78 | |||
79 | offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; | ||
80 | reg = readl(pdata->mmio_base + offset); | ||
81 | writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); | ||
76 | } | 82 | } |
77 | 83 | ||
78 | static struct lpss_device_desc lpt_dev_desc = { | 84 | static struct lpss_device_desc lpt_dev_desc = { |
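lpss_uart_setup() now performs two read-modify-write cycles on the LPSS private register block: one masks the TX interrupt, the other sets the new UART RTS override bit. The access pattern is the usual MMIO set-bits sequence; a small sketch, where the base, offset and bit values are placeholders rather than real hardware addresses:

    #include <linux/io.h>

    /* OR "bits" into the 32-bit register at base + offset. */
    static void mmio_set_bits(void __iomem *base, unsigned int offset, u32 bits)
    {
            u32 reg = readl(base + offset);

            writel(reg | bits, base + offset);
    }

With a helper like that, the function body above reduces to two calls: one for prv_offset + LPSS_TX_INT with LPSS_TX_INT_MASK, and one for prv_offset + LPSS_GENERAL with LPSS_GENERAL_UART_RTS_OVRD.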
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 999adb5499c7..551dad712ffe 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device) | |||
152 | unsigned long long current_status; | 152 | unsigned long long current_status; |
153 | 153 | ||
154 | /* Get device present/absent information from the _STA */ | 154 | /* Get device present/absent information from the _STA */ |
155 | if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA", | 155 | if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, |
156 | NULL, ¤t_status))) | 156 | METHOD_NAME__STA, NULL, |
157 | ¤t_status))) | ||
157 | return -ENODEV; | 158 | return -ENODEV; |
158 | /* | 159 | /* |
159 | * Check for device status. Device should be | 160 | * Check for device status. Device should be |
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device) | |||
281 | if (!info->enabled) | 282 | if (!info->enabled) |
282 | continue; | 283 | continue; |
283 | 284 | ||
284 | if (nid < 0) | 285 | if (nid == NUMA_NO_NODE) |
285 | nid = memory_add_physaddr_to_nid(info->start_addr); | 286 | nid = memory_add_physaddr_to_nid(info->start_addr); |
286 | 287 | ||
287 | acpi_unbind_memory_blocks(info, handle); | 288 | acpi_unbind_memory_blocks(info, handle); |
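The hunks above switch the hard-coded "_STA" string to the METHOD_NAME__STA constant and the raw nid < 0 test to NUMA_NO_NODE; the evaluation logic itself is unchanged. For reference, a hedged sketch of the usual _STA presence check (demo_check_present and its handle argument are hypothetical; the flag is the standard ACPI_STA_DEVICE_PRESENT bit):

    #include <linux/acpi.h>

    static int demo_check_present(acpi_handle handle)
    {
            unsigned long long sta;
            acpi_status status;

            status = acpi_evaluate_integer(handle, METHOD_NAME__STA, NULL, &sta);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            /* Bit 0: device present, bit 1: device enabled */
            if (!(sta & ACPI_STA_DEVICE_PRESENT))
                    return -ENODEV;

            return 0;
    }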
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 1bde12708f9e..8a4cfc7e71f0 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c | |||
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform"); | |||
29 | static const struct acpi_device_id acpi_platform_device_ids[] = { | 29 | static const struct acpi_device_id acpi_platform_device_ids[] = { |
30 | 30 | ||
31 | { "PNP0D40" }, | 31 | { "PNP0D40" }, |
32 | { "ACPI0003" }, | ||
33 | { "VPC2004" }, | ||
34 | { "BCM4752" }, | ||
35 | |||
36 | /* Intel Smart Sound Technology */ | ||
37 | { "INT33C8" }, | ||
38 | { "80860F28" }, | ||
32 | 39 | ||
33 | { } | 40 | { } |
34 | }; | 41 | }; |
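Adding IDs to acpi_platform_device_ids means the ACPI core creates a platform_device for the matching ACPI nodes, which an ordinary platform driver can then bind to through its ACPI match table. A sketch of that driver side, with hypothetical demo_* names (only the ID string is taken from the list above):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/acpi.h>

    static const struct acpi_device_id demo_acpi_ids[] = {
            { "BCM4752" },
            { }
    };
    MODULE_DEVICE_TABLE(acpi, demo_acpi_ids);

    static int demo_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "bound via ACPI enumeration\n");
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe = demo_probe,
            .driver = {
                    .name = "demo-acpi-platform",
                    .owner = THIS_MODULE,
                    .acpi_match_table = ACPI_PTR(demo_acpi_ids),
            },
    };
    module_platform_driver(demo_driver);
    MODULE_LICENSE("GPL");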
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index f29e06efa479..3c1d6b0c09a4 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev) | |||
140 | return 0; | 140 | return 0; |
141 | } | 141 | } |
142 | 142 | ||
143 | static int acpi_processor_errata(struct acpi_processor *pr) | 143 | static int acpi_processor_errata(void) |
144 | { | 144 | { |
145 | int result = 0; | 145 | int result = 0; |
146 | struct pci_dev *dev = NULL; | 146 | struct pci_dev *dev = NULL; |
147 | 147 | ||
148 | |||
149 | if (!pr) | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* | 148 | /* |
153 | * PIIX4 | 149 | * PIIX4 |
154 | */ | 150 | */ |
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) | |||
181 | cpu_maps_update_begin(); | 177 | cpu_maps_update_begin(); |
182 | cpu_hotplug_begin(); | 178 | cpu_hotplug_begin(); |
183 | 179 | ||
184 | ret = acpi_map_lsapic(pr->handle, &pr->id); | 180 | ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); |
185 | if (ret) | 181 | if (ret) |
186 | goto out; | 182 | goto out; |
187 | 183 | ||
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
219 | int cpu_index, device_declaration = 0; | 215 | int cpu_index, device_declaration = 0; |
220 | acpi_status status = AE_OK; | 216 | acpi_status status = AE_OK; |
221 | static int cpu0_initialized; | 217 | static int cpu0_initialized; |
218 | unsigned long long value; | ||
222 | 219 | ||
223 | if (num_online_cpus() > 1) | 220 | acpi_processor_errata(); |
224 | errata.smp = TRUE; | ||
225 | |||
226 | acpi_processor_errata(pr); | ||
227 | 221 | ||
228 | /* | 222 | /* |
229 | * Check to see if we have bus mastering arbitration control. This | 223 | * Check to see if we have bus mastering arbitration control. This |
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
247 | return -ENODEV; | 241 | return -ENODEV; |
248 | } | 242 | } |
249 | 243 | ||
250 | /* | ||
251 | * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. | ||
252 | * >>> 'acpi_get_processor_id(acpi_id, &id)' in | ||
253 | * arch/xxx/acpi.c | ||
254 | */ | ||
255 | pr->acpi_id = object.processor.proc_id; | 244 | pr->acpi_id = object.processor.proc_id; |
256 | } else { | 245 | } else { |
257 | /* | 246 | /* |
258 | * Declared with "Device" statement; match _UID. | 247 | * Declared with "Device" statement; match _UID. |
259 | * Note that we don't handle string _UIDs yet. | 248 | * Note that we don't handle string _UIDs yet. |
260 | */ | 249 | */ |
261 | unsigned long long value; | ||
262 | status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, | 250 | status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, |
263 | NULL, &value); | 251 | NULL, &value); |
264 | if (ACPI_FAILURE(status)) { | 252 | if (ACPI_FAILURE(status)) { |
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
270 | device_declaration = 1; | 258 | device_declaration = 1; |
271 | pr->acpi_id = value; | 259 | pr->acpi_id = value; |
272 | } | 260 | } |
273 | cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id); | 261 | pr->apic_id = acpi_get_apicid(pr->handle, device_declaration, |
262 | pr->acpi_id); | ||
263 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); | ||
274 | 264 | ||
275 | /* Handle UP system running SMP kernel, with no LAPIC in MADT */ | 265 | /* Handle UP system running SMP kernel, with no LAPIC in MADT */ |
276 | if (!cpu0_initialized && (cpu_index == -1) && | 266 | if (!cpu0_initialized && (cpu_index == -1) && |
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
332 | * ensure we get the right value in the "physical id" field | 322 | * ensure we get the right value in the "physical id" field |
333 | * of /proc/cpuinfo | 323 | * of /proc/cpuinfo |
334 | */ | 324 | */ |
335 | status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer); | 325 | status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value); |
336 | if (ACPI_SUCCESS(status)) | 326 | if (ACPI_SUCCESS(status)) |
337 | arch_fix_phys_package_id(pr->id, object.integer.value); | 327 | arch_fix_phys_package_id(pr->id, value); |
338 | 328 | ||
339 | return 0; | 329 | return 0; |
340 | } | 330 | } |
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 9feba08c29fe..a9fd0b872062 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h | |||
@@ -114,10 +114,12 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void | |||
114 | acpi_db_generate_gpe(char *gpe_arg, | 114 | acpi_db_generate_gpe(char *gpe_arg, |
115 | char *block_arg)) | 115 | char *block_arg)) |
116 | 116 | ||
117 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void)) | ||
118 | |||
117 | /* | 119 | /* |
118 | * dbconvert - miscellaneous conversion routines | 120 | * dbconvert - miscellaneous conversion routines |
119 | */ | 121 | */ |
120 | acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value); | 122 | acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value); |
121 | 123 | ||
122 | acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object); | 124 | acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object); |
123 | 125 | ||
@@ -154,6 +156,8 @@ void acpi_db_set_scope(char *name); | |||
154 | 156 | ||
155 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); | 157 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); |
156 | 158 | ||
159 | void acpi_db_dump_namespace_paths(void); | ||
160 | |||
157 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); | 161 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); |
158 | 162 | ||
159 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); | 163 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); |
@@ -240,6 +244,8 @@ void acpi_db_display_history(void); | |||
240 | 244 | ||
241 | char *acpi_db_get_from_history(char *command_num_arg); | 245 | char *acpi_db_get_from_history(char *command_num_arg); |
242 | 246 | ||
247 | char *acpi_db_get_history_by_index(u32 command_num); | ||
248 | |||
243 | /* | 249 | /* |
244 | * dbinput - user front-end to the AML debugger | 250 | * dbinput - user front-end to the AML debugger |
245 | */ | 251 | */ |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index ab0e97710381..41abe552c7a3 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -71,7 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void); | |||
71 | 71 | ||
72 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status | 72 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status |
73 | acpi_ev_acquire_global_lock(u16 timeout)) | 73 | acpi_ev_acquire_global_lock(u16 timeout)) |
74 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void)) | 74 | |
75 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void)) | ||
75 | acpi_status acpi_ev_remove_global_lock_handler(void); | 76 | acpi_status acpi_ev_remove_global_lock_handler(void); |
76 | 77 | ||
77 | /* | 78 | /* |
@@ -242,11 +243,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, | |||
242 | */ | 243 | */ |
243 | u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context); | 244 | u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context); |
244 | 245 | ||
245 | u32 acpi_ev_install_sci_handler(void); | 246 | u32 acpi_ev_sci_dispatch(void); |
246 | 247 | ||
247 | acpi_status acpi_ev_remove_sci_handler(void); | 248 | u32 acpi_ev_install_sci_handler(void); |
248 | 249 | ||
249 | u32 acpi_ev_initialize_SCI(u32 program_SCI); | 250 | acpi_status acpi_ev_remove_all_sci_handlers(void); |
250 | 251 | ||
251 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void)) | 252 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void)) |
252 | #endif /* __ACEVENTS_H__ */ | 253 | #endif /* __ACEVENTS_H__ */ |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 90e846f985fa..e9f1fc7f99c7 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler; | |||
269 | ACPI_EXTERN void *acpi_gbl_table_handler_context; | 269 | ACPI_EXTERN void *acpi_gbl_table_handler_context; |
270 | ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; | 270 | ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; |
271 | ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler; | 271 | ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler; |
272 | ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list; | ||
272 | 273 | ||
273 | /* Owner ID support */ | 274 | /* Owner ID support */ |
274 | 275 | ||
@@ -405,7 +406,9 @@ extern u32 acpi_gbl_nesting_level; | |||
405 | 406 | ||
406 | /* Event counters */ | 407 | /* Event counters */ |
407 | 408 | ||
409 | ACPI_EXTERN u32 acpi_method_count; | ||
408 | ACPI_EXTERN u32 acpi_gpe_count; | 410 | ACPI_EXTERN u32 acpi_gpe_count; |
411 | ACPI_EXTERN u32 acpi_sci_count; | ||
409 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | 412 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; |
410 | 413 | ||
411 | /* Support for dynamic control method tracing mechanism */ | 414 | /* Support for dynamic control method tracing mechanism */ |
@@ -445,13 +448,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables; | |||
445 | ACPI_EXTERN u8 acpi_gbl_db_opt_stats; | 448 | ACPI_EXTERN u8 acpi_gbl_db_opt_stats; |
446 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; | 449 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; |
447 | ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support; | 450 | ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support; |
448 | |||
449 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; | ||
450 | ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; | ||
451 | ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]; | ||
452 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]; | ||
453 | ACPI_EXTERN char acpi_gbl_db_scope_buf[80]; | ||
454 | ACPI_EXTERN char acpi_gbl_db_debug_filename[80]; | ||
455 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; | 451 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; |
456 | ACPI_EXTERN char *acpi_gbl_db_buffer; | 452 | ACPI_EXTERN char *acpi_gbl_db_buffer; |
457 | ACPI_EXTERN char *acpi_gbl_db_filename; | 453 | ACPI_EXTERN char *acpi_gbl_db_filename; |
@@ -459,6 +455,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level; | |||
459 | ACPI_EXTERN u32 acpi_gbl_db_console_debug_level; | 455 | ACPI_EXTERN u32 acpi_gbl_db_console_debug_level; |
460 | ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node; | 456 | ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node; |
461 | 457 | ||
458 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; | ||
459 | ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; | ||
460 | |||
461 | /* These buffers should all be the same size */ | ||
462 | |||
463 | ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]; | ||
464 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]; | ||
465 | ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]; | ||
466 | ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]; | ||
467 | |||
462 | /* | 468 | /* |
463 | * Statistic globals | 469 | * Statistic globals |
464 | */ | 470 | */ |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 0ed00669cd21..53ed1a8ba4f0 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info { | |||
398 | * | 398 | * |
399 | ****************************************************************************/ | 399 | ****************************************************************************/ |
400 | 400 | ||
401 | /* Dispatch info for each host-installed SCI handler */ | ||
402 | |||
403 | struct acpi_sci_handler_info { | ||
404 | struct acpi_sci_handler_info *next; | ||
405 | acpi_sci_handler address; /* Address of handler */ | ||
406 | void *context; /* Context to be passed to handler */ | ||
407 | }; | ||
408 | |||
401 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ | 409 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ |
402 | 410 | ||
403 | struct acpi_gpe_handler_info { | 411 | struct acpi_gpe_handler_info { |
@@ -1064,7 +1072,7 @@ struct acpi_db_method_info { | |||
1064 | char *name; | 1072 | char *name; |
1065 | u32 flags; | 1073 | u32 flags; |
1066 | u32 num_loops; | 1074 | u32 num_loops; |
1067 | char pathname[128]; | 1075 | char pathname[ACPI_DB_LINE_BUFFER_SIZE]; |
1068 | char **args; | 1076 | char **args; |
1069 | acpi_object_type *types; | 1077 | acpi_object_type *types; |
1070 | 1078 | ||
@@ -1086,6 +1094,7 @@ struct acpi_integrity_info { | |||
1086 | u32 objects; | 1094 | u32 objects; |
1087 | }; | 1095 | }; |
1088 | 1096 | ||
1097 | #define ACPI_DB_DISABLE_OUTPUT 0x00 | ||
1089 | #define ACPI_DB_REDIRECTABLE_OUTPUT 0x01 | 1098 | #define ACPI_DB_REDIRECTABLE_OUTPUT 0x01 |
1090 | #define ACPI_DB_CONSOLE_OUTPUT 0x02 | 1099 | #define ACPI_DB_CONSOLE_OUTPUT 0x02 |
1091 | #define ACPI_DB_DUPLICATE_OUTPUT 0x03 | 1100 | #define ACPI_DB_DUPLICATE_OUTPUT 0x03 |
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 530a2f8c1252..2a86c65d873b 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h | |||
@@ -410,37 +410,6 @@ | |||
410 | #endif | 410 | #endif |
411 | 411 | ||
412 | /* | 412 | /* |
413 | * Memory allocation tracking (DEBUG ONLY) | ||
414 | */ | ||
415 | #define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__ | ||
416 | |||
417 | #ifndef ACPI_DBG_TRACK_ALLOCATIONS | ||
418 | |||
419 | /* Memory allocation */ | ||
420 | |||
421 | #ifndef ACPI_ALLOCATE | ||
422 | #define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS) | ||
423 | #endif | ||
424 | #ifndef ACPI_ALLOCATE_ZEROED | ||
425 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS) | ||
426 | #endif | ||
427 | #ifndef ACPI_FREE | ||
428 | #define ACPI_FREE(a) acpi_os_free(a) | ||
429 | #endif | ||
430 | #define ACPI_MEM_TRACKING(a) | ||
431 | |||
432 | #else | ||
433 | |||
434 | /* Memory allocation */ | ||
435 | |||
436 | #define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) | ||
437 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) | ||
438 | #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) | ||
439 | #define ACPI_MEM_TRACKING(a) a | ||
440 | |||
441 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ | ||
442 | |||
443 | /* | ||
444 | * Macros used for ACPICA utilities only | 413 | * Macros used for ACPICA utilities only |
445 | */ | 414 | */ |
446 | 415 | ||
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 40b04bd5579e..e6138ac4a160 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h | |||
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type, | |||
213 | u8 display_type, | 213 | u8 display_type, |
214 | u32 max_depth, | 214 | u32 max_depth, |
215 | acpi_owner_id owner_id, acpi_handle start_handle); | 215 | acpi_owner_id owner_id, acpi_handle start_handle); |
216 | |||
217 | void | ||
218 | acpi_ns_dump_object_paths(acpi_object_type type, | ||
219 | u8 display_type, | ||
220 | u32 max_depth, | ||
221 | acpi_owner_id owner_id, acpi_handle start_handle); | ||
216 | #endif /* ACPI_FUTURE_USAGE */ | 222 | #endif /* ACPI_FUTURE_USAGE */ |
217 | 223 | ||
218 | /* | 224 | /* |
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index d5a62a6182bb..be8180c17d7e 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -628,6 +628,17 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position); | |||
628 | 628 | ||
629 | void acpi_ut_repair_name(char *name); | 629 | void acpi_ut_repair_name(char *name); |
630 | 630 | ||
631 | #if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) | ||
632 | u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source); | ||
633 | |||
634 | u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source); | ||
635 | |||
636 | u8 | ||
637 | acpi_ut_safe_strncat(char *dest, | ||
638 | acpi_size dest_size, | ||
639 | char *source, acpi_size max_transfer_length); | ||
640 | #endif | ||
641 | |||
631 | /* | 642 | /* |
632 | * utmutex - mutex support | 643 | * utmutex - mutex support |
633 | */ | 644 | */ |
@@ -652,12 +663,6 @@ acpi_status | |||
652 | acpi_ut_initialize_buffer(struct acpi_buffer *buffer, | 663 | acpi_ut_initialize_buffer(struct acpi_buffer *buffer, |
653 | acpi_size required_length); | 664 | acpi_size required_length); |
654 | 665 | ||
655 | void *acpi_ut_allocate(acpi_size size, | ||
656 | u32 component, const char *module, u32 line); | ||
657 | |||
658 | void *acpi_ut_allocate_zeroed(acpi_size size, | ||
659 | u32 component, const char *module, u32 line); | ||
660 | |||
661 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | 666 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS |
662 | void *acpi_ut_allocate_and_track(acpi_size size, | 667 | void *acpi_ut_allocate_and_track(acpi_size size, |
663 | u32 component, const char *module, u32 line); | 668 | u32 component, const char *module, u32 line); |
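The three new acpi_ut_safe_str* prototypes earlier in this hunk are bounds-checked replacements for the unchecked C library calls, compiled in only for the debugger and application builds. A generic sketch of such a helper, not the ACPICA implementation, which simply refuses the copy when the source would overflow the destination:

    #include <string.h>

    /* Copy source into dest only if it fits; return nonzero on overflow. */
    static int safe_strcpy(char *dest, size_t dest_size, const char *source)
    {
            if (strlen(source) >= dest_size)
                    return 1;

            strcpy(dest, source);
            return 0;
    }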
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index fb09b08d7080..afdc6df17abf 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c | |||
@@ -158,7 +158,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node, | |||
158 | walk_state->deferred_node = node; | 158 | walk_state->deferred_node = node; |
159 | status = acpi_ps_parse_aml(walk_state); | 159 | status = acpi_ps_parse_aml(walk_state); |
160 | 160 | ||
161 | cleanup: | 161 | cleanup: |
162 | acpi_ps_delete_parse_tree(op); | 162 | acpi_ps_delete_parse_tree(op); |
163 | return_ACPI_STATUS(status); | 163 | return_ACPI_STATUS(status); |
164 | } | 164 | } |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index d4bfe7b7f90a..2d4c07322576 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -259,7 +259,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, | |||
259 | goto cleanup; | 259 | goto cleanup; |
260 | } | 260 | } |
261 | 261 | ||
262 | cleanup: | 262 | cleanup: |
263 | 263 | ||
264 | /* Remove local reference to the object */ | 264 | /* Remove local reference to the object */ |
265 | 265 | ||
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index a9ffd44c18fe..81a78ba84311 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -292,9 +292,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, | |||
292 | * reentered one more time (even if it is the same thread) | 292 | * reentered one more time (even if it is the same thread) |
293 | */ | 293 | */ |
294 | obj_desc->method.thread_count++; | 294 | obj_desc->method.thread_count++; |
295 | acpi_method_count++; | ||
295 | return_ACPI_STATUS(status); | 296 | return_ACPI_STATUS(status); |
296 | 297 | ||
297 | cleanup: | 298 | cleanup: |
298 | /* On error, must release the method mutex (if present) */ | 299 | /* On error, must release the method mutex (if present) */ |
299 | 300 | ||
300 | if (obj_desc->method.mutex) { | 301 | if (obj_desc->method.mutex) { |
@@ -424,7 +425,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, | |||
424 | 425 | ||
425 | return_ACPI_STATUS(status); | 426 | return_ACPI_STATUS(status); |
426 | 427 | ||
427 | cleanup: | 428 | cleanup: |
428 | 429 | ||
429 | /* On error, we must terminate the method properly */ | 430 | /* On error, we must terminate the method properly */ |
430 | 431 | ||
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 63f0d220ca3d..b1746a68dad1 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c | |||
@@ -240,7 +240,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, | |||
240 | return_ACPI_STATUS(status); | 240 | return_ACPI_STATUS(status); |
241 | } | 241 | } |
242 | 242 | ||
243 | exit: | 243 | exit: |
244 | *obj_desc_ptr = obj_desc; | 244 | *obj_desc_ptr = obj_desc; |
245 | return_ACPI_STATUS(status); | 245 | return_ACPI_STATUS(status); |
246 | } | 246 | } |
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 1fc1ff114f26..5205edcf2c01 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -257,7 +257,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode, | |||
257 | (buffer_desc->common.reference_count + | 257 | (buffer_desc->common.reference_count + |
258 | obj_desc->common.reference_count); | 258 | obj_desc->common.reference_count); |
259 | 259 | ||
260 | cleanup: | 260 | cleanup: |
261 | 261 | ||
262 | /* Always delete the operands */ | 262 | /* Always delete the operands */ |
263 | 263 | ||
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index c666fc014987..ade44e49deb4 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c | |||
@@ -299,7 +299,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
299 | goto result_used; | 299 | goto result_used; |
300 | } | 300 | } |
301 | 301 | ||
302 | result_used: | 302 | result_used: |
303 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, | 303 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, |
304 | "Result of [%s] used by Parent [%s] Op=%p\n", | 304 | "Result of [%s] used by Parent [%s] Op=%p\n", |
305 | acpi_ps_get_opcode_name(op->common.aml_opcode), | 305 | acpi_ps_get_opcode_name(op->common.aml_opcode), |
@@ -308,7 +308,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
308 | 308 | ||
309 | return_UINT8(TRUE); | 309 | return_UINT8(TRUE); |
310 | 310 | ||
311 | result_not_used: | 311 | result_not_used: |
312 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, | 312 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, |
313 | "Result of [%s] not used by Parent [%s] Op=%p\n", | 313 | "Result of [%s] not used by Parent [%s] Op=%p\n", |
314 | acpi_ps_get_opcode_name(op->common.aml_opcode), | 314 | acpi_ps_get_opcode_name(op->common.aml_opcode), |
@@ -752,7 +752,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, | |||
752 | 752 | ||
753 | return_ACPI_STATUS(status); | 753 | return_ACPI_STATUS(status); |
754 | 754 | ||
755 | cleanup: | 755 | cleanup: |
756 | /* | 756 | /* |
757 | * We must undo everything done above; meaning that we must | 757 | * We must undo everything done above; meaning that we must |
758 | * pop everything off of the operand stack and delete those | 758 | * pop everything off of the operand stack and delete those |
@@ -851,7 +851,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state) | |||
851 | goto exit; | 851 | goto exit; |
852 | } | 852 | } |
853 | 853 | ||
854 | push_result: | 854 | push_result: |
855 | 855 | ||
856 | walk_state->result_obj = new_obj_desc; | 856 | walk_state->result_obj = new_obj_desc; |
857 | 857 | ||
@@ -863,7 +863,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state) | |||
863 | op->common.flags |= ACPI_PARSEOP_IN_STACK; | 863 | op->common.flags |= ACPI_PARSEOP_IN_STACK; |
864 | } | 864 | } |
865 | 865 | ||
866 | exit: | 866 | exit: |
867 | 867 | ||
868 | return_ACPI_STATUS(status); | 868 | return_ACPI_STATUS(status); |
869 | } | 869 | } |
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 151d924817e1..1bbb22fd6fa0 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c | |||
@@ -170,7 +170,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, | |||
170 | 170 | ||
171 | (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE); | 171 | (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE); |
172 | 172 | ||
173 | cleanup: | 173 | cleanup: |
174 | 174 | ||
175 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", | 175 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", |
176 | walk_state->control_state->common.value, | 176 | walk_state->control_state->common.value, |
@@ -335,7 +335,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, | |||
335 | 335 | ||
336 | return_ACPI_STATUS(status); | 336 | return_ACPI_STATUS(status); |
337 | 337 | ||
338 | error_exit: | 338 | error_exit: |
339 | status = acpi_ds_method_error(status, walk_state); | 339 | status = acpi_ds_method_error(status, walk_state); |
340 | return_ACPI_STATUS(status); | 340 | return_ACPI_STATUS(status); |
341 | } | 341 | } |
@@ -722,7 +722,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
722 | walk_state->result_obj = NULL; | 722 | walk_state->result_obj = NULL; |
723 | } | 723 | } |
724 | 724 | ||
725 | cleanup: | 725 | cleanup: |
726 | 726 | ||
727 | if (walk_state->result_obj) { | 727 | if (walk_state->result_obj) { |
728 | 728 | ||
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index b1f8f4725c23..7f569d573027 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c | |||
@@ -728,7 +728,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
728 | break; | 728 | break; |
729 | } | 729 | } |
730 | 730 | ||
731 | cleanup: | 731 | cleanup: |
732 | 732 | ||
733 | /* Remove the Node pushed at the very beginning */ | 733 | /* Remove the Node pushed at the very beginning */ |
734 | 734 | ||
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index fdb0a76e40a3..4c67193a9fa7 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c | |||
@@ -173,7 +173,7 @@ static u32 acpi_ev_global_lock_handler(void *context) | |||
173 | 173 | ||
174 | acpi_gbl_global_lock_pending = FALSE; | 174 | acpi_gbl_global_lock_pending = FALSE; |
175 | 175 | ||
176 | cleanup_and_exit: | 176 | cleanup_and_exit: |
177 | 177 | ||
178 | acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags); | 178 | acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags); |
179 | return (ACPI_INTERRUPT_HANDLED); | 179 | return (ACPI_INTERRUPT_HANDLED); |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index c8a1f7d5931f..a9cb4a1a4bb8 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -458,7 +458,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
458 | gpe_block = gpe_block->next; | 458 | gpe_block = gpe_block->next; |
459 | } | 459 | } |
460 | 460 | ||
461 | unlock_and_exit: | 461 | unlock_and_exit: |
462 | 462 | ||
463 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 463 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
464 | return (int_status); | 464 | return (int_status); |
@@ -522,6 +522,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
522 | 522 | ||
523 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 523 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
524 | if (ACPI_FAILURE(status)) { | 524 | if (ACPI_FAILURE(status)) { |
525 | ACPI_FREE(local_gpe_event_info); | ||
525 | return_VOID; | 526 | return_VOID; |
526 | } | 527 | } |
527 | 528 | ||
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index c1aa1eda26c3..a9e76bc4ad97 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -111,7 +111,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | |||
111 | gpe_block->xrupt_block = gpe_xrupt_block; | 111 | gpe_block->xrupt_block = gpe_xrupt_block; |
112 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 112 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
113 | 113 | ||
114 | unlock_and_exit: | 114 | unlock_and_exit: |
115 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 115 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
116 | return_ACPI_STATUS(status); | 116 | return_ACPI_STATUS(status); |
117 | } | 117 | } |
@@ -178,7 +178,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) | |||
178 | ACPI_FREE(gpe_block->event_info); | 178 | ACPI_FREE(gpe_block->event_info); |
179 | ACPI_FREE(gpe_block); | 179 | ACPI_FREE(gpe_block); |
180 | 180 | ||
181 | unlock_and_exit: | 181 | unlock_and_exit: |
182 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 182 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
183 | return_ACPI_STATUS(status); | 183 | return_ACPI_STATUS(status); |
184 | } | 184 | } |
@@ -302,7 +302,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | |||
302 | 302 | ||
303 | return_ACPI_STATUS(AE_OK); | 303 | return_ACPI_STATUS(AE_OK); |
304 | 304 | ||
305 | error_exit: | 305 | error_exit: |
306 | if (gpe_register_info) { | 306 | if (gpe_register_info) { |
307 | ACPI_FREE(gpe_register_info); | 307 | ACPI_FREE(gpe_register_info); |
308 | } | 308 | } |
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 7842700346a4..a3e2f38aadf6 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c | |||
@@ -203,7 +203,7 @@ acpi_status acpi_ev_gpe_initialize(void) | |||
203 | goto cleanup; | 203 | goto cleanup; |
204 | } | 204 | } |
205 | 205 | ||
206 | cleanup: | 206 | cleanup: |
207 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 207 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
208 | return_ACPI_STATUS(AE_OK); | 208 | return_ACPI_STATUS(AE_OK); |
209 | } | 209 | } |
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index b24dbb80fab8..d3f5e1e2a2b1 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context) | |||
101 | gpe_xrupt_info = gpe_xrupt_info->next; | 101 | gpe_xrupt_info = gpe_xrupt_info->next; |
102 | } | 102 | } |
103 | 103 | ||
104 | unlock_and_exit: | 104 | unlock_and_exit: |
105 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 105 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
106 | return_ACPI_STATUS(status); | 106 | return_ACPI_STATUS(status); |
107 | } | 107 | } |
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
196 | * | 196 | * |
197 | * FUNCTION: acpi_ev_get_gpe_xrupt_block | 197 | * FUNCTION: acpi_ev_get_gpe_xrupt_block |
198 | * | 198 | * |
199 | * PARAMETERS: interrupt_number - Interrupt for a GPE block | 199 | * PARAMETERS: interrupt_number - Interrupt for a GPE block |
200 | * | 200 | * |
201 | * RETURN: A GPE interrupt block | 201 | * RETURN: A GPE interrupt block |
202 | * | 202 | * |
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 068af96134b8..e3157313eb27 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c | |||
@@ -129,7 +129,7 @@ acpi_status acpi_ev_install_region_handlers(void) | |||
129 | } | 129 | } |
130 | } | 130 | } |
131 | 131 | ||
132 | unlock_and_exit: | 132 | unlock_and_exit: |
133 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 133 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
134 | return_ACPI_STATUS(status); | 134 | return_ACPI_STATUS(status); |
135 | } | 135 | } |
@@ -531,6 +531,6 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node, | |||
531 | acpi_ev_install_handler, NULL, | 531 | acpi_ev_install_handler, NULL, |
532 | handler_obj, NULL); | 532 | handler_obj, NULL); |
533 | 533 | ||
534 | unlock_and_exit: | 534 | unlock_and_exit: |
535 | return_ACPI_STATUS(status); | 535 | return_ACPI_STATUS(status); |
536 | } | 536 | } |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index 1b111ef74903..a5687540e9a6 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void) | |||
264 | 264 | ||
265 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); | 265 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); |
266 | 266 | ||
267 | /* Remove SCI handler */ | ||
268 | |||
269 | status = acpi_ev_remove_sci_handler(); | ||
270 | if (ACPI_FAILURE(status)) { | ||
271 | ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); | ||
272 | } | ||
273 | |||
274 | status = acpi_ev_remove_global_lock_handler(); | 267 | status = acpi_ev_remove_global_lock_handler(); |
275 | if (ACPI_FAILURE(status)) { | 268 | if (ACPI_FAILURE(status)) { |
276 | ACPI_ERROR((AE_INFO, | 269 | ACPI_ERROR((AE_INFO, |
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void) | |||
280 | acpi_gbl_events_initialized = FALSE; | 273 | acpi_gbl_events_initialized = FALSE; |
281 | } | 274 | } |
282 | 275 | ||
276 | /* Remove SCI handlers */ | ||
277 | |||
278 | status = acpi_ev_remove_all_sci_handlers(); | ||
279 | if (ACPI_FAILURE(status)) { | ||
280 | ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); | ||
281 | } | ||
282 | |||
283 | /* Deallocate all handler objects installed within GPE info structs */ | 283 | /* Deallocate all handler objects installed within GPE info structs */ |
284 | 284 | ||
285 | status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); | 285 | status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); |
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index cea14d6fc76c..144cbb9b73bc 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c | |||
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | |||
217 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { | 217 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { |
218 | region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; | 218 | region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; |
219 | 219 | ||
220 | if (region_obj2->extra.region_context) { | 220 | /* |
221 | 221 | * Save the returned context for use in all accesses to | |
222 | /* The handler for this region was already installed */ | 222 | * the handler for this particular region |
223 | 223 | */ | |
224 | ACPI_FREE(region_context); | 224 | if (!(region_obj2->extra.region_context)) { |
225 | } else { | ||
226 | /* | ||
227 | * Save the returned context for use in all accesses to | ||
228 | * this particular region | ||
229 | */ | ||
230 | region_obj2->extra.region_context = | 225 | region_obj2->extra.region_context = |
231 | region_context; | 226 | region_context; |
232 | } | 227 | } |
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, | |||
402 | handler_obj->address_space. | 397 | handler_obj->address_space. |
403 | context, region_context); | 398 | context, region_context); |
404 | 399 | ||
400 | /* | ||
401 | * region_context should have been released by the deactivate | ||
402 | * operation. We don't need access to it anymore here. | ||
403 | */ | ||
404 | if (region_context) { | ||
405 | *region_context = NULL; | ||
406 | } | ||
407 | |||
405 | /* Init routine may fail, Just ignore errors */ | 408 | /* Init routine may fail, Just ignore errors */ |
406 | 409 | ||
407 | if (ACPI_FAILURE(status)) { | 410 | if (ACPI_FAILURE(status)) { |
@@ -570,10 +573,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) | |||
570 | status = acpi_ns_evaluate(info); | 573 | status = acpi_ns_evaluate(info); |
571 | acpi_ut_remove_reference(args[1]); | 574 | acpi_ut_remove_reference(args[1]); |
572 | 575 | ||
573 | cleanup2: | 576 | cleanup2: |
574 | acpi_ut_remove_reference(args[0]); | 577 | acpi_ut_remove_reference(args[0]); |
575 | 578 | ||
576 | cleanup1: | 579 | cleanup1: |
577 | ACPI_FREE(info); | 580 | ACPI_FREE(info); |
578 | return_ACPI_STATUS(status); | 581 | return_ACPI_STATUS(status); |
579 | } | 582 | } |
@@ -758,7 +761,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) | |||
758 | 761 | ||
759 | status = acpi_evaluate_object(reg_method, NULL, &args, NULL); | 762 | status = acpi_evaluate_object(reg_method, NULL, &args, NULL); |
760 | 763 | ||
761 | exit: | 764 | exit: |
762 | /* We ignore all errors from above, don't care */ | 765 | /* We ignore all errors from above, don't care */ |
763 | 766 | ||
764 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 767 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c index b905acf7aacd..9e9e3454d893 100644 --- a/drivers/acpi/acpica/evsci.c +++ b/drivers/acpi/acpica/evsci.c | |||
@@ -54,6 +54,50 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context); | |||
54 | 54 | ||
55 | /******************************************************************************* | 55 | /******************************************************************************* |
56 | * | 56 | * |
57 | * FUNCTION: acpi_ev_sci_dispatch | ||
58 | * | ||
59 | * PARAMETERS: None | ||
60 | * | ||
61 | * RETURN: Status code indicating whether the interrupt was handled. | ||
62 | * | ||
63 | * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers. | ||
64 | * | ||
65 | ******************************************************************************/ | ||
66 | |||
67 | u32 acpi_ev_sci_dispatch(void) | ||
68 | { | ||
69 | struct acpi_sci_handler_info *sci_handler; | ||
70 | acpi_cpu_flags flags; | ||
71 | u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; | ||
72 | |||
73 | ACPI_FUNCTION_NAME(ev_sci_dispatch); | ||
74 | |||
75 | /* Are there any host-installed SCI handlers? */ | ||
76 | |||
77 | if (!acpi_gbl_sci_handler_list) { | ||
78 | return (int_status); | ||
79 | } | ||
80 | |||
81 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
82 | |||
83 | /* Invoke all host-installed SCI handlers */ | ||
84 | |||
85 | sci_handler = acpi_gbl_sci_handler_list; | ||
86 | while (sci_handler) { | ||
87 | |||
88 | /* Invoke the installed handler (at interrupt level) */ | ||
89 | |||
90 | int_status |= sci_handler->address(sci_handler->context); | ||
91 | |||
92 | sci_handler = sci_handler->next; | ||
93 | } | ||
94 | |||
95 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
96 | return (int_status); | ||
97 | } | ||
98 | |||
99 | /******************************************************************************* | ||
100 | * | ||
57 | * FUNCTION: acpi_ev_sci_xrupt_handler | 101 | * FUNCTION: acpi_ev_sci_xrupt_handler |
58 | * | 102 | * |
59 | * PARAMETERS: context - Calling Context | 103 | * PARAMETERS: context - Calling Context |
@@ -89,6 +133,11 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context) | |||
89 | */ | 133 | */ |
90 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); | 134 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); |
91 | 135 | ||
136 | /* Invoke all host-installed SCI handlers */ | ||
137 | |||
138 | interrupt_handled |= acpi_ev_sci_dispatch(); | ||
139 | |||
140 | acpi_sci_count++; | ||
92 | return_UINT32(interrupt_handled); | 141 | return_UINT32(interrupt_handled); |
93 | } | 142 | } |
94 | 143 | ||
@@ -112,14 +161,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context) | |||
112 | ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler); | 161 | ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler); |
113 | 162 | ||
114 | /* | 163 | /* |
115 | * We are guaranteed by the ACPI CA initialization/shutdown code that | 164 | * We are guaranteed by the ACPICA initialization/shutdown code that |
116 | * if this interrupt handler is installed, ACPI is enabled. | 165 | * if this interrupt handler is installed, ACPI is enabled. |
117 | */ | 166 | */ |
118 | 167 | ||
119 | /* GPEs: Check for and dispatch any GPEs that have occurred */ | 168 | /* GPEs: Check for and dispatch any GPEs that have occurred */ |
120 | 169 | ||
121 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); | 170 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); |
122 | |||
123 | return_UINT32(interrupt_handled); | 171 | return_UINT32(interrupt_handled); |
124 | } | 172 | } |
125 | 173 | ||
@@ -150,15 +198,15 @@ u32 acpi_ev_install_sci_handler(void) | |||
150 | 198 | ||
151 | /****************************************************************************** | 199 | /****************************************************************************** |
152 | * | 200 | * |
153 | * FUNCTION: acpi_ev_remove_sci_handler | 201 | * FUNCTION: acpi_ev_remove_all_sci_handlers |
154 | * | 202 | * |
155 | * PARAMETERS: none | 203 | * PARAMETERS: none |
156 | * | 204 | * |
157 | * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not | 205 | * RETURN: AE_OK if handler uninstalled, AE_ERROR if handler was not |
158 | * installed to begin with | 206 | * installed to begin with |
159 | * | 207 | * |
160 | * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be | 208 | * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be |
161 | * taken. | 209 | * taken. Remove all host-installed SCI handlers. |
162 | * | 210 | * |
163 | * Note: It doesn't seem important to disable all events or set the event | 211 | * Note: It doesn't seem important to disable all events or set the event |
164 | * enable registers to their original values. The OS should disable | 212 | * enable registers to their original values. The OS should disable |
@@ -167,11 +215,13 @@ u32 acpi_ev_install_sci_handler(void) | |||
167 | * | 215 | * |
168 | ******************************************************************************/ | 216 | ******************************************************************************/ |
169 | 217 | ||
170 | acpi_status acpi_ev_remove_sci_handler(void) | 218 | acpi_status acpi_ev_remove_all_sci_handlers(void) |
171 | { | 219 | { |
220 | struct acpi_sci_handler_info *sci_handler; | ||
221 | acpi_cpu_flags flags; | ||
172 | acpi_status status; | 222 | acpi_status status; |
173 | 223 | ||
174 | ACPI_FUNCTION_TRACE(ev_remove_sci_handler); | 224 | ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers); |
175 | 225 | ||
176 | /* Just let the OS remove the handler and disable the level */ | 226 | /* Just let the OS remove the handler and disable the level */ |
177 | 227 | ||
@@ -179,6 +229,21 @@ acpi_status acpi_ev_remove_sci_handler(void) | |||
179 | acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt, | 229 | acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt, |
180 | acpi_ev_sci_xrupt_handler); | 230 | acpi_ev_sci_xrupt_handler); |
181 | 231 | ||
232 | if (!acpi_gbl_sci_handler_list) { | ||
233 | return (status); | ||
234 | } | ||
235 | |||
236 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
237 | |||
238 | /* Free all host-installed SCI handlers */ | ||
239 | |||
240 | while (acpi_gbl_sci_handler_list) { | ||
241 | sci_handler = acpi_gbl_sci_handler_list; | ||
242 | acpi_gbl_sci_handler_list = sci_handler->next; | ||
243 | ACPI_FREE(sci_handler); | ||
244 | } | ||
245 | |||
246 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
182 | return_ACPI_STATUS(status); | 247 | return_ACPI_STATUS(status); |
183 | } | 248 | } |
184 | 249 | ||
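acpi_ev_sci_dispatch() walks the new global handler list at interrupt level and ORs each handler's return value into the "interrupt handled" status, so a single SCI can be claimed by the ACPICA core, by a host handler, or by both. A hedged sketch of what such a host callback looks like (struct my_dev and my_dev_caused_sci are hypothetical):

    #include <linux/types.h>
    #include <linux/acpi.h>

    struct my_dev;                                       /* hypothetical driver state */
    static bool my_dev_caused_sci(struct my_dev *dev);   /* hypothetical status check */

    static u32 my_sci_handler(void *context)
    {
            struct my_dev *dev = context;

            if (!my_dev_caused_sci(dev))
                    return ACPI_INTERRUPT_NOT_HANDLED;

            /* Acknowledge/clear the event source here. Keep it short: this
             * runs at interrupt level, under the ACPICA GPE lock. */
            return ACPI_INTERRUPT_HANDLED;
    }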
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index ca5fba99c33b..23a7fadca412 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acnamesp.h" | 48 | #include "acnamesp.h" |
@@ -374,7 +375,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler) | |||
374 | 375 | ||
375 | acpi_gbl_exception_handler = handler; | 376 | acpi_gbl_exception_handler = handler; |
376 | 377 | ||
377 | cleanup: | 378 | cleanup: |
378 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 379 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
379 | return_ACPI_STATUS(status); | 380 | return_ACPI_STATUS(status); |
380 | } | 381 | } |
@@ -385,6 +386,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | |||
385 | #if (!ACPI_REDUCED_HARDWARE) | 386 | #if (!ACPI_REDUCED_HARDWARE) |
386 | /******************************************************************************* | 387 | /******************************************************************************* |
387 | * | 388 | * |
389 | * FUNCTION: acpi_install_sci_handler | ||
390 | * | ||
391 | * PARAMETERS: address - Address of the handler | ||
392 | * context - Value passed to the handler on each SCI | ||
393 | * | ||
394 | * RETURN: Status | ||
395 | * | ||
396 | * DESCRIPTION: Install a handler for a System Control Interrupt. | ||
397 | * | ||
398 | ******************************************************************************/ | ||
399 | acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context) | ||
400 | { | ||
401 | struct acpi_sci_handler_info *new_sci_handler; | ||
402 | struct acpi_sci_handler_info *sci_handler; | ||
403 | acpi_cpu_flags flags; | ||
404 | acpi_status status; | ||
405 | |||
406 | ACPI_FUNCTION_TRACE(acpi_install_sci_handler); | ||
407 | |||
408 | if (!address) { | ||
409 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
410 | } | ||
411 | |||
412 | /* Allocate and init a handler object */ | ||
413 | |||
414 | new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info)); | ||
415 | if (!new_sci_handler) { | ||
416 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
417 | } | ||
418 | |||
419 | new_sci_handler->address = address; | ||
420 | new_sci_handler->context = context; | ||
421 | |||
422 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
423 | if (ACPI_FAILURE(status)) { | ||
424 | goto exit; | ||
425 | } | ||
426 | |||
427 | /* Lock list during installation */ | ||
428 | |||
429 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
430 | sci_handler = acpi_gbl_sci_handler_list; | ||
431 | |||
432 | /* Ensure handler does not already exist */ | ||
433 | |||
434 | while (sci_handler) { | ||
435 | if (address == sci_handler->address) { | ||
436 | status = AE_ALREADY_EXISTS; | ||
437 | goto unlock_and_exit; | ||
438 | } | ||
439 | |||
440 | sci_handler = sci_handler->next; | ||
441 | } | ||
442 | |||
443 | /* Install the new handler into the global list (at head) */ | ||
444 | |||
445 | new_sci_handler->next = acpi_gbl_sci_handler_list; | ||
446 | acpi_gbl_sci_handler_list = new_sci_handler; | ||
447 | |||
448 | unlock_and_exit: | ||
449 | |||
450 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
451 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
452 | |||
453 | exit: | ||
454 | if (ACPI_FAILURE(status)) { | ||
455 | ACPI_FREE(new_sci_handler); | ||
456 | } | ||
457 | return_ACPI_STATUS(status); | ||
458 | } | ||
459 | |||
460 | /******************************************************************************* | ||
461 | * | ||
462 | * FUNCTION: acpi_remove_sci_handler | ||
463 | * | ||
464 | * PARAMETERS: address - Address of the handler | ||
465 | * | ||
466 | * RETURN: Status | ||
467 | * | ||
468 | * DESCRIPTION: Remove a handler for a System Control Interrupt. | ||
469 | * | ||
470 | ******************************************************************************/ | ||
471 | |||
472 | acpi_status acpi_remove_sci_handler(acpi_sci_handler address) | ||
473 | { | ||
474 | struct acpi_sci_handler_info *prev_sci_handler; | ||
475 | struct acpi_sci_handler_info *next_sci_handler; | ||
476 | acpi_cpu_flags flags; | ||
477 | acpi_status status; | ||
478 | |||
479 | ACPI_FUNCTION_TRACE(acpi_remove_sci_handler); | ||
480 | |||
481 | if (!address) { | ||
482 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
483 | } | ||
484 | |||
485 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
486 | if (ACPI_FAILURE(status)) { | ||
487 | return_ACPI_STATUS(status); | ||
488 | } | ||
489 | |||
490 | /* Remove the SCI handler with lock */ | ||
491 | |||
492 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
493 | |||
494 | prev_sci_handler = NULL; | ||
495 | next_sci_handler = acpi_gbl_sci_handler_list; | ||
496 | while (next_sci_handler) { | ||
497 | if (next_sci_handler->address == address) { | ||
498 | |||
499 | /* Unlink and free the SCI handler info block */ | ||
500 | |||
501 | if (prev_sci_handler) { | ||
502 | prev_sci_handler->next = next_sci_handler->next; | ||
503 | } else { | ||
504 | acpi_gbl_sci_handler_list = | ||
505 | next_sci_handler->next; | ||
506 | } | ||
507 | |||
508 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
509 | ACPI_FREE(next_sci_handler); | ||
510 | goto unlock_and_exit; | ||
511 | } | ||
512 | |||
513 | prev_sci_handler = next_sci_handler; | ||
514 | next_sci_handler = next_sci_handler->next; | ||
515 | } | ||
516 | |||
517 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
518 | status = AE_NOT_EXIST; | ||
519 | |||
520 | unlock_and_exit: | ||
521 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
522 | return_ACPI_STATUS(status); | ||
523 | } | ||
524 | |||
525 | /******************************************************************************* | ||
526 | * | ||
388 | * FUNCTION: acpi_install_global_event_handler | 527 | * FUNCTION: acpi_install_global_event_handler |
389 | * | 528 | * |
390 | * PARAMETERS: handler - Pointer to the global event handler function | 529 | * PARAMETERS: handler - Pointer to the global event handler function |
@@ -398,6 +537,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | |||
398 | * Can be used to update event counters, etc. | 537 | * Can be used to update event counters, etc. |
399 | * | 538 | * |
400 | ******************************************************************************/ | 539 | ******************************************************************************/ |
540 | |||
401 | acpi_status | 541 | acpi_status |
402 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) | 542 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) |
403 | { | 543 | { |
@@ -426,7 +566,7 @@ acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) | |||
426 | acpi_gbl_global_event_handler = handler; | 566 | acpi_gbl_global_event_handler = handler; |
427 | acpi_gbl_global_event_handler_context = context; | 567 | acpi_gbl_global_event_handler_context = context; |
428 | 568 | ||
429 | cleanup: | 569 | cleanup: |
430 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 570 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
431 | return_ACPI_STATUS(status); | 571 | return_ACPI_STATUS(status); |
432 | } | 572 | } |
@@ -498,7 +638,7 @@ acpi_install_fixed_event_handler(u32 event, | |||
498 | handler)); | 638 | handler)); |
499 | } | 639 | } |
500 | 640 | ||
501 | cleanup: | 641 | cleanup: |
502 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 642 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
503 | return_ACPI_STATUS(status); | 643 | return_ACPI_STATUS(status); |
504 | } | 644 | } |
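acpi_install_sci_handler() and acpi_remove_sci_handler() are the external faces of that handler list: install rejects duplicates with AE_ALREADY_EXISTS and frees the new node on any failure, while remove returns AE_NOT_EXIST if the address was never registered. A short usage sketch, reusing the hypothetical my_sci_handler callback and struct my_dev from the evsci.c note above:

    #include <linux/acpi.h>

    static int demo_register_sci(struct my_dev *dev)
    {
            acpi_status status;

            status = acpi_install_sci_handler(my_sci_handler, dev);
            if (ACPI_FAILURE(status))
                    return -ENODEV;   /* AE_ALREADY_EXISTS, AE_NO_MEMORY, ... */

            return 0;
    }

    static void demo_unregister_sci(void)
    {
            acpi_status status = acpi_remove_sci_handler(my_sci_handler);

            if (ACPI_FAILURE(status))
                    pr_warn("SCI handler was not installed\n");
    }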
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 7039606a0ba8..39d06af5e347 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "actables.h" | 48 | #include "actables.h" |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 7662f1a42ff6..5713da77c665 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acevents.h" | 48 | #include "acevents.h" |
@@ -471,7 +472,7 @@ acpi_get_gpe_status(acpi_handle gpe_device, | |||
471 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | 472 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) |
472 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | 473 | *event_status |= ACPI_EVENT_FLAG_HANDLE; |
473 | 474 | ||
474 | unlock_and_exit: | 475 | unlock_and_exit: |
475 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 476 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
476 | return_ACPI_STATUS(status); | 477 | return_ACPI_STATUS(status); |
477 | } | 478 | } |
@@ -624,7 +625,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, | |||
624 | 625 | ||
625 | obj_desc->device.gpe_block = gpe_block; | 626 | obj_desc->device.gpe_block = gpe_block; |
626 | 627 | ||
627 | unlock_and_exit: | 628 | unlock_and_exit: |
628 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 629 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
629 | return_ACPI_STATUS(status); | 630 | return_ACPI_STATUS(status); |
630 | } | 631 | } |
@@ -679,7 +680,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | |||
679 | obj_desc->device.gpe_block = NULL; | 680 | obj_desc->device.gpe_block = NULL; |
680 | } | 681 | } |
681 | 682 | ||
682 | unlock_and_exit: | 683 | unlock_and_exit: |
683 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 684 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
684 | return_ACPI_STATUS(status); | 685 | return_ACPI_STATUS(status); |
685 | } | 686 | } |
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 80cecf838591..02ed75ac56cd 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c | |||
@@ -42,7 +42,8 @@ | |||
42 | * POSSIBILITY OF SUCH DAMAGES. | 42 | * POSSIBILITY OF SUCH DAMAGES. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/export.h> | 45 | #define EXPORT_ACPI_INTERFACES |
46 | |||
46 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 48 | #include "accommon.h" |
48 | #include "acnamesp.h" | 49 | #include "acnamesp.h" |
@@ -147,7 +148,7 @@ acpi_install_address_space_handler(acpi_handle device, | |||
147 | 148 | ||
148 | status = acpi_ev_execute_reg_methods(node, space_id); | 149 | status = acpi_ev_execute_reg_methods(node, space_id); |
149 | 150 | ||
150 | unlock_and_exit: | 151 | unlock_and_exit: |
151 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 152 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
152 | return_ACPI_STATUS(status); | 153 | return_ACPI_STATUS(status); |
153 | } | 154 | } |
@@ -286,7 +287,7 @@ acpi_remove_address_space_handler(acpi_handle device, | |||
286 | 287 | ||
287 | status = AE_NOT_EXIST; | 288 | status = AE_NOT_EXIST; |
288 | 289 | ||
289 | unlock_and_exit: | 290 | unlock_and_exit: |
290 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 291 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
291 | return_ACPI_STATUS(status); | 292 | return_ACPI_STATUS(status); |
292 | } | 293 | } |
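Context for the two interfaces above: installing an operation-region handler also runs the matching _REG methods (the acpi_ev_execute_reg_methods() call visible in the hunk), so AML can start using the region as soon as the install call returns. A hedged caller sketch, modelled on how in-tree drivers use the API; the space ID value and the handler body are placeholders:

#include <linux/acpi.h>

#define EXAMPLE_SPACE_ID	0x91	/* placeholder vendor-defined space */

static acpi_status example_space_handler(u32 function,
					 acpi_physical_address address,
					 u32 bit_width, u64 *value,
					 void *handler_context,
					 void *region_context)
{
	if (function == ACPI_READ)
		*value = 0;		/* placeholder: return zeros */
	return AE_OK;			/* writes are silently accepted */
}

static acpi_status example_install(acpi_handle device)
{
	/* The setup callback may be NULL; _REG methods for this space run
	 * before acpi_install_address_space_handler() returns. */
	return acpi_install_address_space_handler(device, EXAMPLE_SPACE_ID,
						  example_space_handler,
						  NULL, NULL);
}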
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 269e81d86ef4..3c2e6dcdad3e 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c | |||
@@ -193,7 +193,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state) | |||
193 | acpi_ns_attach_object((struct acpi_namespace_node *)walk_state-> | 193 | acpi_ns_attach_object((struct acpi_namespace_node *)walk_state-> |
194 | operands[0], obj_desc, ACPI_TYPE_EVENT); | 194 | operands[0], obj_desc, ACPI_TYPE_EVENT); |
195 | 195 | ||
196 | cleanup: | 196 | cleanup: |
197 | /* | 197 | /* |
198 | * Remove local reference to the object (on error, will cause deletion | 198 | * Remove local reference to the object (on error, will cause deletion |
199 | * of both object and semaphore if present.) | 199 | * of both object and semaphore if present.) |
@@ -248,7 +248,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) | |||
248 | acpi_ns_attach_object(obj_desc->mutex.node, obj_desc, | 248 | acpi_ns_attach_object(obj_desc->mutex.node, obj_desc, |
249 | ACPI_TYPE_MUTEX); | 249 | ACPI_TYPE_MUTEX); |
250 | 250 | ||
251 | cleanup: | 251 | cleanup: |
252 | /* | 252 | /* |
253 | * Remove local reference to the object (on error, will cause deletion | 253 | * Remove local reference to the object (on error, will cause deletion |
254 | * of both object and semaphore if present.) | 254 | * of both object and semaphore if present.) |
@@ -347,7 +347,7 @@ acpi_ex_create_region(u8 * aml_start, | |||
347 | 347 | ||
348 | status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION); | 348 | status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION); |
349 | 349 | ||
350 | cleanup: | 350 | cleanup: |
351 | 351 | ||
352 | /* Remove local reference to the object */ | 352 | /* Remove local reference to the object */ |
353 | 353 | ||
@@ -520,7 +520,7 @@ acpi_ex_create_method(u8 * aml_start, | |||
520 | 520 | ||
521 | acpi_ut_remove_reference(obj_desc); | 521 | acpi_ut_remove_reference(obj_desc); |
522 | 522 | ||
523 | exit: | 523 | exit: |
524 | /* Remove a reference to the operand */ | 524 | /* Remove a reference to the operand */ |
525 | 525 | ||
526 | acpi_ut_remove_reference(operand[1]); | 526 | acpi_ut_remove_reference(operand[1]); |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index c2a65aaf29af..cfd875243421 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
@@ -197,7 +197,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, | |||
197 | status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length); | 197 | status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length); |
198 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); | 198 | acpi_ex_release_global_lock(obj_desc->common_field.field_flags); |
199 | 199 | ||
200 | exit: | 200 | exit: |
201 | if (ACPI_FAILURE(status)) { | 201 | if (ACPI_FAILURE(status)) { |
202 | acpi_ut_remove_reference(buffer_desc); | 202 | acpi_ut_remove_reference(buffer_desc); |
203 | } else { | 203 | } else { |
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 7e0afe72487e..49fb742d61b9 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -123,12 +123,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
123 | } | 123 | } |
124 | } | 124 | } |
125 | 125 | ||
126 | /* Exit if Address/Length have been disallowed by the host OS */ | ||
127 | |||
128 | if (rgn_desc->common.flags & AOPOBJ_INVALID) { | ||
129 | return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS); | ||
130 | } | ||
131 | |||
132 | /* | 126 | /* |
133 | * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear | 127 | * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear |
134 | * address space and the request cannot be directly validated | 128 | * address space and the request cannot be directly validated |
@@ -1002,7 +996,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
1002 | mask, merged_datum, | 996 | mask, merged_datum, |
1003 | field_offset); | 997 | field_offset); |
1004 | 998 | ||
1005 | exit: | 999 | exit: |
1006 | /* Free temporary buffer if we used one */ | 1000 | /* Free temporary buffer if we used one */ |
1007 | 1001 | ||
1008 | if (new_buffer) { | 1002 | if (new_buffer) { |
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 00bf29877574..65d93607f368 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c | |||
@@ -388,7 +388,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, | |||
388 | 388 | ||
389 | *actual_return_desc = return_desc; | 389 | *actual_return_desc = return_desc; |
390 | 390 | ||
391 | cleanup: | 391 | cleanup: |
392 | if (local_operand1 != operand1) { | 392 | if (local_operand1 != operand1) { |
393 | acpi_ut_remove_reference(local_operand1); | 393 | acpi_ut_remove_reference(local_operand1); |
394 | } | 394 | } |
@@ -718,7 +718,7 @@ acpi_ex_do_logical_op(u16 opcode, | |||
718 | } | 718 | } |
719 | } | 719 | } |
720 | 720 | ||
721 | cleanup: | 721 | cleanup: |
722 | 722 | ||
723 | /* New object was created if implicit conversion performed - delete */ | 723 | /* New object was created if implicit conversion performed - delete */ |
724 | 724 | ||
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 2cdd41d8ade6..d74cea416ca0 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c | |||
@@ -115,7 +115,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state) | |||
115 | break; | 115 | break; |
116 | } | 116 | } |
117 | 117 | ||
118 | cleanup: | 118 | cleanup: |
119 | 119 | ||
120 | /* Delete return object on error */ | 120 | /* Delete return object on error */ |
121 | 121 | ||
@@ -234,7 +234,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state) | |||
234 | goto cleanup; | 234 | goto cleanup; |
235 | } | 235 | } |
236 | 236 | ||
237 | cleanup: | 237 | cleanup: |
238 | 238 | ||
239 | return_ACPI_STATUS(status); | 239 | return_ACPI_STATUS(status); |
240 | } | 240 | } |
@@ -551,7 +551,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) | |||
551 | status = acpi_ex_store(return_desc, operand[1], walk_state); | 551 | status = acpi_ex_store(return_desc, operand[1], walk_state); |
552 | } | 552 | } |
553 | 553 | ||
554 | cleanup: | 554 | cleanup: |
555 | 555 | ||
556 | /* Delete return object on error */ | 556 | /* Delete return object on error */ |
557 | 557 | ||
@@ -1054,7 +1054,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
1054 | goto cleanup; | 1054 | goto cleanup; |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | cleanup: | 1057 | cleanup: |
1058 | 1058 | ||
1059 | /* Delete return object on error */ | 1059 | /* Delete return object on error */ |
1060 | 1060 | ||
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index d5088f7030c7..d6fa0fce1fc9 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c | |||
@@ -215,7 +215,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state) | |||
215 | goto cleanup; | 215 | goto cleanup; |
216 | } | 216 | } |
217 | 217 | ||
218 | cleanup: | 218 | cleanup: |
219 | /* | 219 | /* |
220 | * Since the remainder is not returned indirectly, remove a reference to | 220 | * Since the remainder is not returned indirectly, remove a reference to |
221 | * it. Only the quotient is returned indirectly. | 221 | * it. Only the quotient is returned indirectly. |
@@ -445,7 +445,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) | |||
445 | break; | 445 | break; |
446 | } | 446 | } |
447 | 447 | ||
448 | store_result_to_target: | 448 | store_result_to_target: |
449 | 449 | ||
450 | if (ACPI_SUCCESS(status)) { | 450 | if (ACPI_SUCCESS(status)) { |
451 | /* | 451 | /* |
@@ -462,7 +462,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) | |||
462 | } | 462 | } |
463 | } | 463 | } |
464 | 464 | ||
465 | cleanup: | 465 | cleanup: |
466 | 466 | ||
467 | /* Delete return object on error */ | 467 | /* Delete return object on error */ |
468 | 468 | ||
@@ -553,7 +553,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state) | |||
553 | goto cleanup; | 553 | goto cleanup; |
554 | } | 554 | } |
555 | 555 | ||
556 | store_logical_result: | 556 | store_logical_result: |
557 | /* | 557 | /* |
558 | * Set return value according to logical_result. logical TRUE (all ones) | 558 |

559 | * Default is FALSE (zero) | 559 | * Default is FALSE (zero) |
@@ -562,7 +562,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state) | |||
562 | return_desc->integer.value = ACPI_UINT64_MAX; | 562 | return_desc->integer.value = ACPI_UINT64_MAX; |
563 | } | 563 | } |
564 | 564 | ||
565 | cleanup: | 565 | cleanup: |
566 | 566 | ||
567 | /* Delete return object on error */ | 567 | /* Delete return object on error */ |
568 | 568 | ||
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 37656f12f204..bc042adf8804 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c | |||
@@ -124,7 +124,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state) | |||
124 | goto cleanup; | 124 | goto cleanup; |
125 | } | 125 | } |
126 | 126 | ||
127 | cleanup: | 127 | cleanup: |
128 | 128 | ||
129 | return_ACPI_STATUS(status); | 129 | return_ACPI_STATUS(status); |
130 | } | 130 | } |
@@ -252,7 +252,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) | |||
252 | 252 | ||
253 | status = acpi_ex_store(return_desc, operand[3], walk_state); | 253 | status = acpi_ex_store(return_desc, operand[3], walk_state); |
254 | 254 | ||
255 | cleanup: | 255 | cleanup: |
256 | 256 | ||
257 | /* Delete return object on error */ | 257 | /* Delete return object on error */ |
258 | 258 | ||
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 879b6cd8319c..4459e32c683d 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c | |||
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state) | |||
314 | goto cleanup; | 314 | goto cleanup; |
315 | } | 315 | } |
316 | 316 | ||
317 | cleanup: | 317 | cleanup: |
318 | 318 | ||
319 | /* Delete return object on error */ | 319 | /* Delete return object on error */ |
320 | 320 | ||
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 303429bb4d5d..9d28867e60dc 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c | |||
@@ -400,6 +400,7 @@ acpi_ex_pci_config_space_handler(u32 function, | |||
400 | switch (function) { | 400 | switch (function) { |
401 | case ACPI_READ: | 401 | case ACPI_READ: |
402 | 402 | ||
403 | *value = 0; | ||
403 | status = acpi_os_read_pci_configuration(pci_id, pci_register, | 404 | status = acpi_os_read_pci_configuration(pci_id, pci_register, |
404 | value, bit_width); | 405 | value, bit_width); |
405 | break; | 406 | break; |
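The single added line above pre-clears the 64-bit output before the PCI configuration read; presumably this is because an OSL implementation is only obliged to fill in bit_width bits, so a narrow read could otherwise leave stale upper bits behind. A standalone illustration of that failure mode (fake_pci_read16() is invented and only writes the low 16 bits):

#include <stdint.h>
#include <stdio.h>

/* Invented OSL stand-in: a 16-bit config read that only touches bits 0..15 */
static void fake_pci_read16(uint64_t *value)
{
	*value = (*value & ~(uint64_t)0xFFFF) | 0x1234;
}

int main(void)
{
	uint64_t value = 0xDEADBEEF00000000ULL;	/* stale data from earlier */

	fake_pci_read16(&value);
	printf("without pre-clear: %#llx\n", (unsigned long long)value);

	value = 0;				/* the added line in the hunk */
	fake_pci_read16(&value);
	printf("with pre-clear:    %#llx\n", (unsigned long long)value);
	return 0;
}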
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index ac04278ad28f..1606524312e3 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c | |||
@@ -521,7 +521,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state, | |||
521 | */ | 521 | */ |
522 | type = obj_desc->common.type; | 522 | type = obj_desc->common.type; |
523 | 523 | ||
524 | exit: | 524 | exit: |
525 | /* Convert internal types to external types */ | 525 | /* Convert internal types to external types */ |
526 | 526 | ||
527 | switch (type) { | 527 | switch (type) { |
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index 00e5af7129c1..be3f66973ee8 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c | |||
@@ -683,7 +683,7 @@ acpi_ex_resolve_operands(u16 opcode, | |||
683 | return_ACPI_STATUS(status); | 683 | return_ACPI_STATUS(status); |
684 | } | 684 | } |
685 | 685 | ||
686 | next_operand: | 686 | next_operand: |
687 | /* | 687 | /* |
688 | * If more operands needed, decrement stack_ptr to point | 688 | * If more operands needed, decrement stack_ptr to point |
689 | * to next operand on stack | 689 | * to next operand on stack |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 8d2e866be15f..12e6cff54f78 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -560,7 +560,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) | |||
560 | break; | 560 | break; |
561 | } | 561 | } |
562 | 562 | ||
563 | exit: | 563 | exit: |
564 | return_ACPI_STATUS(status); | 564 | return_ACPI_STATUS(status); |
565 | } | 565 | } |
566 | 566 | ||
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 2d7d22ebc782..3c498dc1636e 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | 48 | ||
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 5ee7a814cd92..b4b47db2dee2 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acnamesp.h" | 48 | #include "acnamesp.h" |
@@ -83,11 +84,17 @@ acpi_status acpi_reset(void) | |||
83 | * For I/O space, write directly to the OSL. This bypasses the port | 84 | * For I/O space, write directly to the OSL. This bypasses the port |
84 | * validation mechanism, which may block a valid write to the reset | 85 | * validation mechanism, which may block a valid write to the reset |
85 | * register. | 86 | * register. |
86 | * Spec section 4.7.3.6 requires register width to be 8. | 87 | * |
88 | * NOTE: | ||
89 | * The ACPI spec requires the reset register width to be 8, so we | ||
90 | * hardcode it here and ignore the FADT value. This maintains | ||
91 | * compatibility with other ACPI implementations that have allowed | ||
92 | * BIOS code with bad register width values to go unnoticed. | ||
87 | */ | 93 | */ |
88 | status = | 94 | status = |
89 | acpi_os_write_port((acpi_io_address) reset_reg->address, | 95 | acpi_os_write_port((acpi_io_address) reset_reg->address, |
90 | acpi_gbl_FADT.reset_value, 8); | 96 | acpi_gbl_FADT.reset_value, |
97 | ACPI_RESET_REGISTER_WIDTH); | ||
91 | } else { | 98 | } else { |
92 | /* Write the reset value to the reset register */ | 99 | /* Write the reset value to the reset register */ |
93 | 100 | ||
@@ -119,7 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset) | |||
119 | ******************************************************************************/ | 126 | ******************************************************************************/ |
120 | acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) | 127 | acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) |
121 | { | 128 | { |
122 | u32 value; | 129 | u32 value_lo; |
130 | u32 value_hi; | ||
123 | u32 width; | 131 | u32 width; |
124 | u64 address; | 132 | u64 address; |
125 | acpi_status status; | 133 | acpi_status status; |
@@ -137,13 +145,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) | |||
137 | return (status); | 145 | return (status); |
138 | } | 146 | } |
139 | 147 | ||
140 | /* Initialize entire 64-bit return value to zero */ | ||
141 | |||
142 | *return_value = 0; | ||
143 | value = 0; | ||
144 | |||
145 | /* | 148 | /* |
146 | * Two address spaces supported: Memory or IO. PCI_Config is | 149 | * Two address spaces supported: Memory or I/O. PCI_Config is |
147 | * not supported here because the GAS structure is insufficient | 150 | * not supported here because the GAS structure is insufficient |
148 | */ | 151 | */ |
149 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 152 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
@@ -155,29 +158,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) | |||
155 | } | 158 | } |
156 | } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ | 159 | } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ |
157 | 160 | ||
161 | value_lo = 0; | ||
162 | value_hi = 0; | ||
163 | |||
158 | width = reg->bit_width; | 164 | width = reg->bit_width; |
159 | if (width == 64) { | 165 | if (width == 64) { |
160 | width = 32; /* Break into two 32-bit transfers */ | 166 | width = 32; /* Break into two 32-bit transfers */ |
161 | } | 167 | } |
162 | 168 | ||
163 | status = acpi_hw_read_port((acpi_io_address) | 169 | status = acpi_hw_read_port((acpi_io_address) |
164 | address, &value, width); | 170 | address, &value_lo, width); |
165 | if (ACPI_FAILURE(status)) { | 171 | if (ACPI_FAILURE(status)) { |
166 | return (status); | 172 | return (status); |
167 | } | 173 | } |
168 | *return_value = value; | ||
169 | 174 | ||
170 | if (reg->bit_width == 64) { | 175 | if (reg->bit_width == 64) { |
171 | 176 | ||
172 | /* Read the top 32 bits */ | 177 | /* Read the top 32 bits */ |
173 | 178 | ||
174 | status = acpi_hw_read_port((acpi_io_address) | 179 | status = acpi_hw_read_port((acpi_io_address) |
175 | (address + 4), &value, 32); | 180 | (address + 4), &value_hi, |
181 | 32); | ||
176 | if (ACPI_FAILURE(status)) { | 182 | if (ACPI_FAILURE(status)) { |
177 | return (status); | 183 | return (status); |
178 | } | 184 | } |
179 | *return_value |= ((u64)value << 32); | ||
180 | } | 185 | } |
186 | |||
187 | /* Set the return value only if status is AE_OK */ | ||
188 | |||
189 | *return_value = (value_lo | ((u64)value_hi << 32)); | ||
181 | } | 190 | } |
182 | 191 | ||
183 | ACPI_DEBUG_PRINT((ACPI_DB_IO, | 192 | ACPI_DEBUG_PRINT((ACPI_DB_IO, |
@@ -186,7 +195,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) | |||
186 | ACPI_FORMAT_UINT64(address), | 195 | ACPI_FORMAT_UINT64(address), |
187 | acpi_ut_get_region_name(reg->space_id))); | 196 | acpi_ut_get_region_name(reg->space_id))); |
188 | 197 | ||
189 | return (status); | 198 | return (AE_OK); |
190 | } | 199 | } |
191 | 200 | ||
192 | ACPI_EXPORT_SYMBOL(acpi_read) | 201 | ACPI_EXPORT_SYMBOL(acpi_read) |
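For the I/O-space path reworked above: a 64-bit generic address register is read as two 32-bit port transfers, and the halves are combined only after both reads have succeeded, so the caller's buffer is never left half-written on failure. A standalone model of the combination logic (the port numbers and the fake_read_port() helper are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for acpi_hw_read_port(): fills *value with fake port contents;
 * only the low/high combination below is the point of the example. */
static int fake_read_port(uint64_t port, uint32_t *value, int width)
{
	(void)width;
	*value = (port == 0x1000) ? 0x11223344u : 0x55667788u;
	return 0;				/* AE_OK */
}

int main(void)
{
	uint32_t value_lo = 0;
	uint32_t value_hi = 0;
	uint64_t result;

	/* 64-bit register at port 0x1000: two 32-bit transfers, low then high */
	if (fake_read_port(0x1000, &value_lo, 32))
		return 1;
	if (fake_read_port(0x1004, &value_hi, 32))
		return 1;

	/* Combine exactly as the reworked acpi_read() does */
	result = value_lo | ((uint64_t)value_hi << 32);
	printf("combined: %#llx\n", (unsigned long long)result);
	return 0;
}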
@@ -561,10 +570,10 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) | |||
561 | break; | 570 | break; |
562 | } | 571 | } |
563 | 572 | ||
564 | cleanup1: | 573 | cleanup1: |
565 | acpi_ut_remove_reference(info->return_object); | 574 | acpi_ut_remove_reference(info->return_object); |
566 | 575 | ||
567 | cleanup: | 576 | cleanup: |
568 | if (ACPI_FAILURE(status)) { | 577 | if (ACPI_FAILURE(status)) { |
569 | ACPI_EXCEPTION((AE_INFO, status, | 578 | ACPI_EXCEPTION((AE_INFO, status, |
570 | "While evaluating Sleep State [%s]", | 579 | "While evaluating Sleep State [%s]", |
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index f2e669db8b65..15dddc10fc9b 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | 48 | ||
@@ -166,7 +167,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64) | |||
166 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED | 167 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED |
167 | * | 168 | * |
168 | ******************************************************************************/ | 169 | ******************************************************************************/ |
169 | acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void) | 170 | acpi_status acpi_enter_sleep_state_s4bios(void) |
170 | { | 171 | { |
171 | u32 in_value; | 172 | u32 in_value; |
172 | acpi_status status; | 173 | acpi_status status; |
@@ -360,7 +361,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) | |||
360 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED | 361 | * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED |
361 | * | 362 | * |
362 | ******************************************************************************/ | 363 | ******************************************************************************/ |
363 | acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) | 364 | acpi_status acpi_enter_sleep_state(u8 sleep_state) |
364 | { | 365 | { |
365 | acpi_status status; | 366 | acpi_status status; |
366 | 367 | ||
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index c5316e5bd4ab..14f65f6345b9 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c | |||
@@ -240,7 +240,7 @@ acpi_status acpi_ns_root_initialize(void) | |||
240 | } | 240 | } |
241 | } | 241 | } |
242 | 242 | ||
243 | unlock_and_exit: | 243 | unlock_and_exit: |
244 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 244 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
245 | 245 | ||
246 | /* Save a handle to "_GPE", it is always present */ | 246 | /* Save a handle to "_GPE", it is always present */ |
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, | |||
424 | /* Current scope has no parent scope */ | 424 | /* Current scope has no parent scope */ |
425 | 425 | ||
426 | ACPI_ERROR((AE_INFO, | 426 | ACPI_ERROR((AE_INFO, |
427 | "ACPI path has too many parent prefixes (^) " | 427 | "%s: Path has too many parent prefixes (^) " |
428 | "- reached beyond root node")); | 428 | "- reached beyond root node", |
429 | pathname)); | ||
429 | return_ACPI_STATUS(AE_NOT_FOUND); | 430 | return_ACPI_STATUS(AE_NOT_FOUND); |
430 | } | 431 | } |
431 | } | 432 | } |
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 7418c77fde8c..48b9c6f12643 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c | |||
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle, | |||
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) | 61 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) |
62 | |||
63 | #ifdef ACPI_FUTURE_USAGE | ||
64 | static acpi_status | ||
65 | acpi_ns_dump_one_object_path(acpi_handle obj_handle, | ||
66 | u32 level, void *context, void **return_value); | ||
67 | |||
68 | static acpi_status | ||
69 | acpi_ns_get_max_depth(acpi_handle obj_handle, | ||
70 | u32 level, void *context, void **return_value); | ||
71 | #endif /* ACPI_FUTURE_USAGE */ | ||
72 | |||
62 | /******************************************************************************* | 73 | /******************************************************************************* |
63 | * | 74 | * |
64 | * FUNCTION: acpi_ns_print_pathname | 75 | * FUNCTION: acpi_ns_print_pathname |
@@ -609,7 +620,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, | |||
609 | obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */ | 620 | obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */ |
610 | } | 621 | } |
611 | 622 | ||
612 | cleanup: | 623 | cleanup: |
613 | acpi_os_printf("\n"); | 624 | acpi_os_printf("\n"); |
614 | return (AE_OK); | 625 | return (AE_OK); |
615 | } | 626 | } |
@@ -671,6 +682,136 @@ acpi_ns_dump_objects(acpi_object_type type, | |||
671 | } | 682 | } |
672 | #endif /* ACPI_FUTURE_USAGE */ | 683 | #endif /* ACPI_FUTURE_USAGE */ |
673 | 684 | ||
685 | #ifdef ACPI_FUTURE_USAGE | ||
686 | /******************************************************************************* | ||
687 | * | ||
688 | * FUNCTION: acpi_ns_dump_one_object_path, acpi_ns_get_max_depth | ||
689 | * | ||
690 | * PARAMETERS: obj_handle - Node to be dumped | ||
691 | * level - Nesting level of the handle | ||
692 | * context - Passed into walk_namespace | ||
693 | * return_value - Not used | ||
694 | * | ||
695 | * RETURN: Status | ||
696 | * | ||
697 | * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth | ||
698 | * computes the maximum nesting depth in the namespace tree, in | ||
699 | * order to simplify formatting in acpi_ns_dump_one_object_path. | ||
700 | * These procedures are user_functions called by acpi_ns_walk_namespace. | ||
701 | * | ||
702 | ******************************************************************************/ | ||
703 | |||
704 | static acpi_status | ||
705 | acpi_ns_dump_one_object_path(acpi_handle obj_handle, | ||
706 | u32 level, void *context, void **return_value) | ||
707 | { | ||
708 | u32 max_level = *((u32 *)context); | ||
709 | char *pathname; | ||
710 | struct acpi_namespace_node *node; | ||
711 | int path_indent; | ||
712 | |||
713 | if (!obj_handle) { | ||
714 | return (AE_OK); | ||
715 | } | ||
716 | |||
717 | node = acpi_ns_validate_handle(obj_handle); | ||
718 | if (!node) { | ||
719 | |||
720 | /* Ignore bad node during namespace walk */ | ||
721 | |||
722 | return (AE_OK); | ||
723 | } | ||
724 | |||
725 | pathname = acpi_ns_get_external_pathname(node); | ||
726 | |||
727 | path_indent = 1; | ||
728 | if (level <= max_level) { | ||
729 | path_indent = max_level - level + 1; | ||
730 | } | ||
731 | |||
732 | acpi_os_printf("%2d%*s%-12s%*s", | ||
733 | level, level, " ", acpi_ut_get_type_name(node->type), | ||
734 | path_indent, " "); | ||
735 | |||
736 | acpi_os_printf("%s\n", &pathname[1]); | ||
737 | ACPI_FREE(pathname); | ||
738 | return (AE_OK); | ||
739 | } | ||
740 | |||
741 | static acpi_status | ||
742 | acpi_ns_get_max_depth(acpi_handle obj_handle, | ||
743 | u32 level, void *context, void **return_value) | ||
744 | { | ||
745 | u32 *max_level = (u32 *)context; | ||
746 | |||
747 | if (level > *max_level) { | ||
748 | *max_level = level; | ||
749 | } | ||
750 | return (AE_OK); | ||
751 | } | ||
752 | |||
753 | /******************************************************************************* | ||
754 | * | ||
755 | * FUNCTION: acpi_ns_dump_object_paths | ||
756 | * | ||
757 | * PARAMETERS: type - Object type to be dumped | ||
758 | * display_type - 0 or ACPI_DISPLAY_SUMMARY | ||
759 | * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX | ||
760 | * for an effectively unlimited depth. | ||
761 | * owner_id - Dump only objects owned by this ID. Use | ||
762 | * ACPI_UINT32_MAX to match all owners. | ||
763 | * start_handle - Where in namespace to start/end search | ||
764 | * | ||
765 | * RETURN: None | ||
766 | * | ||
767 | * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses | ||
768 | * acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path. | ||
769 | * | ||
770 | ******************************************************************************/ | ||
771 | |||
772 | void | ||
773 | acpi_ns_dump_object_paths(acpi_object_type type, | ||
774 | u8 display_type, | ||
775 | u32 max_depth, | ||
776 | acpi_owner_id owner_id, acpi_handle start_handle) | ||
777 | { | ||
778 | acpi_status status; | ||
779 | u32 max_level = 0; | ||
780 | |||
781 | ACPI_FUNCTION_ENTRY(); | ||
782 | |||
783 | /* | ||
784 | * Just lock the entire namespace for the duration of the dump. | ||
785 | * We don't want any changes to the namespace during this time, | ||
786 | * especially the temporary nodes since we are going to display | ||
787 | * them also. | ||
788 | */ | ||
789 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
790 | if (ACPI_FAILURE(status)) { | ||
791 | acpi_os_printf("Could not acquire namespace mutex\n"); | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | /* Get the max depth of the namespace tree, for formatting later */ | ||
796 | |||
797 | (void)acpi_ns_walk_namespace(type, start_handle, max_depth, | ||
798 | ACPI_NS_WALK_NO_UNLOCK | | ||
799 | ACPI_NS_WALK_TEMP_NODES, | ||
800 | acpi_ns_get_max_depth, NULL, | ||
801 | (void *)&max_level, NULL); | ||
802 | |||
803 | /* Now dump the entire namespace */ | ||
804 | |||
805 | (void)acpi_ns_walk_namespace(type, start_handle, max_depth, | ||
806 | ACPI_NS_WALK_NO_UNLOCK | | ||
807 | ACPI_NS_WALK_TEMP_NODES, | ||
808 | acpi_ns_dump_one_object_path, NULL, | ||
809 | (void *)&max_level, NULL); | ||
810 | |||
811 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
812 | } | ||
813 | #endif /* ACPI_FUTURE_USAGE */ | ||
814 | |||
674 | /******************************************************************************* | 815 | /******************************************************************************* |
675 | * | 816 | * |
676 | * FUNCTION: acpi_ns_dump_entry | 817 | * FUNCTION: acpi_ns_dump_entry |
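The new dump code above works in two passes over the same subtree: acpi_ns_get_max_depth() records the deepest nesting level, then acpi_ns_dump_one_object_path() uses that value to pad every pathname into a single column. A standalone model of the formatting (the toy node table stands in for a real namespace walk; all names are invented):

#include <stdio.h>

/* Toy namespace: (nesting level, type, full path) in depth-first order */
struct toy_node {
	int level;
	const char *type;
	const char *path;
};

static const struct toy_node nodes[] = {
	{ 1, "Device",      "_SB" },
	{ 2, "Device",      "_SB.PCI0" },
	{ 3, "Device",      "_SB.PCI0.LPCB" },
	{ 3, "Method",      "_SB.PCI0._PRT" },
	{ 1, "ThermalZone", "_TZ" },
};

int main(void)
{
	const int count = (int)(sizeof(nodes) / sizeof(nodes[0]));
	int max_level = 0;
	int i, indent;

	/* Pass 1: acpi_ns_get_max_depth() equivalent - find the deepest level */
	for (i = 0; i < count; i++)
		if (nodes[i].level > max_level)
			max_level = nodes[i].level;

	/* Pass 2: acpi_ns_dump_one_object_path() equivalent - print each path,
	 * padded so that all pathnames line up in one column */
	for (i = 0; i < count; i++) {
		indent = 1;
		if (nodes[i].level <= max_level)
			indent = max_level - nodes[i].level + 1;

		printf("%2d%*s%-12s%*s%s\n", nodes[i].level,
		       nodes[i].level, " ", nodes[i].type,
		       indent, " ", nodes[i].path);
	}
	return 0;
}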
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 409ae80824d1..283762511b73 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c | |||
@@ -69,6 +69,7 @@ static acpi_status | |||
69 | acpi_ns_dump_one_device(acpi_handle obj_handle, | 69 | acpi_ns_dump_one_device(acpi_handle obj_handle, |
70 | u32 level, void *context, void **return_value) | 70 | u32 level, void *context, void **return_value) |
71 | { | 71 | { |
72 | struct acpi_buffer buffer; | ||
72 | struct acpi_device_info *info; | 73 | struct acpi_device_info *info; |
73 | acpi_status status; | 74 | acpi_status status; |
74 | u32 i; | 75 | u32 i; |
@@ -78,15 +79,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle, | |||
78 | status = | 79 | status = |
79 | acpi_ns_dump_one_object(obj_handle, level, context, return_value); | 80 | acpi_ns_dump_one_object(obj_handle, level, context, return_value); |
80 | 81 | ||
81 | status = acpi_get_object_info(obj_handle, &info); | 82 | buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; |
83 | status = acpi_get_object_info(obj_handle, &buffer); | ||
82 | if (ACPI_SUCCESS(status)) { | 84 | if (ACPI_SUCCESS(status)) { |
85 | info = buffer.pointer; | ||
83 | for (i = 0; i < level; i++) { | 86 | for (i = 0; i < level; i++) { |
84 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " ")); | 87 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " ")); |
85 | } | 88 | } |
86 | 89 | ||
87 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, | 90 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, |
88 | " HID: %s, ADR: %8.8X%8.8X, Status: %X\n", | 91 | " HID: %s, ADR: %8.8X%8.8X, Status: %X\n", |
89 | info->hardware_id.string, | 92 | info->hardware_id.value, |
90 | ACPI_FORMAT_UINT64(info->address), | 93 | ACPI_FORMAT_UINT64(info->address), |
91 | info->current_status)); | 94 | info->current_status)); |
92 | ACPI_FREE(info); | 95 | ACPI_FREE(info); |
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index 18108bc2e51c..963ceef063f8 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c | |||
@@ -314,7 +314,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) | |||
314 | "*** Completed evaluation of object %s ***\n", | 314 | "*** Completed evaluation of object %s ***\n", |
315 | info->relative_pathname)); | 315 | info->relative_pathname)); |
316 | 316 | ||
317 | cleanup: | 317 | cleanup: |
318 | /* | 318 | /* |
319 | * Namespace was unlocked by the handling acpi_ns* function, so we | 319 | * Namespace was unlocked by the handling acpi_ns* function, so we |
320 | * just free the pathname and return | 320 | * just free the pathname and return |
@@ -486,7 +486,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj, | |||
486 | parent_node->type = (u8)type; | 486 | parent_node->type = (u8)type; |
487 | } | 487 | } |
488 | 488 | ||
489 | exit: | 489 | exit: |
490 | if (parent_obj) { | 490 | if (parent_obj) { |
491 | acpi_ut_remove_reference(parent_obj); | 491 | acpi_ut_remove_reference(parent_obj); |
492 | } | 492 | } |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index dd2ceae3f717..3a0423af968c 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
@@ -213,7 +213,7 @@ acpi_status acpi_ns_initialize_devices(void) | |||
213 | 213 | ||
214 | return_ACPI_STATUS(status); | 214 | return_ACPI_STATUS(status); |
215 | 215 | ||
216 | error_exit: | 216 | error_exit: |
217 | ACPI_EXCEPTION((AE_INFO, status, "During device initialization")); | 217 | ACPI_EXCEPTION((AE_INFO, status, "During device initialization")); |
218 | return_ACPI_STATUS(status); | 218 | return_ACPI_STATUS(status); |
219 | } | 219 | } |
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 0a7badc3179f..89ec645e7730 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c | |||
@@ -114,7 +114,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) | |||
114 | (void)acpi_tb_release_owner_id(table_index); | 114 | (void)acpi_tb_release_owner_id(table_index); |
115 | } | 115 | } |
116 | 116 | ||
117 | unlock: | 117 | unlock: |
118 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 118 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
119 | 119 | ||
120 | if (ACPI_FAILURE(status)) { | 120 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 35dde8151c0d..177857340271 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c | |||
@@ -140,7 +140,7 @@ acpi_ns_one_complete_parse(u32 pass_number, | |||
140 | pass_number)); | 140 | pass_number)); |
141 | status = acpi_ps_parse_aml(walk_state); | 141 | status = acpi_ps_parse_aml(walk_state); |
142 | 142 | ||
143 | cleanup: | 143 | cleanup: |
144 | acpi_ps_delete_parse_tree(parse_root); | 144 | acpi_ps_delete_parse_tree(parse_root); |
145 | return_ACPI_STATUS(status); | 145 | return_ACPI_STATUS(status); |
146 | } | 146 | } |
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 098e7666cbc9..d2855d9857c4 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c | |||
@@ -271,7 +271,7 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info, | |||
271 | return (AE_OK); /* Successful repair */ | 271 | return (AE_OK); /* Successful repair */ |
272 | } | 272 | } |
273 | 273 | ||
274 | type_error_exit: | 274 | type_error_exit: |
275 | 275 | ||
276 | /* Create a string with all expected types for this predefined object */ | 276 | /* Create a string with all expected types for this predefined object */ |
277 | 277 | ||
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 6d55cef7916c..3d5391f9bcb5 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c | |||
@@ -330,7 +330,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, | |||
330 | 330 | ||
331 | return (status); | 331 | return (status); |
332 | 332 | ||
333 | package_too_small: | 333 | package_too_small: |
334 | 334 | ||
335 | /* Error exit for the case with an incorrect package count */ | 335 | /* Error exit for the case with an incorrect package count */ |
336 | 336 | ||
@@ -555,7 +555,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info, | |||
555 | 555 | ||
556 | return (AE_OK); | 556 | return (AE_OK); |
557 | 557 | ||
558 | package_too_small: | 558 | package_too_small: |
559 | 559 | ||
560 | /* The sub-package count was smaller than required */ | 560 | /* The sub-package count was smaller than required */ |
561 | 561 | ||
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index f8e71ea60319..a05afff50eb9 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c | |||
@@ -263,7 +263,7 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, | |||
263 | 263 | ||
264 | return (AE_AML_OPERAND_TYPE); | 264 | return (AE_AML_OPERAND_TYPE); |
265 | 265 | ||
266 | object_repaired: | 266 | object_repaired: |
267 | 267 | ||
268 | /* Object was successfully repaired */ | 268 | /* Object was successfully repaired */ |
269 | 269 | ||
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index c84603ee83ae..6a25d320b169 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c | |||
@@ -478,7 +478,7 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info, | |||
478 | removing = TRUE; | 478 | removing = TRUE; |
479 | } | 479 | } |
480 | 480 | ||
481 | remove_element: | 481 | remove_element: |
482 | if (removing) { | 482 | if (removing) { |
483 | acpi_ns_remove_element(return_object, i + 1); | 483 | acpi_ns_remove_element(return_object, i + 1); |
484 | outer_element_count--; | 484 | outer_element_count--; |
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index 5d43efc53a61..47420faef073 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c | |||
@@ -381,7 +381,8 @@ acpi_ns_search_and_enter(u32 target_name, | |||
381 | 381 | ||
382 | /* Node is an object defined by an External() statement */ | 382 | /* Node is an object defined by an External() statement */ |
383 | 383 | ||
384 | if (flags & ACPI_NS_EXTERNAL) { | 384 | if (flags & ACPI_NS_EXTERNAL || |
385 | (walk_state && walk_state->opcode == AML_SCOPE_OP)) { | ||
385 | new_node->flags |= ANOBJ_IS_EXTERNAL; | 386 | new_node->flags |= ANOBJ_IS_EXTERNAL; |
386 | } | 387 | } |
387 | #endif | 388 | #endif |
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index 08c0b5beec88..cc2fea94c5f0 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -722,7 +722,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node, | |||
722 | 722 | ||
723 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 723 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
724 | 724 | ||
725 | cleanup: | 725 | cleanup: |
726 | ACPI_FREE(internal_path); | 726 | ACPI_FREE(internal_path); |
727 | return_ACPI_STATUS(status); | 727 | return_ACPI_STATUS(status); |
728 | } | 728 | } |
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index b38b4b07f86e..e973e311f856 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c | |||
@@ -42,7 +42,8 @@ | |||
42 | * POSSIBILITY OF SUCH DAMAGES. | 42 | * POSSIBILITY OF SUCH DAMAGES. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/export.h> | 45 | #define EXPORT_ACPI_INTERFACES |
46 | |||
46 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 48 | #include "accommon.h" |
48 | #include "acnamesp.h" | 49 | #include "acnamesp.h" |
@@ -138,7 +139,7 @@ acpi_evaluate_object_typed(acpi_handle handle, | |||
138 | 139 | ||
139 | /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */ | 140 | /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */ |
140 | 141 | ||
141 | ACPI_FREE(return_buffer->pointer); | 142 | ACPI_FREE_BUFFER(*return_buffer); |
142 | return_buffer->pointer = NULL; | 143 | return_buffer->pointer = NULL; |
143 | } | 144 | } |
144 | 145 | ||
@@ -441,7 +442,7 @@ acpi_evaluate_object(acpi_handle handle, | |||
441 | acpi_ex_exit_interpreter(); | 442 | acpi_ex_exit_interpreter(); |
442 | } | 443 | } |
443 | 444 | ||
444 | cleanup: | 445 | cleanup: |
445 | 446 | ||
446 | /* Free the input parameter list (if we created one) */ | 447 | /* Free the input parameter list (if we created one) */ |
447 | 448 | ||
@@ -605,14 +606,22 @@ acpi_walk_namespace(acpi_object_type type, | |||
605 | goto unlock_and_exit; | 606 | goto unlock_and_exit; |
606 | } | 607 | } |
607 | 608 | ||
609 | /* Now we can validate the starting node */ | ||
610 | |||
611 | if (!acpi_ns_validate_handle(start_object)) { | ||
612 | status = AE_BAD_PARAMETER; | ||
613 | goto unlock_and_exit2; | ||
614 | } | ||
615 | |||
608 | status = acpi_ns_walk_namespace(type, start_object, max_depth, | 616 | status = acpi_ns_walk_namespace(type, start_object, max_depth, |
609 | ACPI_NS_WALK_UNLOCK, | 617 | ACPI_NS_WALK_UNLOCK, |
610 | descending_callback, ascending_callback, | 618 | descending_callback, ascending_callback, |
611 | context, return_value); | 619 | context, return_value); |
612 | 620 | ||
621 | unlock_and_exit2: | ||
613 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 622 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
614 | 623 | ||
615 | unlock_and_exit: | 624 | unlock_and_exit: |
616 | (void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock); | 625 | (void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock); |
617 | return_ACPI_STATUS(status); | 626 | return_ACPI_STATUS(status); |
618 | } | 627 | } |
@@ -856,7 +865,7 @@ acpi_attach_data(acpi_handle obj_handle, | |||
856 | 865 | ||
857 | status = acpi_ns_attach_data(node, handler, data); | 866 | status = acpi_ns_attach_data(node, handler, data); |
858 | 867 | ||
859 | unlock_and_exit: | 868 | unlock_and_exit: |
860 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 869 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
861 | return (status); | 870 | return (status); |
862 | } | 871 | } |
@@ -902,7 +911,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler) | |||
902 | 911 | ||
903 | status = acpi_ns_detach_data(node, handler); | 912 | status = acpi_ns_detach_data(node, handler); |
904 | 913 | ||
905 | unlock_and_exit: | 914 | unlock_and_exit: |
906 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 915 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
907 | return (status); | 916 | return (status); |
908 | } | 917 | } |
@@ -949,7 +958,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data) | |||
949 | 958 | ||
950 | status = acpi_ns_get_attached_data(node, handler, data); | 959 | status = acpi_ns_get_attached_data(node, handler, data); |
951 | 960 | ||
952 | unlock_and_exit: | 961 | unlock_and_exit: |
953 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 962 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
954 | return (status); | 963 | return (status); |
955 | } | 964 | } |
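One effect of the acpi_walk_namespace() hunk above is that a bogus start handle is now rejected with AE_BAD_PARAMETER up front instead of failing somewhere inside the walk. A hedged caller sketch (the callback and counter are invented; the parameter order follows the call shown in this file):

#include <linux/acpi.h>

/* Invented descending callback: count every Device object visited */
static acpi_status example_count_device(acpi_handle handle, u32 nesting_level,
					void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	return AE_OK;			/* keep walking */
}

static acpi_status example_count_devices(u32 *count)
{
	*count = 0;

	/* Start at the namespace root, unlimited depth, no ascending callback.
	 * An invalid start handle now fails here with AE_BAD_PARAMETER. */
	return acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				   ACPI_UINT32_MAX, example_count_device,
				   NULL, count, NULL);
}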
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 83c164434580..3a4bd3ff49a3 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c | |||
@@ -42,7 +42,8 @@ | |||
42 | * POSSIBILITY OF SUCH DAMAGES. | 42 | * POSSIBILITY OF SUCH DAMAGES. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/export.h> | 45 | #define EXPORT_ACPI_INTERFACES |
46 | |||
46 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 48 | #include "accommon.h" |
48 | #include "acnamesp.h" | 49 | #include "acnamesp.h" |
@@ -208,7 +209,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) | |||
208 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; | 209 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; |
209 | status = AE_OK; | 210 | status = AE_OK; |
210 | 211 | ||
211 | unlock_and_exit: | 212 | unlock_and_exit: |
212 | 213 | ||
213 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 214 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
214 | return (status); | 215 | return (status); |
@@ -496,7 +497,7 @@ acpi_get_object_info(acpi_handle handle, | |||
496 | *return_buffer = info; | 497 | *return_buffer = info; |
497 | status = AE_OK; | 498 | status = AE_OK; |
498 | 499 | ||
499 | cleanup: | 500 | cleanup: |
500 | if (hid) { | 501 | if (hid) { |
501 | ACPI_FREE(hid); | 502 | ACPI_FREE(hid); |
502 | } | 503 | } |
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index c0853ef294e4..0e6d79e462d4 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c | |||
@@ -42,7 +42,8 @@ | |||
42 | * POSSIBILITY OF SUCH DAMAGES. | 42 | * POSSIBILITY OF SUCH DAMAGES. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/export.h> | 45 | #define EXPORT_ACPI_INTERFACES |
46 | |||
46 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 48 | #include "accommon.h" |
48 | #include "acnamesp.h" | 49 | #include "acnamesp.h" |
@@ -200,7 +201,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle) | |||
200 | status = AE_NULL_ENTRY; | 201 | status = AE_NULL_ENTRY; |
201 | } | 202 | } |
202 | 203 | ||
203 | unlock_and_exit: | 204 | unlock_and_exit: |
204 | 205 | ||
205 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 206 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
206 | return (status); | 207 | return (status); |
@@ -280,7 +281,7 @@ acpi_get_next_object(acpi_object_type type, | |||
280 | *ret_handle = ACPI_CAST_PTR(acpi_handle, node); | 281 | *ret_handle = ACPI_CAST_PTR(acpi_handle, node); |
281 | } | 282 | } |
282 | 283 | ||
283 | unlock_and_exit: | 284 | unlock_and_exit: |
284 | 285 | ||
285 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 286 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
286 | return (status); | 287 | return (status); |
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 86198a9139b5..79d9a28dedef 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c | |||
@@ -297,7 +297,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, | |||
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
300 | cleanup: | 300 | cleanup: |
301 | 301 | ||
302 | /* Now we can actually delete the subtree rooted at Op */ | 302 | /* Now we can actually delete the subtree rooted at Op */ |
303 | 303 | ||
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index 11b99ab20bb3..fcb7a840e996 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c | |||
@@ -142,7 +142,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info) | |||
142 | acpi_dbg_layer = acpi_gbl_trace_dbg_layer; | 142 | acpi_dbg_layer = acpi_gbl_trace_dbg_layer; |
143 | } | 143 | } |
144 | 144 | ||
145 | exit: | 145 | exit: |
146 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 146 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
147 | } | 147 | } |
148 | 148 | ||
@@ -185,7 +185,7 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info) | |||
185 | acpi_dbg_level = acpi_gbl_original_dbg_level; | 185 | acpi_dbg_level = acpi_gbl_original_dbg_level; |
186 | acpi_dbg_layer = acpi_gbl_original_dbg_layer; | 186 | acpi_dbg_layer = acpi_gbl_original_dbg_layer; |
187 | 187 | ||
188 | exit: | 188 | exit: |
189 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 189 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
190 | } | 190 | } |
191 | 191 | ||
@@ -323,7 +323,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) | |||
323 | 323 | ||
324 | /* walk_state was deleted by parse_aml */ | 324 | /* walk_state was deleted by parse_aml */ |
325 | 325 | ||
326 | cleanup: | 326 | cleanup: |
327 | acpi_ps_delete_parse_tree(op); | 327 | acpi_ps_delete_parse_tree(op); |
328 | 328 | ||
329 | /* End optional tracing */ | 329 | /* End optional tracing */ |
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index 80d12994e0d0..c99cec9cefde 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c | |||
@@ -440,7 +440,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, | |||
440 | info++; | 440 | info++; |
441 | } | 441 | } |
442 | 442 | ||
443 | exit: | 443 | exit: |
444 | if (!flags_mode) { | 444 | if (!flags_mode) { |
445 | 445 | ||
446 | /* Round the resource struct length up to the next boundary (32 or 64) */ | 446 | /* Round the resource struct length up to the next boundary (32 or 64) */ |
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, | |||
783 | info++; | 783 | info++; |
784 | } | 784 | } |
785 | 785 | ||
786 | exit: | 786 | exit: |
787 | return_ACPI_STATUS(AE_OK); | 787 | return_ACPI_STATUS(AE_OK); |
788 | } | 788 | } |
789 | 789 | ||
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 480b6b40c5ea..aef303d56d86 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c | |||
@@ -784,7 +784,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, | |||
784 | 784 | ||
785 | acpi_ut_remove_reference(args[0]); | 785 | acpi_ut_remove_reference(args[0]); |
786 | 786 | ||
787 | cleanup: | 787 | cleanup: |
788 | ACPI_FREE(info); | 788 | ACPI_FREE(info); |
789 | return_ACPI_STATUS(status); | 789 | return_ACPI_STATUS(status); |
790 | } | 790 | } |
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index 94e3517554f9..01e476988aae 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acresrc.h" | 48 | #include "acresrc.h" |
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 42a13c0d7015..634357d51fe9 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc) | |||
80 | } | 80 | } |
81 | } | 81 | } |
82 | 82 | ||
83 | /* FACS is the odd table, has no standard ACPI header and no checksum */ | 83 | /* Always calculate checksum, ignore bad checksum if requested */ |
84 | 84 | ||
85 | if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) { | 85 | status = |
86 | 86 | acpi_tb_verify_checksum(table_desc->pointer, table_desc->length); | |
87 | /* Always calculate checksum, ignore bad checksum if requested */ | ||
88 | |||
89 | status = | ||
90 | acpi_tb_verify_checksum(table_desc->pointer, | ||
91 | table_desc->length); | ||
92 | } | ||
93 | 87 | ||
94 | return_ACPI_STATUS(status); | 88 | return_ACPI_STATUS(status); |
95 | } | 89 | } |
@@ -237,10 +231,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) | |||
237 | goto release; | 231 | goto release; |
238 | } | 232 | } |
239 | 233 | ||
240 | print_header: | 234 | print_header: |
241 | acpi_tb_print_table_header(table_desc->address, table_desc->pointer); | 235 | acpi_tb_print_table_header(table_desc->address, table_desc->pointer); |
242 | 236 | ||
243 | release: | 237 | release: |
244 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 238 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
245 | return_ACPI_STATUS(status); | 239 | return_ACPI_STATUS(status); |
246 | } | 240 | } |
@@ -312,7 +306,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header | |||
312 | 306 | ||
313 | return (NULL); /* There was no override */ | 307 | return (NULL); /* There was no override */ |
314 | 308 | ||
315 | finish_override: | 309 | finish_override: |
316 | 310 | ||
317 | ACPI_INFO((AE_INFO, | 311 | ACPI_INFO((AE_INFO, |
318 | "%4.4s %p %s table override, new table: %p", | 312 | "%4.4s %p %s table override, new table: %p", |
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index dc963f823d2c..6866e767ba90 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c | |||
@@ -135,10 +135,10 @@ acpi_tb_print_table_header(acpi_physical_address address, | |||
135 | 135 | ||
136 | /* FACS only has signature and length fields */ | 136 | /* FACS only has signature and length fields */ |
137 | 137 | ||
138 | ACPI_INFO((AE_INFO, "%4.4s %p %05X", | 138 | ACPI_INFO((AE_INFO, "%4.4s %p %06X", |
139 | header->signature, ACPI_CAST_PTR(void, address), | 139 | header->signature, ACPI_CAST_PTR(void, address), |
140 | header->length)); | 140 | header->length)); |
141 | } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) { | 141 | } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) { |
142 | 142 | ||
143 | /* RSDP has no common fields */ | 143 | /* RSDP has no common fields */ |
144 | 144 | ||
@@ -147,7 +147,7 @@ acpi_tb_print_table_header(acpi_physical_address address, | |||
147 | header)->oem_id, ACPI_OEM_ID_SIZE); | 147 | header)->oem_id, ACPI_OEM_ID_SIZE); |
148 | acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE); | 148 | acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE); |
149 | 149 | ||
150 | ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)", | 150 | ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)", |
151 | ACPI_CAST_PTR(void, address), | 151 | ACPI_CAST_PTR(void, address), |
152 | (ACPI_CAST_PTR(struct acpi_table_rsdp, header)-> | 152 | (ACPI_CAST_PTR(struct acpi_table_rsdp, header)-> |
153 | revision > | 153 | revision > |
@@ -162,7 +162,7 @@ acpi_tb_print_table_header(acpi_physical_address address, | |||
162 | acpi_tb_cleanup_table_header(&local_header, header); | 162 | acpi_tb_cleanup_table_header(&local_header, header); |
163 | 163 | ||
164 | ACPI_INFO((AE_INFO, | 164 | ACPI_INFO((AE_INFO, |
165 | "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)", | 165 | "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)", |
166 | local_header.signature, ACPI_CAST_PTR(void, address), | 166 | local_header.signature, ACPI_CAST_PTR(void, address), |
167 | local_header.length, local_header.revision, | 167 | local_header.length, local_header.revision, |
168 | local_header.oem_id, local_header.oem_table_id, | 168 | local_header.oem_id, local_header.oem_table_id, |
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length) | |||
190 | { | 190 | { |
191 | u8 checksum; | 191 | u8 checksum; |
192 | 192 | ||
193 | /* | ||
194 | * FACS/S3PT: | ||
195 | * They are the odd tables, have no standard ACPI header and no checksum | ||
196 | */ | ||
197 | |||
198 | if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) || | ||
199 | ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) { | ||
200 | return (AE_OK); | ||
201 | } | ||
202 | |||
193 | /* Compute the checksum on the table */ | 203 | /* Compute the checksum on the table */ |
194 | 204 | ||
195 | checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); | 205 | checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); |
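The new FACS/S3PT test above short-circuits verification because those two tables carry no checksum field; every other table must sum to zero byte-wise, checksum byte included. A standalone model of that rule (the toy table layout is invented; real ACPI headers keep the checksum at offset 9 of a 36-byte header):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of acpi_tb_checksum(): byte-wise sum of the whole table, mod 256.
 * A table verifies when the sum, including its checksum byte, is zero. */
static uint8_t table_checksum(const uint8_t *buffer, size_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *buffer++);
	return sum;
}

int main(void)
{
	/* Toy 8-byte "table": signature, length byte, checksum, padding */
	uint8_t table[8] = { 'T', 'E', 'S', 'T', 8, 0, 0, 0 };

	/* FACS and S3PT would be skipped entirely, mirroring the new test
	 * at the top of acpi_tb_verify_checksum(). */
	if (!memcmp(table, "FACS", 4) || !memcmp(table, "S3PT", 4)) {
		puts("no checksum field - skipped");
		return 0;
	}

	/* Fix up the checksum byte so the whole table sums to zero */
	table[5] = (uint8_t)(0x100 - table_checksum(table, sizeof(table)));

	printf("verify: %s\n",
	       table_checksum(table, sizeof(table)) == 0 ? "OK" : "BAD");
	return 0;
}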
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index bffdfc7b8322..3d6bb83aa7e7 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -350,7 +350,7 @@ acpi_tb_install_table(acpi_physical_address address, | |||
350 | acpi_tb_delete_table(table_desc); | 350 | acpi_tb_delete_table(table_desc); |
351 | } | 351 | } |
352 | 352 | ||
353 | unmap_and_exit: | 353 | unmap_and_exit: |
354 | 354 | ||
355 | /* Always unmap the table header that we mapped above */ | 355 | /* Always unmap the table header that we mapped above */ |
356 | 356 | ||
@@ -430,8 +430,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) | |||
430 | * | 430 | * |
431 | ******************************************************************************/ | 431 | ******************************************************************************/ |
432 | 432 | ||
433 | acpi_status __init | 433 | acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) |
434 | acpi_tb_parse_root_table(acpi_physical_address rsdp_address) | ||
435 | { | 434 | { |
436 | struct acpi_table_rsdp *rsdp; | 435 | struct acpi_table_rsdp *rsdp; |
437 | u32 table_entry_size; | 436 | u32 table_entry_size; |
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index ad11162482ff..db826eaadd1c 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "actables.h" | 48 | #include "actables.h" |
@@ -147,6 +148,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, | |||
147 | return_ACPI_STATUS(status); | 148 | return_ACPI_STATUS(status); |
148 | } | 149 | } |
149 | 150 | ||
151 | ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables) | ||
152 | |||
150 | /******************************************************************************* | 153 | /******************************************************************************* |
151 | * | 154 | * |
152 | * FUNCTION: acpi_reallocate_root_table | 155 | * FUNCTION: acpi_reallocate_root_table |
@@ -161,7 +164,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, | |||
161 | * kernel. | 164 | * kernel. |
162 | * | 165 | * |
163 | ******************************************************************************/ | 166 | ******************************************************************************/ |
164 | acpi_status acpi_reallocate_root_table(void) | 167 | acpi_status __init acpi_reallocate_root_table(void) |
165 | { | 168 | { |
166 | acpi_status status; | 169 | acpi_status status; |
167 | 170 | ||
@@ -181,6 +184,8 @@ acpi_status acpi_reallocate_root_table(void) | |||
181 | return_ACPI_STATUS(status); | 184 | return_ACPI_STATUS(status); |
182 | } | 185 | } |
183 | 186 | ||
187 | ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table) | ||
188 | |||
184 | /******************************************************************************* | 189 | /******************************************************************************* |
185 | * | 190 | * |
186 | * FUNCTION: acpi_get_table_header | 191 | * FUNCTION: acpi_get_table_header |
@@ -356,6 +361,7 @@ acpi_get_table_with_size(char *signature, | |||
356 | 361 | ||
357 | return (AE_NOT_FOUND); | 362 | return (AE_NOT_FOUND); |
358 | } | 363 | } |
364 | |||
359 | ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) | 365 | ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) |
360 | 366 | ||
361 | acpi_status | 367 | acpi_status |
@@ -367,6 +373,7 @@ acpi_get_table(char *signature, | |||
367 | return acpi_get_table_with_size(signature, | 373 | return acpi_get_table_with_size(signature, |
368 | instance, out_table, &tbl_size); | 374 | instance, out_table, &tbl_size); |
369 | } | 375 | } |
376 | |||
370 | ACPI_EXPORT_SYMBOL(acpi_get_table) | 377 | ACPI_EXPORT_SYMBOL(acpi_get_table) |
371 | 378 | ||
372 | /******************************************************************************* | 379 | /******************************************************************************* |
@@ -424,7 +431,6 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) | |||
424 | 431 | ||
425 | ACPI_EXPORT_SYMBOL(acpi_get_table_by_index) | 432 | ACPI_EXPORT_SYMBOL(acpi_get_table_by_index) |
426 | 433 | ||
427 | |||
428 | /******************************************************************************* | 434 | /******************************************************************************* |
429 | * | 435 | * |
430 | * FUNCTION: acpi_install_table_handler | 436 | * FUNCTION: acpi_install_table_handler |
@@ -465,7 +471,7 @@ acpi_install_table_handler(acpi_table_handler handler, void *context) | |||
465 | acpi_gbl_table_handler = handler; | 471 | acpi_gbl_table_handler = handler; |
466 | acpi_gbl_table_handler_context = context; | 472 | acpi_gbl_table_handler_context = context; |
467 | 473 | ||
468 | cleanup: | 474 | cleanup: |
469 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 475 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
470 | return_ACPI_STATUS(status); | 476 | return_ACPI_STATUS(status); |
471 | } | 477 | } |
@@ -506,7 +512,7 @@ acpi_status acpi_remove_table_handler(acpi_table_handler handler) | |||
506 | 512 | ||
507 | acpi_gbl_table_handler = NULL; | 513 | acpi_gbl_table_handler = NULL; |
508 | 514 | ||
509 | cleanup: | 515 | cleanup: |
510 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 516 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
511 | return_ACPI_STATUS(status); | 517 | return_ACPI_STATUS(status); |
512 | } | 518 | } |
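Note: the "#define EXPORT_ACPI_INTERFACES" pattern introduced above (and repeated in the files that follow) removes the direct #include <linux/export.h> from OS-independent ACPICA sources. Only translation units that define the flag before including <acpi/acpi.h> get real symbol exports from the host headers, and ACPI_EXPORT_SYMBOL_INIT() exists so that routines now marked __init are not exported at all. One plausible shape of the host-side glue, shown purely as an illustration (the authoritative definitions live in the Linux OSL headers, not in this diff):

#ifdef EXPORT_ACPI_INTERFACES
#include <linux/export.h>
#define ACPI_EXPORT_SYMBOL(symbol)       EXPORT_SYMBOL(symbol);
#define ACPI_EXPORT_SYMBOL_INIT(symbol)  /* __init code is never exported */
#else
#define ACPI_EXPORT_SYMBOL(symbol)
#define ACPI_EXPORT_SYMBOL_INIT(symbol)
#endif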
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 0ba9e328d5d7..60b5a871833c 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acnamesp.h" | 48 | #include "acnamesp.h" |
@@ -65,7 +66,7 @@ static acpi_status acpi_tb_load_namespace(void); | |||
65 | * | 66 | * |
66 | ******************************************************************************/ | 67 | ******************************************************************************/ |
67 | 68 | ||
68 | acpi_status acpi_load_tables(void) | 69 | acpi_status __init acpi_load_tables(void) |
69 | { | 70 | { |
70 | acpi_status status; | 71 | acpi_status status; |
71 | 72 | ||
@@ -82,7 +83,7 @@ acpi_status acpi_load_tables(void) | |||
82 | return_ACPI_STATUS(status); | 83 | return_ACPI_STATUS(status); |
83 | } | 84 | } |
84 | 85 | ||
85 | ACPI_EXPORT_SYMBOL(acpi_load_tables) | 86 | ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables) |
86 | 87 | ||
87 | /******************************************************************************* | 88 | /******************************************************************************* |
88 | * | 89 | * |
@@ -200,7 +201,7 @@ static acpi_status acpi_tb_load_namespace(void) | |||
200 | 201 | ||
201 | ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired")); | 202 | ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired")); |
202 | 203 | ||
203 | unlock_and_exit: | 204 | unlock_and_exit: |
204 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 205 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
205 | return_ACPI_STATUS(status); | 206 | return_ACPI_STATUS(status); |
206 | } | 207 | } |
@@ -268,7 +269,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table) | |||
268 | acpi_gbl_table_handler_context); | 269 | acpi_gbl_table_handler_context); |
269 | } | 270 | } |
270 | 271 | ||
271 | unlock_and_exit: | 272 | unlock_and_exit: |
272 | (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); | 273 | (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); |
273 | return_ACPI_STATUS(status); | 274 | return_ACPI_STATUS(status); |
274 | } | 275 | } |
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 948c95e80d44..e4e1468877c3 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c | |||
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | |||
68 | * Note: Sometimes there exists more than one RSDP in memory; the valid | 68 | * Note: Sometimes there exists more than one RSDP in memory; the valid |
69 | * RSDP has a valid checksum, all others have an invalid checksum. | 69 | * RSDP has a valid checksum, all others have an invalid checksum. |
70 | */ | 70 | */ |
71 | if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP, | 71 | if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) { |
72 | sizeof(ACPI_SIG_RSDP) - 1) != 0) { | ||
73 | 72 | ||
74 | /* Nope, BAD Signature */ | 73 | /* Nope, BAD Signature */ |
75 | 74 | ||
@@ -112,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | |||
112 | * | 111 | * |
113 | ******************************************************************************/ | 112 | ******************************************************************************/ |
114 | 113 | ||
115 | acpi_status acpi_find_root_pointer(acpi_size *table_address) | 114 | acpi_status __init acpi_find_root_pointer(acpi_size *table_address) |
116 | { | 115 | { |
117 | u8 *table_ptr; | 116 | u8 *table_ptr; |
118 | u8 *mem_rover; | 117 | u8 *mem_rover; |
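Note: ACPI_VALIDATE_RSDP_SIG() folds the open-coded ACPI_STRNCMP() test into a single reusable check of the fixed 8-byte RSDP signature, "RSD PTR " (trailing space included). Roughly equivalent to the sketch below; see the ACPICA headers for the authoritative definition:

/* "RSD PTR " is exactly 8 bytes, including the trailing space. */
#define ACPI_SIG_RSDP              "RSD PTR "
#define ACPI_VALIDATE_RSDP_SIG(a)  (!strncmp((const char *)(a), ACPI_SIG_RSDP, 8))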
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index e0ffb580f4b0..814267f52715 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c | |||
@@ -48,6 +48,39 @@ | |||
48 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
49 | ACPI_MODULE_NAME("utalloc") | 49 | ACPI_MODULE_NAME("utalloc") |
50 | 50 | ||
51 | #if !defined (USE_NATIVE_ALLOCATE_ZEROED) | ||
52 | /******************************************************************************* | ||
53 | * | ||
54 | * FUNCTION: acpi_os_allocate_zeroed | ||
55 | * | ||
56 | * PARAMETERS: size - Size of the allocation | ||
57 | * | ||
58 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
59 | * | ||
60 | * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory. | ||
61 | * This is the default implementation. Can be overridden via the | ||
62 | * USE_NATIVE_ALLOCATE_ZEROED flag. | ||
63 | * | ||
64 | ******************************************************************************/ | ||
65 | void *acpi_os_allocate_zeroed(acpi_size size) | ||
66 | { | ||
67 | void *allocation; | ||
68 | |||
69 | ACPI_FUNCTION_ENTRY(); | ||
70 | |||
71 | allocation = acpi_os_allocate(size); | ||
72 | if (allocation) { | ||
73 | |||
74 | /* Clear the memory block */ | ||
75 | |||
76 | ACPI_MEMSET(allocation, 0, size); | ||
77 | } | ||
78 | |||
79 | return (allocation); | ||
80 | } | ||
81 | |||
82 | #endif /* !USE_NATIVE_ALLOCATE_ZEROED */ | ||
83 | |||
51 | /******************************************************************************* | 84 | /******************************************************************************* |
52 | * | 85 | * |
53 | * FUNCTION: acpi_ut_create_caches | 86 | * FUNCTION: acpi_ut_create_caches |
@@ -59,6 +92,7 @@ ACPI_MODULE_NAME("utalloc") | |||
59 | * DESCRIPTION: Create all local caches | 92 | * DESCRIPTION: Create all local caches |
60 | * | 93 | * |
61 | ******************************************************************************/ | 94 | ******************************************************************************/ |
95 | |||
62 | acpi_status acpi_ut_create_caches(void) | 96 | acpi_status acpi_ut_create_caches(void) |
63 | { | 97 | { |
64 | acpi_status status; | 98 | acpi_status status; |
@@ -175,10 +209,10 @@ acpi_status acpi_ut_delete_caches(void) | |||
175 | 209 | ||
176 | /* Free memory lists */ | 210 | /* Free memory lists */ |
177 | 211 | ||
178 | ACPI_FREE(acpi_gbl_global_list); | 212 | acpi_os_free(acpi_gbl_global_list); |
179 | acpi_gbl_global_list = NULL; | 213 | acpi_gbl_global_list = NULL; |
180 | 214 | ||
181 | ACPI_FREE(acpi_gbl_ns_node_list); | 215 | acpi_os_free(acpi_gbl_ns_node_list); |
182 | acpi_gbl_ns_node_list = NULL; | 216 | acpi_gbl_ns_node_list = NULL; |
183 | #endif | 217 | #endif |
184 | 218 | ||
@@ -302,82 +336,3 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer, | |||
302 | ACPI_MEMSET(buffer->pointer, 0, required_length); | 336 | ACPI_MEMSET(buffer->pointer, 0, required_length); |
303 | return (AE_OK); | 337 | return (AE_OK); |
304 | } | 338 | } |
305 | |||
306 | #ifdef NOT_USED_BY_LINUX | ||
307 | /******************************************************************************* | ||
308 | * | ||
309 | * FUNCTION: acpi_ut_allocate | ||
310 | * | ||
311 | * PARAMETERS: size - Size of the allocation | ||
312 | * component - Component type of caller | ||
313 | * module - Source file name of caller | ||
314 | * line - Line number of caller | ||
315 | * | ||
316 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
317 | * | ||
318 | * DESCRIPTION: Subsystem equivalent of malloc. | ||
319 | * | ||
320 | ******************************************************************************/ | ||
321 | |||
322 | void *acpi_ut_allocate(acpi_size size, | ||
323 | u32 component, const char *module, u32 line) | ||
324 | { | ||
325 | void *allocation; | ||
326 | |||
327 | ACPI_FUNCTION_TRACE_U32(ut_allocate, size); | ||
328 | |||
329 | /* Check for an inadvertent size of zero bytes */ | ||
330 | |||
331 | if (!size) { | ||
332 | ACPI_WARNING((module, line, | ||
333 | "Attempt to allocate zero bytes, allocating 1 byte")); | ||
334 | size = 1; | ||
335 | } | ||
336 | |||
337 | allocation = acpi_os_allocate(size); | ||
338 | if (!allocation) { | ||
339 | |||
340 | /* Report allocation error */ | ||
341 | |||
342 | ACPI_WARNING((module, line, | ||
343 | "Could not allocate size %u", (u32) size)); | ||
344 | |||
345 | return_PTR(NULL); | ||
346 | } | ||
347 | |||
348 | return_PTR(allocation); | ||
349 | } | ||
350 | |||
351 | /******************************************************************************* | ||
352 | * | ||
353 | * FUNCTION: acpi_ut_allocate_zeroed | ||
354 | * | ||
355 | * PARAMETERS: size - Size of the allocation | ||
356 | * component - Component type of caller | ||
357 | * module - Source file name of caller | ||
358 | * line - Line number of caller | ||
359 | * | ||
360 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
361 | * | ||
362 | * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory. | ||
363 | * | ||
364 | ******************************************************************************/ | ||
365 | |||
366 | void *acpi_ut_allocate_zeroed(acpi_size size, | ||
367 | u32 component, const char *module, u32 line) | ||
368 | { | ||
369 | void *allocation; | ||
370 | |||
371 | ACPI_FUNCTION_ENTRY(); | ||
372 | |||
373 | allocation = acpi_ut_allocate(size, component, module, line); | ||
374 | if (allocation) { | ||
375 | |||
376 | /* Clear the memory block */ | ||
377 | |||
378 | ACPI_MEMSET(allocation, 0, size); | ||
379 | } | ||
380 | |||
381 | return (allocation); | ||
382 | } | ||
383 | #endif | ||
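Note: the acpi_os_allocate_zeroed() default added above is compiled only when the host does not claim a native zeroing allocator; a host that has one defines USE_NATIVE_ALLOCATE_ZEROED and supplies the function itself, avoiding the allocate-then-memset round trip. A hedged sketch of what such an override could look like on a Linux-style host (illustrative only; the real hook and GFP-context handling live in the host OSL headers):

#define USE_NATIVE_ALLOCATE_ZEROED

#include <linux/slab.h>

static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
        return kzalloc(size, GFP_KERNEL);   /* assumes process context */
}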
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index a877a9647fd9..366bfec4b770 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c | |||
@@ -65,7 +65,7 @@ ACPI_MODULE_NAME("utcache") | |||
65 | acpi_status | 65 | acpi_status |
66 | acpi_os_create_cache(char *cache_name, | 66 | acpi_os_create_cache(char *cache_name, |
67 | u16 object_size, | 67 | u16 object_size, |
68 | u16 max_depth, struct acpi_memory_list ** return_cache) | 68 | u16 max_depth, struct acpi_memory_list **return_cache) |
69 | { | 69 | { |
70 | struct acpi_memory_list *cache; | 70 | struct acpi_memory_list *cache; |
71 | 71 | ||
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 1731c27c36a6..edff4e653d9a 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c | |||
@@ -552,7 +552,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object, | |||
552 | *ret_internal_object = internal_object; | 552 | *ret_internal_object = internal_object; |
553 | return_ACPI_STATUS(AE_OK); | 553 | return_ACPI_STATUS(AE_OK); |
554 | 554 | ||
555 | error_exit: | 555 | error_exit: |
556 | acpi_ut_remove_reference(internal_object); | 556 | acpi_ut_remove_reference(internal_object); |
557 | return_ACPI_STATUS(AE_NO_MEMORY); | 557 | return_ACPI_STATUS(AE_NO_MEMORY); |
558 | } | 558 | } |
@@ -899,7 +899,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type, | |||
899 | 899 | ||
900 | return (status); | 900 | return (status); |
901 | 901 | ||
902 | error_exit: | 902 | error_exit: |
903 | acpi_ut_remove_reference(target_object); | 903 | acpi_ut_remove_reference(target_object); |
904 | return (status); | 904 | return (status); |
905 | } | 905 | } |
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 5796e11a0671..1a67b3944b3b 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | 48 | ||
@@ -190,7 +191,7 @@ acpi_debug_print(u32 requested_debug_level, | |||
190 | * Display the module name, current line number, thread ID (if requested), | 191 | * Display the module name, current line number, thread ID (if requested), |
191 | * current procedure nesting level, and the current procedure name | 192 | * current procedure nesting level, and the current procedure name |
192 | */ | 193 | */ |
193 | acpi_os_printf("%8s-%04ld ", module_name, line_number); | 194 | acpi_os_printf("%9s-%04ld ", module_name, line_number); |
194 | 195 | ||
195 | if (ACPI_LV_THREADS & acpi_dbg_level) { | 196 | if (ACPI_LV_THREADS & acpi_dbg_level) { |
196 | acpi_os_printf("[%u] ", (u32)thread_id); | 197 | acpi_os_printf("[%u] ", (u32)thread_id); |
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 11e2e02e1618..b3f31dd89a45 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c | |||
@@ -41,7 +41,6 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | ||
45 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 45 | #include "accommon.h" |
47 | #include "acnamesp.h" | 46 | #include "acnamesp.h" |
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index d6b33f29d327..c07d2227ea42 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c | |||
@@ -649,7 +649,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) | |||
649 | 649 | ||
650 | return (AE_OK); | 650 | return (AE_OK); |
651 | 651 | ||
652 | error_exit: | 652 | error_exit: |
653 | 653 | ||
654 | ACPI_EXCEPTION((AE_INFO, status, | 654 | ACPI_EXCEPTION((AE_INFO, status, |
655 | "Could not update object reference count")); | 655 | "Could not update object reference count")); |
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 4fd68971019b..16fb90506db7 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c | |||
@@ -181,7 +181,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, | |||
181 | 181 | ||
182 | *return_desc = info->return_object; | 182 | *return_desc = info->return_object; |
183 | 183 | ||
184 | cleanup: | 184 | cleanup: |
185 | ACPI_FREE(info); | 185 | ACPI_FREE(info); |
186 | return_ACPI_STATUS(status); | 186 | return_ACPI_STATUS(status); |
187 | } | 187 | } |
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c index ff6d9e8aa842..3cf7b597edb9 100644 --- a/drivers/acpi/acpica/utexcep.c +++ b/drivers/acpi/acpica/utexcep.c | |||
@@ -41,8 +41,9 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define EXPORT_ACPI_INTERFACES | ||
45 | |||
44 | #define ACPI_DEFINE_EXCEPTION_TABLE | 46 | #define ACPI_DEFINE_EXCEPTION_TABLE |
45 | #include <linux/export.h> | ||
46 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 48 | #include "accommon.h" |
48 | 49 | ||
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index d6f26bf8a062..81f9a9584451 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
@@ -41,9 +41,9 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define EXPORT_ACPI_INTERFACES | ||
44 | #define DEFINE_ACPI_GLOBALS | 45 | #define DEFINE_ACPI_GLOBALS |
45 | 46 | ||
46 | #include <linux/export.h> | ||
47 | #include <acpi/acpi.h> | 47 | #include <acpi/acpi.h> |
48 | #include "accommon.h" | 48 | #include "accommon.h" |
49 | 49 | ||
@@ -289,9 +289,19 @@ acpi_status acpi_ut_init_globals(void) | |||
289 | 289 | ||
290 | acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; | 290 | acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; |
291 | 291 | ||
292 | /* Event counters */ | ||
293 | |||
294 | acpi_method_count = 0; | ||
295 | acpi_sci_count = 0; | ||
296 | acpi_gpe_count = 0; | ||
297 | |||
298 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { | ||
299 | acpi_fixed_event_count[i] = 0; | ||
300 | } | ||
301 | |||
292 | #if (!ACPI_REDUCED_HARDWARE) | 302 | #if (!ACPI_REDUCED_HARDWARE) |
293 | 303 | ||
294 | /* GPE support */ | 304 | /* GPE/SCI support */ |
295 | 305 | ||
296 | acpi_gbl_all_gpes_initialized = FALSE; | 306 | acpi_gbl_all_gpes_initialized = FALSE; |
297 | acpi_gbl_gpe_xrupt_list_head = NULL; | 307 | acpi_gbl_gpe_xrupt_list_head = NULL; |
@@ -300,6 +310,7 @@ acpi_status acpi_ut_init_globals(void) | |||
300 | acpi_current_gpe_count = 0; | 310 | acpi_current_gpe_count = 0; |
301 | 311 | ||
302 | acpi_gbl_global_event_handler = NULL; | 312 | acpi_gbl_global_event_handler = NULL; |
313 | acpi_gbl_sci_handler_list = NULL; | ||
303 | 314 | ||
304 | #endif /* !ACPI_REDUCED_HARDWARE */ | 315 | #endif /* !ACPI_REDUCED_HARDWARE */ |
305 | 316 | ||
@@ -377,6 +388,11 @@ acpi_status acpi_ut_init_globals(void) | |||
377 | /* Public globals */ | 388 | /* Public globals */ |
378 | 389 | ||
379 | ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) | 390 | ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) |
391 | |||
380 | ACPI_EXPORT_SYMBOL(acpi_dbg_level) | 392 | ACPI_EXPORT_SYMBOL(acpi_dbg_level) |
393 | |||
381 | ACPI_EXPORT_SYMBOL(acpi_dbg_layer) | 394 | ACPI_EXPORT_SYMBOL(acpi_dbg_layer) |
395 | |||
396 | ACPI_EXPORT_SYMBOL(acpi_gpe_count) | ||
397 | |||
382 | ACPI_EXPORT_SYMBOL(acpi_current_gpe_count) | 398 | ACPI_EXPORT_SYMBOL(acpi_current_gpe_count) |
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index fa69071db418..bfca7b4b6731 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c | |||
@@ -184,7 +184,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, | |||
184 | sub->length = length; | 184 | sub->length = length; |
185 | *return_id = sub; | 185 | *return_id = sub; |
186 | 186 | ||
187 | cleanup: | 187 | cleanup: |
188 | 188 | ||
189 | /* On exit, we must delete the return object */ | 189 | /* On exit, we must delete the return object */ |
190 | 190 | ||
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index aa61f66ee861..13e045025c33 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c | |||
@@ -180,7 +180,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count) | |||
180 | package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count + | 180 | package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count + |
181 | 1) * sizeof(void *)); | 181 | 1) * sizeof(void *)); |
182 | if (!package_elements) { | 182 | if (!package_elements) { |
183 | acpi_ut_remove_reference(package_desc); | 183 | ACPI_FREE(package_desc); |
184 | return_PTR(NULL); | 184 | return_PTR(NULL); |
185 | } | 185 | } |
186 | 186 | ||
@@ -396,7 +396,6 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name, | |||
396 | 396 | ||
397 | /* Mark the descriptor type */ | 397 | /* Mark the descriptor type */ |
398 | 398 | ||
399 | memset(object, 0, sizeof(union acpi_operand_object)); | ||
400 | ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND); | 399 | ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND); |
401 | 400 | ||
402 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n", | 401 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n", |
@@ -461,25 +460,28 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, | |||
461 | 460 | ||
462 | ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object); | 461 | ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object); |
463 | 462 | ||
463 | /* Start with the length of the (external) Acpi object */ | ||
464 | |||
465 | length = sizeof(union acpi_object); | ||
466 | |||
467 | /* A NULL object is allowed, can be a legal uninitialized package element */ | ||
468 | |||
469 | if (!internal_object) { | ||
464 | /* | 470 | /* |
465 | * Handle a null object (Could be a uninitialized package | 471 | * Object is NULL, just return the length of union acpi_object |
466 | * element -- which is legal) | 472 | * (A NULL union acpi_object is an object of all zeroes.) |
467 | */ | 473 | */ |
468 | if (!internal_object) { | 474 | *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length); |
469 | *obj_length = sizeof(union acpi_object); | ||
470 | return_ACPI_STATUS(AE_OK); | 475 | return_ACPI_STATUS(AE_OK); |
471 | } | 476 | } |
472 | 477 | ||
473 | /* Start with the length of the Acpi object */ | 478 | /* A Namespace Node should never appear here */ |
474 | |||
475 | length = sizeof(union acpi_object); | ||
476 | 479 | ||
477 | if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) { | 480 | if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) { |
478 | 481 | ||
479 | /* Object is a named object (reference), just return the length */ | 482 | /* A namespace node should never get here */ |
480 | 483 | ||
481 | *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length); | 484 | return_ACPI_STATUS(AE_AML_INTERNAL); |
482 | return_ACPI_STATUS(status); | ||
483 | } | 485 | } |
484 | 486 | ||
485 | /* | 487 | /* |
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c index 835340b26d37..eb3aca761369 100644 --- a/drivers/acpi/acpica/utownerid.c +++ b/drivers/acpi/acpica/utownerid.c | |||
@@ -148,7 +148,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) | |||
148 | ACPI_ERROR((AE_INFO, | 148 | ACPI_ERROR((AE_INFO, |
149 | "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT")); | 149 | "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT")); |
150 | 150 | ||
151 | exit: | 151 | exit: |
152 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | 152 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); |
153 | return_ACPI_STATUS(status); | 153 | return_ACPI_STATUS(status); |
154 | } | 154 | } |
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index cb7fa491decf..2c2accb9e534 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c | |||
@@ -643,7 +643,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state, | |||
643 | 643 | ||
644 | return (AE_OK); | 644 | return (AE_OK); |
645 | 645 | ||
646 | invalid_resource: | 646 | invalid_resource: |
647 | 647 | ||
648 | if (walk_state) { | 648 | if (walk_state) { |
649 | ACPI_ERROR((AE_INFO, | 649 | ACPI_ERROR((AE_INFO, |
@@ -652,7 +652,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state, | |||
652 | } | 652 | } |
653 | return (AE_AML_INVALID_RESOURCE_TYPE); | 653 | return (AE_AML_INVALID_RESOURCE_TYPE); |
654 | 654 | ||
655 | bad_resource_length: | 655 | bad_resource_length: |
656 | 656 | ||
657 | if (walk_state) { | 657 | if (walk_state) { |
658 | ACPI_ERROR((AE_INFO, | 658 | ACPI_ERROR((AE_INFO, |
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index a6b729d4c1dc..03c4c2febd84 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c | |||
@@ -161,7 +161,6 @@ union acpi_generic_state *acpi_ut_create_generic_state(void) | |||
161 | if (state) { | 161 | if (state) { |
162 | 162 | ||
163 | /* Initialize */ | 163 | /* Initialize */ |
164 | memset(state, 0, sizeof(union acpi_generic_state)); | ||
165 | state->common.descriptor_type = ACPI_DESC_TYPE_STATE; | 164 | state->common.descriptor_type = ACPI_DESC_TYPE_STATE; |
166 | } | 165 | } |
167 | 166 | ||
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index cb1e9cc32d5f..45c0eb26b33d 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c | |||
@@ -310,7 +310,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) | |||
310 | 310 | ||
311 | /* All done, normal exit */ | 311 | /* All done, normal exit */ |
312 | 312 | ||
313 | all_done: | 313 | all_done: |
314 | 314 | ||
315 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n", | 315 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n", |
316 | ACPI_FORMAT_UINT64(return_value))); | 316 | ACPI_FORMAT_UINT64(return_value))); |
@@ -318,7 +318,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) | |||
318 | *ret_integer = return_value; | 318 | *ret_integer = return_value; |
319 | return_ACPI_STATUS(AE_OK); | 319 | return_ACPI_STATUS(AE_OK); |
320 | 320 | ||
321 | error_exit: | 321 | error_exit: |
322 | /* Base was set/validated above */ | 322 | /* Base was set/validated above */ |
323 | 323 | ||
324 | if (base == 10) { | 324 | if (base == 10) { |
@@ -584,3 +584,65 @@ void ut_convert_backslashes(char *pathname) | |||
584 | } | 584 | } |
585 | } | 585 | } |
586 | #endif | 586 | #endif |
587 | |||
588 | #if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) | ||
589 | /******************************************************************************* | ||
590 | * | ||
591 | * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat | ||
592 | * | ||
593 | * PARAMETERS: Adds a "DestSize" parameter to each of the standard string | ||
594 | * functions. This is the size of the Destination buffer. | ||
595 | * | ||
596 | * RETURN: TRUE if the operation would overflow the destination buffer. | ||
597 | * | ||
598 | * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that | ||
599 | * the result of the operation will not overflow the output string | ||
600 | * buffer. | ||
601 | * | ||
602 | * NOTE: These functions are typically only helpful for processing | ||
603 | * user input and command lines. For most ACPICA code, the | ||
604 | * required buffer length is precisely calculated before buffer | ||
605 | * allocation, so the use of these functions is unnecessary. | ||
606 | * | ||
607 | ******************************************************************************/ | ||
608 | |||
609 | u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source) | ||
610 | { | ||
611 | |||
612 | if (ACPI_STRLEN(source) >= dest_size) { | ||
613 | return (TRUE); | ||
614 | } | ||
615 | |||
616 | ACPI_STRCPY(dest, source); | ||
617 | return (FALSE); | ||
618 | } | ||
619 | |||
620 | u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source) | ||
621 | { | ||
622 | |||
623 | if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) { | ||
624 | return (TRUE); | ||
625 | } | ||
626 | |||
627 | ACPI_STRCAT(dest, source); | ||
628 | return (FALSE); | ||
629 | } | ||
630 | |||
631 | u8 | ||
632 | acpi_ut_safe_strncat(char *dest, | ||
633 | acpi_size dest_size, | ||
634 | char *source, acpi_size max_transfer_length) | ||
635 | { | ||
636 | acpi_size actual_transfer_length; | ||
637 | |||
638 | actual_transfer_length = | ||
639 | ACPI_MIN(max_transfer_length, ACPI_STRLEN(source)); | ||
640 | |||
641 | if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) { | ||
642 | return (TRUE); | ||
643 | } | ||
644 | |||
645 | ACPI_STRNCAT(dest, source, max_transfer_length); | ||
646 | return (FALSE); | ||
647 | } | ||
648 | #endif | ||
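Note: a short usage sketch for the new bounded string helpers (only built for ACPI_DEBUGGER / ACPI_APPLICATION, per the #if above). The buffer size and namespace path below are hypothetical; the pattern is simply "a TRUE return means the operation would not have fit":

static acpi_status build_device_path(void)
{
        char pathname[32];      /* hypothetical fixed-size destination */

        if (acpi_ut_safe_strcpy(pathname, sizeof(pathname), "\\_SB.PCI0") ||
            acpi_ut_safe_strcat(pathname, sizeof(pathname), ".LPCB.EC0")) {
                return (AE_BUFFER_OVERFLOW);    /* would have overflowed */
        }

        return (AE_OK);
}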
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 160f13f4aab5..c0027773cccb 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c | |||
@@ -130,10 +130,23 @@ void *acpi_ut_allocate_and_track(acpi_size size, | |||
130 | struct acpi_debug_mem_block *allocation; | 130 | struct acpi_debug_mem_block *allocation; |
131 | acpi_status status; | 131 | acpi_status status; |
132 | 132 | ||
133 | /* Check for an inadvertent size of zero bytes */ | ||
134 | |||
135 | if (!size) { | ||
136 | ACPI_WARNING((module, line, | ||
137 | "Attempt to allocate zero bytes, allocating 1 byte")); | ||
138 | size = 1; | ||
139 | } | ||
140 | |||
133 | allocation = | 141 | allocation = |
134 | acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header), | 142 | acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header)); |
135 | component, module, line); | ||
136 | if (!allocation) { | 143 | if (!allocation) { |
144 | |||
145 | /* Report allocation error */ | ||
146 | |||
147 | ACPI_WARNING((module, line, | ||
148 | "Could not allocate size %u", (u32)size)); | ||
149 | |||
137 | return (NULL); | 150 | return (NULL); |
138 | } | 151 | } |
139 | 152 | ||
@@ -179,9 +192,17 @@ void *acpi_ut_allocate_zeroed_and_track(acpi_size size, | |||
179 | struct acpi_debug_mem_block *allocation; | 192 | struct acpi_debug_mem_block *allocation; |
180 | acpi_status status; | 193 | acpi_status status; |
181 | 194 | ||
195 | /* Check for an inadvertent size of zero bytes */ | ||
196 | |||
197 | if (!size) { | ||
198 | ACPI_WARNING((module, line, | ||
199 | "Attempt to allocate zero bytes, allocating 1 byte")); | ||
200 | size = 1; | ||
201 | } | ||
202 | |||
182 | allocation = | 203 | allocation = |
183 | acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header), | 204 | acpi_os_allocate_zeroed(size + |
184 | component, module, line); | 205 | sizeof(struct acpi_debug_mem_header)); |
185 | if (!allocation) { | 206 | if (!allocation) { |
186 | 207 | ||
187 | /* Report allocation error */ | 208 | /* Report allocation error */ |
@@ -409,7 +430,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation, | |||
409 | element->next = allocation; | 430 | element->next = allocation; |
410 | } | 431 | } |
411 | 432 | ||
412 | unlock_and_exit: | 433 | unlock_and_exit: |
413 | status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); | 434 | status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); |
414 | return_ACPI_STATUS(status); | 435 | return_ACPI_STATUS(status); |
415 | } | 436 | } |
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 03a211e6e26a..be322c83643a 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acdebug.h" | 48 | #include "acdebug.h" |
@@ -60,7 +61,7 @@ ACPI_MODULE_NAME("utxface") | |||
60 | * DESCRIPTION: Shutdown the ACPICA subsystem and release all resources. | 61 | * DESCRIPTION: Shutdown the ACPICA subsystem and release all resources. |
61 | * | 62 | * |
62 | ******************************************************************************/ | 63 | ******************************************************************************/ |
63 | acpi_status acpi_terminate(void) | 64 | acpi_status __init acpi_terminate(void) |
64 | { | 65 | { |
65 | acpi_status status; | 66 | acpi_status status; |
66 | 67 | ||
@@ -104,7 +105,7 @@ acpi_status acpi_terminate(void) | |||
104 | return_ACPI_STATUS(status); | 105 | return_ACPI_STATUS(status); |
105 | } | 106 | } |
106 | 107 | ||
107 | ACPI_EXPORT_SYMBOL(acpi_terminate) | 108 | ACPI_EXPORT_SYMBOL_INIT(acpi_terminate) |
108 | 109 | ||
109 | #ifndef ACPI_ASL_COMPILER | 110 | #ifndef ACPI_ASL_COMPILER |
110 | #ifdef ACPI_FUTURE_USAGE | 111 | #ifdef ACPI_FUTURE_USAGE |
@@ -207,6 +208,44 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer) | |||
207 | 208 | ||
208 | ACPI_EXPORT_SYMBOL(acpi_get_system_info) | 209 | ACPI_EXPORT_SYMBOL(acpi_get_system_info) |
209 | 210 | ||
211 | /******************************************************************************* | ||
212 | * | ||
213 | * FUNCTION: acpi_get_statistics | ||
214 | * | ||
215 | * PARAMETERS: stats - Where the statistics are returned | ||
216 | * | ||
217 | * RETURN: status - the status of the call | ||
218 | * | ||
219 | * DESCRIPTION: Get the contents of the various system counters | ||
220 | * | ||
221 | ******************************************************************************/ | ||
222 | acpi_status acpi_get_statistics(struct acpi_statistics *stats) | ||
223 | { | ||
224 | ACPI_FUNCTION_TRACE(acpi_get_statistics); | ||
225 | |||
226 | /* Parameter validation */ | ||
227 | |||
228 | if (!stats) { | ||
229 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
230 | } | ||
231 | |||
232 | /* Various interrupt-based event counters */ | ||
233 | |||
234 | stats->sci_count = acpi_sci_count; | ||
235 | stats->gpe_count = acpi_gpe_count; | ||
236 | |||
237 | ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count, | ||
238 | sizeof(acpi_fixed_event_count)); | ||
239 | |||
240 | /* Other counters */ | ||
241 | |||
242 | stats->method_count = acpi_method_count; | ||
243 | |||
244 | return_ACPI_STATUS(AE_OK); | ||
245 | } | ||
246 | |||
247 | ACPI_EXPORT_SYMBOL(acpi_get_statistics) | ||
248 | |||
210 | /***************************************************************************** | 249 | /***************************************************************************** |
211 | * | 250 | * |
212 | * FUNCTION: acpi_install_initialization_handler | 251 | * FUNCTION: acpi_install_initialization_handler |
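Note: a caller-side sketch of the new statistics interface, reading back the counters that utglobal.c now clears in acpi_ut_init_globals(). The field names come from the hunk above; the counter types and the printing are only illustrative:

#include <acpi/acpi.h>

void show_acpi_event_counters(void)
{
        struct acpi_statistics stats;
        u32 i;

        if (ACPI_FAILURE(acpi_get_statistics(&stats)))
                return;

        acpi_os_printf("SCI %u, GPE %u, methods %u\n",
                       stats.sci_count, stats.gpe_count, stats.method_count);

        for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++)
                acpi_os_printf("fixed event %u: %u\n",
                               i, stats.fixed_event_count[i]);
}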
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index e966a2e47b76..f7edb88f6054 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | 48 | ||
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index 41ebaaf8bb1a..75efea0539c1 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c | |||
@@ -41,7 +41,8 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/export.h> | 44 | #define EXPORT_ACPI_INTERFACES |
45 | |||
45 | #include <acpi/acpi.h> | 46 | #include <acpi/acpi.h> |
46 | #include "accommon.h" | 47 | #include "accommon.h" |
47 | #include "acevents.h" | 48 | #include "acevents.h" |
@@ -64,7 +65,7 @@ ACPI_MODULE_NAME("utxfinit") | |||
64 | * called, so any early initialization belongs here. | 65 | * called, so any early initialization belongs here. |
65 | * | 66 | * |
66 | ******************************************************************************/ | 67 | ******************************************************************************/ |
67 | acpi_status acpi_initialize_subsystem(void) | 68 | acpi_status __init acpi_initialize_subsystem(void) |
68 | { | 69 | { |
69 | acpi_status status; | 70 | acpi_status status; |
70 | 71 | ||
@@ -124,7 +125,8 @@ acpi_status acpi_initialize_subsystem(void) | |||
124 | ACPI_DEBUGGER_EXEC(status = acpi_db_initialize()); | 125 | ACPI_DEBUGGER_EXEC(status = acpi_db_initialize()); |
125 | return_ACPI_STATUS(status); | 126 | return_ACPI_STATUS(status); |
126 | } | 127 | } |
127 | ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem) | 128 | |
129 | ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem) | ||
128 | 130 | ||
129 | /******************************************************************************* | 131 | /******************************************************************************* |
130 | * | 132 | * |
@@ -138,7 +140,7 @@ ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem) | |||
138 | * Puts system into ACPI mode if it isn't already. | 140 | * Puts system into ACPI mode if it isn't already. |
139 | * | 141 | * |
140 | ******************************************************************************/ | 142 | ******************************************************************************/ |
141 | acpi_status acpi_enable_subsystem(u32 flags) | 143 | acpi_status __init acpi_enable_subsystem(u32 flags) |
142 | { | 144 | { |
143 | acpi_status status = AE_OK; | 145 | acpi_status status = AE_OK; |
144 | 146 | ||
@@ -228,7 +230,8 @@ acpi_status acpi_enable_subsystem(u32 flags) | |||
228 | 230 | ||
229 | return_ACPI_STATUS(status); | 231 | return_ACPI_STATUS(status); |
230 | } | 232 | } |
231 | ACPI_EXPORT_SYMBOL(acpi_enable_subsystem) | 233 | |
234 | ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem) | ||
232 | 235 | ||
233 | /******************************************************************************* | 236 | /******************************************************************************* |
234 | * | 237 | * |
@@ -242,7 +245,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem) | |||
242 | * objects and executing AML code for Regions, buffers, etc. | 245 | * objects and executing AML code for Regions, buffers, etc. |
243 | * | 246 | * |
244 | ******************************************************************************/ | 247 | ******************************************************************************/ |
245 | acpi_status acpi_initialize_objects(u32 flags) | 248 | acpi_status __init acpi_initialize_objects(u32 flags) |
246 | { | 249 | { |
247 | acpi_status status = AE_OK; | 250 | acpi_status status = AE_OK; |
248 | 251 | ||
@@ -314,4 +317,5 @@ acpi_status acpi_initialize_objects(u32 flags) | |||
314 | acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; | 317 | acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; |
315 | return_ACPI_STATUS(status); | 318 | return_ACPI_STATUS(status); |
316 | } | 319 | } |
317 | ACPI_EXPORT_SYMBOL(acpi_initialize_objects) | 320 | |
321 | ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects) | ||
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 2c9958cd7a43..fbf1aceda8b8 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -36,12 +36,6 @@ | |||
36 | #include <linux/suspend.h> | 36 | #include <linux/suspend.h> |
37 | #include <asm/unaligned.h> | 37 | #include <asm/unaligned.h> |
38 | 38 | ||
39 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
40 | #include <linux/proc_fs.h> | ||
41 | #include <linux/seq_file.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | #endif | ||
44 | |||
45 | #include <acpi/acpi_bus.h> | 39 | #include <acpi/acpi_bus.h> |
46 | #include <acpi/acpi_drivers.h> | 40 | #include <acpi/acpi_drivers.h> |
47 | #include <linux/power_supply.h> | 41 | #include <linux/power_supply.h> |
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000; | |||
72 | module_param(cache_time, uint, 0644); | 66 | module_param(cache_time, uint, 0644); |
73 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 67 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
74 | 68 | ||
75 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
76 | extern struct proc_dir_entry *acpi_lock_battery_dir(void); | ||
77 | extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); | ||
78 | |||
79 | enum acpi_battery_files { | ||
80 | info_tag = 0, | ||
81 | state_tag, | ||
82 | alarm_tag, | ||
83 | ACPI_BATTERY_NUMFILES, | ||
84 | }; | ||
85 | |||
86 | #endif | ||
87 | |||
88 | static const struct acpi_device_id battery_device_ids[] = { | 69 | static const struct acpi_device_id battery_device_ids[] = { |
89 | {"PNP0C0A", 0}, | 70 | {"PNP0C0A", 0}, |
90 | {"", 0}, | 71 | {"", 0}, |
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = { | |||
320 | POWER_SUPPLY_PROP_SERIAL_NUMBER, | 301 | POWER_SUPPLY_PROP_SERIAL_NUMBER, |
321 | }; | 302 | }; |
322 | 303 | ||
323 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
324 | inline char *acpi_battery_units(struct acpi_battery *battery) | ||
325 | { | ||
326 | return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? | ||
327 | "mA" : "mW"; | ||
328 | } | ||
329 | #endif | ||
330 | |||
331 | /* -------------------------------------------------------------------------- | 304 | /* -------------------------------------------------------------------------- |
332 | Battery Management | 305 | Battery Management |
333 | -------------------------------------------------------------------------- */ | 306 | -------------------------------------------------------------------------- */ |
@@ -741,279 +714,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery) | |||
741 | } | 714 | } |
742 | 715 | ||
743 | /* -------------------------------------------------------------------------- | 716 | /* -------------------------------------------------------------------------- |
744 | FS Interface (/proc) | ||
745 | -------------------------------------------------------------------------- */ | ||
746 | |||
747 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
748 | static struct proc_dir_entry *acpi_battery_dir; | ||
749 | |||
750 | static int acpi_battery_print_info(struct seq_file *seq, int result) | ||
751 | { | ||
752 | struct acpi_battery *battery = seq->private; | ||
753 | |||
754 | if (result) | ||
755 | goto end; | ||
756 | |||
757 | seq_printf(seq, "present: %s\n", | ||
758 | acpi_battery_present(battery) ? "yes" : "no"); | ||
759 | if (!acpi_battery_present(battery)) | ||
760 | goto end; | ||
761 | if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) | ||
762 | seq_printf(seq, "design capacity: unknown\n"); | ||
763 | else | ||
764 | seq_printf(seq, "design capacity: %d %sh\n", | ||
765 | battery->design_capacity, | ||
766 | acpi_battery_units(battery)); | ||
767 | |||
768 | if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) | ||
769 | seq_printf(seq, "last full capacity: unknown\n"); | ||
770 | else | ||
771 | seq_printf(seq, "last full capacity: %d %sh\n", | ||
772 | battery->full_charge_capacity, | ||
773 | acpi_battery_units(battery)); | ||
774 | |||
775 | seq_printf(seq, "battery technology: %srechargeable\n", | ||
776 | (!battery->technology)?"non-":""); | ||
777 | |||
778 | if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) | ||
779 | seq_printf(seq, "design voltage: unknown\n"); | ||
780 | else | ||
781 | seq_printf(seq, "design voltage: %d mV\n", | ||
782 | battery->design_voltage); | ||
783 | seq_printf(seq, "design capacity warning: %d %sh\n", | ||
784 | battery->design_capacity_warning, | ||
785 | acpi_battery_units(battery)); | ||
786 | seq_printf(seq, "design capacity low: %d %sh\n", | ||
787 | battery->design_capacity_low, | ||
788 | acpi_battery_units(battery)); | ||
789 | seq_printf(seq, "cycle count: %i\n", battery->cycle_count); | ||
790 | seq_printf(seq, "capacity granularity 1: %d %sh\n", | ||
791 | battery->capacity_granularity_1, | ||
792 | acpi_battery_units(battery)); | ||
793 | seq_printf(seq, "capacity granularity 2: %d %sh\n", | ||
794 | battery->capacity_granularity_2, | ||
795 | acpi_battery_units(battery)); | ||
796 | seq_printf(seq, "model number: %s\n", battery->model_number); | ||
797 | seq_printf(seq, "serial number: %s\n", battery->serial_number); | ||
798 | seq_printf(seq, "battery type: %s\n", battery->type); | ||
799 | seq_printf(seq, "OEM info: %s\n", battery->oem_info); | ||
800 | end: | ||
801 | if (result) | ||
802 | seq_printf(seq, "ERROR: Unable to read battery info\n"); | ||
803 | return result; | ||
804 | } | ||
805 | |||
806 | static int acpi_battery_print_state(struct seq_file *seq, int result) | ||
807 | { | ||
808 | struct acpi_battery *battery = seq->private; | ||
809 | |||
810 | if (result) | ||
811 | goto end; | ||
812 | |||
813 | seq_printf(seq, "present: %s\n", | ||
814 | acpi_battery_present(battery) ? "yes" : "no"); | ||
815 | if (!acpi_battery_present(battery)) | ||
816 | goto end; | ||
817 | |||
818 | seq_printf(seq, "capacity state: %s\n", | ||
819 | (battery->state & 0x04) ? "critical" : "ok"); | ||
820 | if ((battery->state & 0x01) && (battery->state & 0x02)) | ||
821 | seq_printf(seq, | ||
822 | "charging state: charging/discharging\n"); | ||
823 | else if (battery->state & 0x01) | ||
824 | seq_printf(seq, "charging state: discharging\n"); | ||
825 | else if (battery->state & 0x02) | ||
826 | seq_printf(seq, "charging state: charging\n"); | ||
827 | else | ||
828 | seq_printf(seq, "charging state: charged\n"); | ||
829 | |||
830 | if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
831 | seq_printf(seq, "present rate: unknown\n"); | ||
832 | else | ||
833 | seq_printf(seq, "present rate: %d %s\n", | ||
834 | battery->rate_now, acpi_battery_units(battery)); | ||
835 | |||
836 | if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
837 | seq_printf(seq, "remaining capacity: unknown\n"); | ||
838 | else | ||
839 | seq_printf(seq, "remaining capacity: %d %sh\n", | ||
840 | battery->capacity_now, acpi_battery_units(battery)); | ||
841 | if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
842 | seq_printf(seq, "present voltage: unknown\n"); | ||
843 | else | ||
844 | seq_printf(seq, "present voltage: %d mV\n", | ||
845 | battery->voltage_now); | ||
846 | end: | ||
847 | if (result) | ||
848 | seq_printf(seq, "ERROR: Unable to read battery state\n"); | ||
849 | |||
850 | return result; | ||
851 | } | ||
852 | |||
853 | static int acpi_battery_print_alarm(struct seq_file *seq, int result) | ||
854 | { | ||
855 | struct acpi_battery *battery = seq->private; | ||
856 | |||
857 | if (result) | ||
858 | goto end; | ||
859 | |||
860 | if (!acpi_battery_present(battery)) { | ||
861 | seq_printf(seq, "present: no\n"); | ||
862 | goto end; | ||
863 | } | ||
864 | seq_printf(seq, "alarm: "); | ||
865 | if (!battery->alarm) | ||
866 | seq_printf(seq, "unsupported\n"); | ||
867 | else | ||
868 | seq_printf(seq, "%u %sh\n", battery->alarm, | ||
869 | acpi_battery_units(battery)); | ||
870 | end: | ||
871 | if (result) | ||
872 | seq_printf(seq, "ERROR: Unable to read battery alarm\n"); | ||
873 | return result; | ||
874 | } | ||
875 | |||
876 | static ssize_t acpi_battery_write_alarm(struct file *file, | ||
877 | const char __user * buffer, | ||
878 | size_t count, loff_t * ppos) | ||
879 | { | ||
880 | int result = 0; | ||
881 | char alarm_string[12] = { '\0' }; | ||
882 | struct seq_file *m = file->private_data; | ||
883 | struct acpi_battery *battery = m->private; | ||
884 | |||
885 | if (!battery || (count > sizeof(alarm_string) - 1)) | ||
886 | return -EINVAL; | ||
887 | if (!acpi_battery_present(battery)) { | ||
888 | result = -ENODEV; | ||
889 | goto end; | ||
890 | } | ||
891 | if (copy_from_user(alarm_string, buffer, count)) { | ||
892 | result = -EFAULT; | ||
893 | goto end; | ||
894 | } | ||
895 | alarm_string[count] = '\0'; | ||
896 | battery->alarm = simple_strtol(alarm_string, NULL, 0); | ||
897 | result = acpi_battery_set_alarm(battery); | ||
898 | end: | ||
899 | if (!result) | ||
900 | return count; | ||
901 | return result; | ||
902 | } | ||
903 | |||
904 | typedef int(*print_func)(struct seq_file *seq, int result); | ||
905 | |||
906 | static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { | ||
907 | acpi_battery_print_info, | ||
908 | acpi_battery_print_state, | ||
909 | acpi_battery_print_alarm, | ||
910 | }; | ||
911 | |||
912 | static int acpi_battery_read(int fid, struct seq_file *seq) | ||
913 | { | ||
914 | struct acpi_battery *battery = seq->private; | ||
915 | int result = acpi_battery_update(battery); | ||
916 | return acpi_print_funcs[fid](seq, result); | ||
917 | } | ||
918 | |||
919 | #define DECLARE_FILE_FUNCTIONS(_name) \ | ||
920 | static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ | ||
921 | { \ | ||
922 | return acpi_battery_read(_name##_tag, seq); \ | ||
923 | } \ | ||
924 | static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ | ||
925 | { \ | ||
926 | return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ | ||
927 | } | ||
928 | |||
929 | DECLARE_FILE_FUNCTIONS(info); | ||
930 | DECLARE_FILE_FUNCTIONS(state); | ||
931 | DECLARE_FILE_FUNCTIONS(alarm); | ||
932 | |||
933 | #undef DECLARE_FILE_FUNCTIONS | ||
934 | |||
935 | #define FILE_DESCRIPTION_RO(_name) \ | ||
936 | { \ | ||
937 | .name = __stringify(_name), \ | ||
938 | .mode = S_IRUGO, \ | ||
939 | .ops = { \ | ||
940 | .open = acpi_battery_##_name##_open_fs, \ | ||
941 | .read = seq_read, \ | ||
942 | .llseek = seq_lseek, \ | ||
943 | .release = single_release, \ | ||
944 | .owner = THIS_MODULE, \ | ||
945 | }, \ | ||
946 | } | ||
947 | |||
948 | #define FILE_DESCRIPTION_RW(_name) \ | ||
949 | { \ | ||
950 | .name = __stringify(_name), \ | ||
951 | .mode = S_IFREG | S_IRUGO | S_IWUSR, \ | ||
952 | .ops = { \ | ||
953 | .open = acpi_battery_##_name##_open_fs, \ | ||
954 | .read = seq_read, \ | ||
955 | .llseek = seq_lseek, \ | ||
956 | .write = acpi_battery_write_##_name, \ | ||
957 | .release = single_release, \ | ||
958 | .owner = THIS_MODULE, \ | ||
959 | }, \ | ||
960 | } | ||
961 | |||
962 | static const struct battery_file { | ||
963 | struct file_operations ops; | ||
964 | umode_t mode; | ||
965 | const char *name; | ||
966 | } acpi_battery_file[] = { | ||
967 | FILE_DESCRIPTION_RO(info), | ||
968 | FILE_DESCRIPTION_RO(state), | ||
969 | FILE_DESCRIPTION_RW(alarm), | ||
970 | }; | ||
971 | |||
972 | #undef FILE_DESCRIPTION_RO | ||
973 | #undef FILE_DESCRIPTION_RW | ||
974 | |||
975 | static int acpi_battery_add_fs(struct acpi_device *device) | ||
976 | { | ||
977 | struct proc_dir_entry *entry = NULL; | ||
978 | int i; | ||
979 | |||
980 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," | ||
981 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
982 | if (!acpi_device_dir(device)) { | ||
983 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | ||
984 | acpi_battery_dir); | ||
985 | if (!acpi_device_dir(device)) | ||
986 | return -ENODEV; | ||
987 | } | ||
988 | |||
989 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { | ||
990 | entry = proc_create_data(acpi_battery_file[i].name, | ||
991 | acpi_battery_file[i].mode, | ||
992 | acpi_device_dir(device), | ||
993 | &acpi_battery_file[i].ops, | ||
994 | acpi_driver_data(device)); | ||
995 | if (!entry) | ||
996 | return -ENODEV; | ||
997 | } | ||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static void acpi_battery_remove_fs(struct acpi_device *device) | ||
1002 | { | ||
1003 | int i; | ||
1004 | if (!acpi_device_dir(device)) | ||
1005 | return; | ||
1006 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) | ||
1007 | remove_proc_entry(acpi_battery_file[i].name, | ||
1008 | acpi_device_dir(device)); | ||
1009 | |||
1010 | remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); | ||
1011 | acpi_device_dir(device) = NULL; | ||
1012 | } | ||
1013 | |||
1014 | #endif | ||
1015 | |||
1016 | /* -------------------------------------------------------------------------- | ||
1017 | Driver Interface | 717 | Driver Interface |
1018 | -------------------------------------------------------------------------- */ | 718 | -------------------------------------------------------------------------- */ |
1019 | 719 | ||
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device) | |||
1075 | result = acpi_battery_update(battery); | 775 | result = acpi_battery_update(battery); |
1076 | if (result) | 776 | if (result) |
1077 | goto fail; | 777 | goto fail; |
1078 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1079 | result = acpi_battery_add_fs(device); | ||
1080 | #endif | ||
1081 | if (result) { | ||
1082 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1083 | acpi_battery_remove_fs(device); | ||
1084 | #endif | ||
1085 | goto fail; | ||
1086 | } | ||
1087 | 778 | ||
1088 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", | 779 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", |
1089 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), | 780 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), |
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device) | |||
1110 | return -EINVAL; | 801 | return -EINVAL; |
1111 | battery = acpi_driver_data(device); | 802 | battery = acpi_driver_data(device); |
1112 | unregister_pm_notifier(&battery->pm_nb); | 803 | unregister_pm_notifier(&battery->pm_nb); |
1113 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1114 | acpi_battery_remove_fs(device); | ||
1115 | #endif | ||
1116 | sysfs_remove_battery(battery); | 804 | sysfs_remove_battery(battery); |
1117 | mutex_destroy(&battery->lock); | 805 | mutex_destroy(&battery->lock); |
1118 | mutex_destroy(&battery->sysfs_lock); | 806 | mutex_destroy(&battery->sysfs_lock); |
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) | |||
1158 | { | 846 | { |
1159 | if (acpi_disabled) | 847 | if (acpi_disabled) |
1160 | return; | 848 | return; |
1161 | #ifdef CONFIG_ACPI_PROCFS_POWER | 849 | acpi_bus_register_driver(&acpi_battery_driver); |
1162 | acpi_battery_dir = acpi_lock_battery_dir(); | ||
1163 | if (!acpi_battery_dir) | ||
1164 | return; | ||
1165 | #endif | ||
1166 | if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { | ||
1167 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1168 | acpi_unlock_battery_dir(acpi_battery_dir); | ||
1169 | #endif | ||
1170 | return; | ||
1171 | } | ||
1172 | return; | ||
1173 | } | 850 | } |
1174 | 851 | ||
1175 | static int __init acpi_battery_init(void) | 852 | static int __init acpi_battery_init(void) |
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void) | |||
1181 | static void __exit acpi_battery_exit(void) | 858 | static void __exit acpi_battery_exit(void) |
1182 | { | 859 | { |
1183 | acpi_bus_unregister_driver(&acpi_battery_driver); | 860 | acpi_bus_unregister_driver(&acpi_battery_driver); |
1184 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1185 | acpi_unlock_battery_dir(acpi_battery_dir); | ||
1186 | #endif | ||
1187 | } | 861 | } |
1188 | 862 | ||
1189 | module_init(acpi_battery_init); | 863 | module_init(acpi_battery_init); |
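The FILE_DESCRIPTION_RO()/FILE_DESCRIPTION_RW() macros removed at the top of this hunk built one proc-file table entry each by pasting the file name into the open/write callback identifiers and stringifying it for the .name field. A minimal, self-contained sketch of that token-pasting pattern follows; the demo_* names and the plain userspace structures are illustrative stand-ins, only __stringify() and the ## operator behave as in the kernel macros above.

/*
 * Illustrative sketch only: the token-pasting pattern behind the removed
 * FILE_DESCRIPTION_RO() macro, reworked as plain userspace C.  The demo_*
 * names are hypothetical.
 */
#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

struct demo_file {
        const char *name;
        int (*show)(void);
};

#define DEMO_FILE_RO(_name)                             \
        {                                               \
                .name = __stringify(_name),             \
                .show = demo_##_name##_show,            \
        }

static int demo_info_show(void)  { return printf("info\n"); }
static int demo_state_show(void) { return printf("state\n"); }

static const struct demo_file demo_files[] = {
        DEMO_FILE_RO(info),     /* { "info",  demo_info_show  } */
        DEMO_FILE_RO(state),    /* { "state", demo_state_show } */
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(demo_files) / sizeof(demo_files[0]); i++)
                printf("%s -> %d\n", demo_files[i].name, demo_files[i].show());
        return 0;
}

DEMO_FILE_RO(info) expands to { "info", demo_info_show }, which mirrors how the battery table bound acpi_battery_info_open_fs and friends before the procfs interface was dropped.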
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 9515f18898b2..fb848378d582 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c | |||
@@ -274,6 +274,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
274 | }, | 274 | }, |
275 | }, | 275 | }, |
276 | { | 276 | { |
277 | .callback = dmi_disable_osi_vista, | ||
278 | .ident = "Toshiba NB100", | ||
279 | .matches = { | ||
280 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
281 | DMI_MATCH(DMI_PRODUCT_NAME, "NB100"), | ||
282 | }, | ||
283 | }, | ||
284 | |||
285 | /* | ||
286 | * The following machines have broken backlight support when reporting | ||
287 | * the Windows 2012 OSI, so disable it until their support is fixed. | ||
288 | */ | ||
289 | { | ||
277 | .callback = dmi_disable_osi_win8, | 290 | .callback = dmi_disable_osi_win8, |
278 | .ident = "ASUS Zenbook Prime UX31A", | 291 | .ident = "ASUS Zenbook Prime UX31A", |
279 | .matches = { | 292 | .matches = { |
@@ -291,12 +304,60 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
291 | }, | 304 | }, |
292 | { | 305 | { |
293 | .callback = dmi_disable_osi_win8, | 306 | .callback = dmi_disable_osi_win8, |
294 | .ident = "Lenovo ThinkPad Edge E530", | 307 | .ident = "ThinkPad Edge E530", |
295 | .matches = { | 308 | .matches = { |
296 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 309 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
297 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"), | 310 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"), |
298 | }, | 311 | }, |
299 | }, | 312 | }, |
313 | { | ||
314 | .callback = dmi_disable_osi_win8, | ||
315 | .ident = "ThinkPad Edge E530", | ||
316 | .matches = { | ||
317 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
318 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"), | ||
319 | }, | ||
320 | }, | ||
321 | { | ||
322 | .callback = dmi_disable_osi_win8, | ||
323 | .ident = "ThinkPad Edge E530", | ||
324 | .matches = { | ||
325 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
326 | DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"), | ||
327 | }, | ||
328 | }, | ||
329 | { | ||
330 | .callback = dmi_disable_osi_win8, | ||
331 | .ident = "Acer Aspire V5-573G", | ||
332 | .matches = { | ||
333 | DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), | ||
334 | DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"), | ||
335 | }, | ||
336 | }, | ||
337 | { | ||
338 | .callback = dmi_disable_osi_win8, | ||
339 | .ident = "Acer Aspire V5-572G", | ||
340 | .matches = { | ||
341 | DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), | ||
342 | DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"), | ||
343 | }, | ||
344 | }, | ||
345 | { | ||
346 | .callback = dmi_disable_osi_win8, | ||
347 | .ident = "ThinkPad T431s", | ||
348 | .matches = { | ||
349 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
350 | DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"), | ||
351 | }, | ||
352 | }, | ||
353 | { | ||
354 | .callback = dmi_disable_osi_win8, | ||
355 | .ident = "ThinkPad T430", | ||
356 | .matches = { | ||
357 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
358 | DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), | ||
359 | }, | ||
360 | }, | ||
300 | 361 | ||
301 | /* | 362 | /* |
302 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. | 363 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. |
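The new blacklist entries extend acpi_osi_dmi_table, which is matched against the firmware's DMI strings at boot so the kernel can withhold specific _OSI answers (here Windows Vista and Windows 2012) on machines whose firmware misbehaves under them. A hedged sketch of the usual consumption pattern for such a table; the example_* names are stand-ins, while struct dmi_system_id, DMI_MATCH() and dmi_check_system() are the real interfaces.

/*
 * Sketch of the dmi_system_id consumption pattern; the table and callback
 * below are illustrative, not the real acpi_osi_dmi_table handling.
 */
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static int example_disable_quirk(const struct dmi_system_id *d)
{
        pr_info("applying quirk for %s\n", d->ident);
        /* a real callback would flip a module-level flag here */
        return 0;
}

static const struct dmi_system_id example_quirks[] __initconst = {
        {
                .callback = example_disable_quirk,
                .ident = "Example laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
                },
        },
        {}      /* terminating empty entry is required */
};

static void __init example_check_quirks(void)
{
        /*
         * Walks the table, invokes the callback of each entry whose
         * matches[] all hit, and returns the number of matches found.
         */
        dmi_check_system(example_quirks);
}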
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index a55773801c5f..c971929d75c2 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device) | |||
383 | 383 | ||
384 | switch (button->type) { | 384 | switch (button->type) { |
385 | case ACPI_BUTTON_TYPE_POWER: | 385 | case ACPI_BUTTON_TYPE_POWER: |
386 | input->evbit[0] = BIT_MASK(EV_KEY); | 386 | input_set_capability(input, EV_KEY, KEY_POWER); |
387 | set_bit(KEY_POWER, input->keybit); | ||
388 | break; | 387 | break; |
389 | 388 | ||
390 | case ACPI_BUTTON_TYPE_SLEEP: | 389 | case ACPI_BUTTON_TYPE_SLEEP: |
391 | input->evbit[0] = BIT_MASK(EV_KEY); | 390 | input_set_capability(input, EV_KEY, KEY_SLEEP); |
392 | set_bit(KEY_SLEEP, input->keybit); | ||
393 | break; | 391 | break; |
394 | 392 | ||
395 | case ACPI_BUTTON_TYPE_LID: | 393 | case ACPI_BUTTON_TYPE_LID: |
396 | input->evbit[0] = BIT_MASK(EV_SW); | 394 | input_set_capability(input, EV_SW, SW_LID); |
397 | set_bit(SW_LID, input->swbit); | ||
398 | break; | 395 | break; |
399 | } | 396 | } |
400 | 397 | ||
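input_set_capability() sets both the event-type bit (evbit) and the event-code bit (keybit/swbit) in one call, which is why the explicit BIT_MASK()/set_bit() pairs above can go. A minimal sketch of registering a power-button input device this way; the device name and function are illustrative, the input API calls are the real ones.

/*
 * Illustrative only: a minimal input device reporting KEY_POWER,
 * registered the way the button driver now does it.
 */
#include <linux/input.h>

static struct input_dev *example_register_power_button(void)
{
        struct input_dev *input;
        int error;

        input = input_allocate_device();
        if (!input)
                return NULL;

        input->name = "Example Power Button";

        /* sets EV_KEY in evbit and KEY_POWER in keybit in one step */
        input_set_capability(input, EV_KEY, KEY_POWER);

        error = input_register_device(input);
        if (error) {
                input_free_device(input);
                return NULL;
        }
        return input;
}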
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c deleted file mode 100644 index 6c9ee68e46fb..000000000000 --- a/drivers/acpi/cm_sbs.c +++ /dev/null | |||
@@ -1,105 +0,0 @@ | |||
1 | /* | ||
2 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or (at | ||
7 | * your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/acpi.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/proc_fs.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | #include <acpi/acpi_bus.h> | ||
29 | #include <acpi/acpi_drivers.h> | ||
30 | |||
31 | #define PREFIX "ACPI: " | ||
32 | |||
33 | ACPI_MODULE_NAME("cm_sbs"); | ||
34 | #define ACPI_AC_CLASS "ac_adapter" | ||
35 | #define ACPI_BATTERY_CLASS "battery" | ||
36 | #define _COMPONENT ACPI_SBS_COMPONENT | ||
37 | static struct proc_dir_entry *acpi_ac_dir; | ||
38 | static struct proc_dir_entry *acpi_battery_dir; | ||
39 | |||
40 | static DEFINE_MUTEX(cm_sbs_mutex); | ||
41 | |||
42 | static int lock_ac_dir_cnt; | ||
43 | static int lock_battery_dir_cnt; | ||
44 | |||
45 | struct proc_dir_entry *acpi_lock_ac_dir(void) | ||
46 | { | ||
47 | mutex_lock(&cm_sbs_mutex); | ||
48 | if (!acpi_ac_dir) | ||
49 | acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); | ||
50 | if (acpi_ac_dir) { | ||
51 | lock_ac_dir_cnt++; | ||
52 | } else { | ||
53 | printk(KERN_ERR PREFIX | ||
54 | "Cannot create %s\n", ACPI_AC_CLASS); | ||
55 | } | ||
56 | mutex_unlock(&cm_sbs_mutex); | ||
57 | return acpi_ac_dir; | ||
58 | } | ||
59 | EXPORT_SYMBOL(acpi_lock_ac_dir); | ||
60 | |||
61 | void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) | ||
62 | { | ||
63 | mutex_lock(&cm_sbs_mutex); | ||
64 | if (acpi_ac_dir_param) | ||
65 | lock_ac_dir_cnt--; | ||
66 | if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { | ||
67 | remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); | ||
68 | acpi_ac_dir = NULL; | ||
69 | } | ||
70 | mutex_unlock(&cm_sbs_mutex); | ||
71 | } | ||
72 | EXPORT_SYMBOL(acpi_unlock_ac_dir); | ||
73 | |||
74 | struct proc_dir_entry *acpi_lock_battery_dir(void) | ||
75 | { | ||
76 | mutex_lock(&cm_sbs_mutex); | ||
77 | if (!acpi_battery_dir) { | ||
78 | acpi_battery_dir = | ||
79 | proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); | ||
80 | } | ||
81 | if (acpi_battery_dir) { | ||
82 | lock_battery_dir_cnt++; | ||
83 | } else { | ||
84 | printk(KERN_ERR PREFIX | ||
85 | "Cannot create %s\n", ACPI_BATTERY_CLASS); | ||
86 | } | ||
87 | mutex_unlock(&cm_sbs_mutex); | ||
88 | return acpi_battery_dir; | ||
89 | } | ||
90 | EXPORT_SYMBOL(acpi_lock_battery_dir); | ||
91 | |||
92 | void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) | ||
93 | { | ||
94 | mutex_lock(&cm_sbs_mutex); | ||
95 | if (acpi_battery_dir_param) | ||
96 | lock_battery_dir_cnt--; | ||
97 | if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param | ||
98 | && acpi_battery_dir) { | ||
99 | remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); | ||
100 | acpi_battery_dir = NULL; | ||
101 | } | ||
102 | mutex_unlock(&cm_sbs_mutex); | ||
103 | return; | ||
104 | } | ||
105 | EXPORT_SYMBOL(acpi_unlock_battery_dir); | ||
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a94383d1f350..d42b2fb5a7e9 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
@@ -118,9 +118,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state) | |||
118 | /* | 118 | /* |
119 | * If we were unsure about the device parent's power state up to this | 119 | * If we were unsure about the device parent's power state up to this |
120 | * point, the fact that the device is in D0 implies that the parent has | 120 | * point, the fact that the device is in D0 implies that the parent has |
121 | * to be in D0 too. | 121 | * to be in D0 too, except if ignore_parent is set. |
122 | */ | 122 | */ |
123 | if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN | 123 | if (!device->power.flags.ignore_parent && device->parent |
124 | && device->parent->power.state == ACPI_STATE_UNKNOWN | ||
124 | && result == ACPI_STATE_D0) | 125 | && result == ACPI_STATE_D0) |
125 | device->parent->power.state = ACPI_STATE_D0; | 126 | device->parent->power.state = ACPI_STATE_D0; |
126 | 127 | ||
@@ -177,7 +178,8 @@ int acpi_device_set_power(struct acpi_device *device, int state) | |||
177 | acpi_power_state_string(state)); | 178 | acpi_power_state_string(state)); |
178 | return -ENODEV; | 179 | return -ENODEV; |
179 | } | 180 | } |
180 | if (device->parent && (state < device->parent->power.state)) { | 181 | if (!device->power.flags.ignore_parent && |
182 | device->parent && (state < device->parent->power.state)) { | ||
181 | dev_warn(&device->dev, | 183 | dev_warn(&device->dev, |
182 | "Cannot transition to power state %s for parent in %s\n", | 184 | "Cannot transition to power state %s for parent in %s\n", |
183 | acpi_power_state_string(state), | 185 | acpi_power_state_string(state), |
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 05ea4be01a83..dcd73ccb514c 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock) | |||
441 | acpi_status status; | 441 | acpi_status status; |
442 | struct acpi_object_list arg_list; | 442 | struct acpi_object_list arg_list; |
443 | union acpi_object arg; | 443 | union acpi_object arg; |
444 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 444 | unsigned long long value; |
445 | 445 | ||
446 | acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking"); | 446 | acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking"); |
447 | 447 | ||
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock) | |||
450 | arg_list.pointer = &arg; | 450 | arg_list.pointer = &arg; |
451 | arg.type = ACPI_TYPE_INTEGER; | 451 | arg.type = ACPI_TYPE_INTEGER; |
452 | arg.integer.value = dock; | 452 | arg.integer.value = dock; |
453 | status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); | 453 | status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value); |
454 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) | 454 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) |
455 | acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n", | 455 | acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n", |
456 | status); | 456 | status); |
457 | |||
458 | kfree(buffer.pointer); | ||
459 | } | 457 | } |
460 | 458 | ||
461 | static inline void dock(struct dock_station *ds) | 459 | static inline void dock(struct dock_station *ds) |
@@ -671,39 +669,20 @@ static void dock_notify(struct dock_station *ds, u32 event) | |||
671 | } | 669 | } |
672 | } | 670 | } |
673 | 671 | ||
674 | struct dock_data { | 672 | static void acpi_dock_deferred_cb(void *data, u32 event) |
675 | struct dock_station *ds; | ||
676 | u32 event; | ||
677 | }; | ||
678 | |||
679 | static void acpi_dock_deferred_cb(void *context) | ||
680 | { | 673 | { |
681 | struct dock_data *data = context; | ||
682 | |||
683 | acpi_scan_lock_acquire(); | 674 | acpi_scan_lock_acquire(); |
684 | dock_notify(data->ds, data->event); | 675 | dock_notify(data, event); |
685 | acpi_scan_lock_release(); | 676 | acpi_scan_lock_release(); |
686 | kfree(data); | ||
687 | } | 677 | } |
688 | 678 | ||
689 | static void dock_notify_handler(acpi_handle handle, u32 event, void *data) | 679 | static void dock_notify_handler(acpi_handle handle, u32 event, void *data) |
690 | { | 680 | { |
691 | struct dock_data *dd; | ||
692 | |||
693 | if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK | 681 | if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK |
694 | && event != ACPI_NOTIFY_EJECT_REQUEST) | 682 | && event != ACPI_NOTIFY_EJECT_REQUEST) |
695 | return; | 683 | return; |
696 | 684 | ||
697 | dd = kmalloc(sizeof(*dd), GFP_KERNEL); | 685 | acpi_hotplug_execute(acpi_dock_deferred_cb, data, event); |
698 | if (dd) { | ||
699 | acpi_status status; | ||
700 | |||
701 | dd->ds = data; | ||
702 | dd->event = event; | ||
703 | status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd); | ||
704 | if (ACPI_FAILURE(status)) | ||
705 | kfree(dd); | ||
706 | } | ||
707 | } | 686 | } |
708 | 687 | ||
709 | /** | 688 | /** |
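Replacing acpi_evaluate_object() plus ACPI_ALLOCATE_BUFFER with acpi_evaluate_integer() lets _DCK's integer result be returned directly, so there is no buffer to kfree() afterwards. A hedged sketch of evaluating a one-argument, integer-returning control method in this style; the function name is hypothetical, the ACPI types and helper are the ones used above.

/*
 * Sketch of evaluating an ACPI method that takes one integer argument and
 * returns an integer, mirroring the new _DCK call above.
 */
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>

static acpi_status example_evaluate_dck(acpi_handle handle, int dock,
                                        unsigned long long *value)
{
        struct acpi_object_list arg_list;
        union acpi_object arg;

        arg_list.count = 1;
        arg_list.pointer = &arg;
        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = dock;

        /* the helper unpacks the returned integer; nothing to kfree() */
        return acpi_evaluate_integer(handle, "_DCK", &arg_list, value);
}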
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a06d98374705..d5309fd49458 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | /* Uncomment next line to get verbose printout */ | 29 | /* Uncomment next line to get verbose printout */ |
30 | /* #define DEBUG */ | 30 | /* #define DEBUG */ |
31 | #define pr_fmt(fmt) "ACPI : EC: " fmt | ||
31 | 32 | ||
32 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
@@ -49,9 +50,6 @@ | |||
49 | #define ACPI_EC_DEVICE_NAME "Embedded Controller" | 50 | #define ACPI_EC_DEVICE_NAME "Embedded Controller" |
50 | #define ACPI_EC_FILE_INFO "info" | 51 | #define ACPI_EC_FILE_INFO "info" |
51 | 52 | ||
52 | #undef PREFIX | ||
53 | #define PREFIX "ACPI: EC: " | ||
54 | |||
55 | /* EC status register */ | 53 | /* EC status register */ |
56 | #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ | 54 | #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ |
57 | #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ | 55 | #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ |
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ | |||
131 | static inline u8 acpi_ec_read_status(struct acpi_ec *ec) | 129 | static inline u8 acpi_ec_read_status(struct acpi_ec *ec) |
132 | { | 130 | { |
133 | u8 x = inb(ec->command_addr); | 131 | u8 x = inb(ec->command_addr); |
134 | pr_debug(PREFIX "---> status = 0x%2.2x\n", x); | 132 | pr_debug("---> status = 0x%2.2x\n", x); |
135 | return x; | 133 | return x; |
136 | } | 134 | } |
137 | 135 | ||
138 | static inline u8 acpi_ec_read_data(struct acpi_ec *ec) | 136 | static inline u8 acpi_ec_read_data(struct acpi_ec *ec) |
139 | { | 137 | { |
140 | u8 x = inb(ec->data_addr); | 138 | u8 x = inb(ec->data_addr); |
141 | pr_debug(PREFIX "---> data = 0x%2.2x\n", x); | 139 | pr_debug("---> data = 0x%2.2x\n", x); |
142 | return x; | 140 | return x; |
143 | } | 141 | } |
144 | 142 | ||
145 | static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) | 143 | static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) |
146 | { | 144 | { |
147 | pr_debug(PREFIX "<--- command = 0x%2.2x\n", command); | 145 | pr_debug("<--- command = 0x%2.2x\n", command); |
148 | outb(command, ec->command_addr); | 146 | outb(command, ec->command_addr); |
149 | } | 147 | } |
150 | 148 | ||
151 | static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) | 149 | static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) |
152 | { | 150 | { |
153 | pr_debug(PREFIX "<--- data = 0x%2.2x\n", data); | 151 | pr_debug("<--- data = 0x%2.2x\n", data); |
154 | outb(data, ec->data_addr); | 152 | outb(data, ec->data_addr); |
155 | } | 153 | } |
156 | 154 | ||
@@ -241,7 +239,7 @@ static int ec_poll(struct acpi_ec *ec) | |||
241 | } | 239 | } |
242 | advance_transaction(ec, acpi_ec_read_status(ec)); | 240 | advance_transaction(ec, acpi_ec_read_status(ec)); |
243 | } while (time_before(jiffies, delay)); | 241 | } while (time_before(jiffies, delay)); |
244 | pr_debug(PREFIX "controller reset, restart transaction\n"); | 242 | pr_debug("controller reset, restart transaction\n"); |
245 | spin_lock_irqsave(&ec->lock, flags); | 243 | spin_lock_irqsave(&ec->lock, flags); |
246 | start_transaction(ec); | 244 | start_transaction(ec); |
247 | spin_unlock_irqrestore(&ec->lock, flags); | 245 | spin_unlock_irqrestore(&ec->lock, flags); |
@@ -309,12 +307,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
309 | } | 307 | } |
310 | } | 308 | } |
311 | if (ec_wait_ibf0(ec)) { | 309 | if (ec_wait_ibf0(ec)) { |
312 | pr_err(PREFIX "input buffer is not empty, " | 310 | pr_err("input buffer is not empty, " |
313 | "aborting transaction\n"); | 311 | "aborting transaction\n"); |
314 | status = -ETIME; | 312 | status = -ETIME; |
315 | goto end; | 313 | goto end; |
316 | } | 314 | } |
317 | pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n", | 315 | pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", |
318 | t->command, t->wdata ? t->wdata[0] : 0); | 316 | t->command, t->wdata ? t->wdata[0] : 0); |
319 | /* disable GPE during transaction if storm is detected */ | 317 | /* disable GPE during transaction if storm is detected */ |
320 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 318 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
@@ -331,12 +329,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
331 | /* It is safe to enable the GPE outside of the transaction. */ | 329 | /* It is safe to enable the GPE outside of the transaction. */ |
332 | acpi_enable_gpe(NULL, ec->gpe); | 330 | acpi_enable_gpe(NULL, ec->gpe); |
333 | } else if (t->irq_count > ec_storm_threshold) { | 331 | } else if (t->irq_count > ec_storm_threshold) { |
334 | pr_info(PREFIX "GPE storm detected(%d GPEs), " | 332 | pr_info("GPE storm detected(%d GPEs), " |
335 | "transactions will use polling mode\n", | 333 | "transactions will use polling mode\n", |
336 | t->irq_count); | 334 | t->irq_count); |
337 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); | 335 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); |
338 | } | 336 | } |
339 | pr_debug(PREFIX "transaction end\n"); | 337 | pr_debug("transaction end\n"); |
340 | end: | 338 | end: |
341 | if (ec->global_lock) | 339 | if (ec->global_lock) |
342 | acpi_release_global_lock(glk); | 340 | acpi_release_global_lock(glk); |
@@ -570,12 +568,12 @@ static void acpi_ec_run(void *cxt) | |||
570 | struct acpi_ec_query_handler *handler = cxt; | 568 | struct acpi_ec_query_handler *handler = cxt; |
571 | if (!handler) | 569 | if (!handler) |
572 | return; | 570 | return; |
573 | pr_debug(PREFIX "start query execution\n"); | 571 | pr_debug("start query execution\n"); |
574 | if (handler->func) | 572 | if (handler->func) |
575 | handler->func(handler->data); | 573 | handler->func(handler->data); |
576 | else if (handler->handle) | 574 | else if (handler->handle) |
577 | acpi_evaluate_object(handler->handle, NULL, NULL, NULL); | 575 | acpi_evaluate_object(handler->handle, NULL, NULL, NULL); |
578 | pr_debug(PREFIX "stop query execution\n"); | 576 | pr_debug("stop query execution\n"); |
579 | kfree(handler); | 577 | kfree(handler); |
580 | } | 578 | } |
581 | 579 | ||
@@ -593,7 +591,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec) | |||
593 | if (!copy) | 591 | if (!copy) |
594 | return -ENOMEM; | 592 | return -ENOMEM; |
595 | memcpy(copy, handler, sizeof(*copy)); | 593 | memcpy(copy, handler, sizeof(*copy)); |
596 | pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value); | 594 | pr_debug("push query execution (0x%2x) on queue\n", |
595 | value); | ||
597 | return acpi_os_execute((copy->func) ? | 596 | return acpi_os_execute((copy->func) ? |
598 | OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, | 597 | OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, |
599 | acpi_ec_run, copy); | 598 | acpi_ec_run, copy); |
@@ -616,7 +615,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) | |||
616 | { | 615 | { |
617 | if (state & ACPI_EC_FLAG_SCI) { | 616 | if (state & ACPI_EC_FLAG_SCI) { |
618 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { | 617 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { |
619 | pr_debug(PREFIX "push gpe query to the queue\n"); | 618 | pr_debug("push gpe query to the queue\n"); |
620 | return acpi_os_execute(OSL_NOTIFY_HANDLER, | 619 | return acpi_os_execute(OSL_NOTIFY_HANDLER, |
621 | acpi_ec_gpe_query, ec); | 620 | acpi_ec_gpe_query, ec); |
622 | } | 621 | } |
@@ -630,7 +629,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, | |||
630 | struct acpi_ec *ec = data; | 629 | struct acpi_ec *ec = data; |
631 | u8 status = acpi_ec_read_status(ec); | 630 | u8 status = acpi_ec_read_status(ec); |
632 | 631 | ||
633 | pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status); | 632 | pr_debug("~~~> interrupt, status:0x%02x\n", status); |
634 | 633 | ||
635 | advance_transaction(ec, status); | 634 | advance_transaction(ec, status); |
636 | if (ec_transaction_done(ec) && | 635 | if (ec_transaction_done(ec) && |
@@ -776,7 +775,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
776 | * The AE_NOT_FOUND error will be ignored and OS | 775 | * The AE_NOT_FOUND error will be ignored and OS |
777 | * continue to initialize EC. | 776 | * continue to initialize EC. |
778 | */ | 777 | */ |
779 | printk(KERN_ERR "Fail in evaluating the _REG object" | 778 | pr_err("Fail in evaluating the _REG object" |
780 | " of EC device. Broken bios is suspected.\n"); | 779 | " of EC device. Broken bios is suspected.\n"); |
781 | } else { | 780 | } else { |
782 | acpi_remove_gpe_handler(NULL, ec->gpe, | 781 | acpi_remove_gpe_handler(NULL, ec->gpe, |
@@ -795,10 +794,10 @@ static void ec_remove_handlers(struct acpi_ec *ec) | |||
795 | acpi_disable_gpe(NULL, ec->gpe); | 794 | acpi_disable_gpe(NULL, ec->gpe); |
796 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, | 795 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, |
797 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) | 796 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) |
798 | pr_err(PREFIX "failed to remove space handler\n"); | 797 | pr_err("failed to remove space handler\n"); |
799 | if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, | 798 | if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, |
800 | &acpi_ec_gpe_handler))) | 799 | &acpi_ec_gpe_handler))) |
801 | pr_err(PREFIX "failed to remove gpe handler\n"); | 800 | pr_err("failed to remove gpe handler\n"); |
802 | clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags); | 801 | clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags); |
803 | } | 802 | } |
804 | 803 | ||
@@ -840,7 +839,7 @@ static int acpi_ec_add(struct acpi_device *device) | |||
840 | ret = !!request_region(ec->command_addr, 1, "EC cmd"); | 839 | ret = !!request_region(ec->command_addr, 1, "EC cmd"); |
841 | WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); | 840 | WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); |
842 | 841 | ||
843 | pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", | 842 | pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", |
844 | ec->gpe, ec->command_addr, ec->data_addr); | 843 | ec->gpe, ec->command_addr, ec->data_addr); |
845 | 844 | ||
846 | ret = ec_install_handlers(ec); | 845 | ret = ec_install_handlers(ec); |
@@ -931,7 +930,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) | |||
931 | /* MSI EC needs special treatment, enable it */ | 930 | /* MSI EC needs special treatment, enable it */ |
932 | static int ec_flag_msi(const struct dmi_system_id *id) | 931 | static int ec_flag_msi(const struct dmi_system_id *id) |
933 | { | 932 | { |
934 | printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n"); | 933 | pr_debug("Detected MSI hardware, enabling workarounds.\n"); |
935 | EC_FLAGS_MSI = 1; | 934 | EC_FLAGS_MSI = 1; |
936 | EC_FLAGS_VALIDATE_ECDT = 1; | 935 | EC_FLAGS_VALIDATE_ECDT = 1; |
937 | return 0; | 936 | return 0; |
@@ -1010,7 +1009,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
1010 | status = acpi_get_table(ACPI_SIG_ECDT, 1, | 1009 | status = acpi_get_table(ACPI_SIG_ECDT, 1, |
1011 | (struct acpi_table_header **)&ecdt_ptr); | 1010 | (struct acpi_table_header **)&ecdt_ptr); |
1012 | if (ACPI_SUCCESS(status)) { | 1011 | if (ACPI_SUCCESS(status)) { |
1013 | pr_info(PREFIX "EC description table is found, configuring boot EC\n"); | 1012 | pr_info("EC description table is found, configuring boot EC\n"); |
1014 | boot_ec->command_addr = ecdt_ptr->control.address; | 1013 | boot_ec->command_addr = ecdt_ptr->control.address; |
1015 | boot_ec->data_addr = ecdt_ptr->data.address; | 1014 | boot_ec->data_addr = ecdt_ptr->data.address; |
1016 | boot_ec->gpe = ecdt_ptr->gpe; | 1015 | boot_ec->gpe = ecdt_ptr->gpe; |
@@ -1030,7 +1029,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
1030 | 1029 | ||
1031 | /* This workaround is needed only on some broken machines, | 1030 | /* This workaround is needed only on some broken machines, |
1032 | * which require early EC, but fail to provide ECDT */ | 1031 | * which require early EC, but fail to provide ECDT */ |
1033 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); | 1032 | pr_debug("Look up EC in DSDT\n"); |
1034 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, | 1033 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, |
1035 | boot_ec, NULL); | 1034 | boot_ec, NULL); |
1036 | /* Check that acpi_get_devices actually find something */ | 1035 | /* Check that acpi_get_devices actually find something */ |
@@ -1042,7 +1041,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
1042 | saved_ec->data_addr != boot_ec->data_addr || | 1041 | saved_ec->data_addr != boot_ec->data_addr || |
1043 | saved_ec->gpe != boot_ec->gpe || | 1042 | saved_ec->gpe != boot_ec->gpe || |
1044 | saved_ec->handle != boot_ec->handle) | 1043 | saved_ec->handle != boot_ec->handle) |
1045 | pr_info(PREFIX "ASUSTek keeps feeding us with broken " | 1044 | pr_info("ASUSTek keeps feeding us with broken " |
1046 | "ECDT tables, which are very hard to workaround. " | 1045 | "ECDT tables, which are very hard to workaround. " |
1047 | "Trying to use DSDT EC info instead. Please send " | 1046 | "Trying to use DSDT EC info instead. Please send " |
1048 | "output of acpidump to linux-acpi@vger.kernel.org\n"); | 1047 | "output of acpidump to linux-acpi@vger.kernel.org\n"); |
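The EC conversion relies on the kernel's pr_fmt() convention: when pr_fmt() is defined before the first include, every pr_err()/pr_info()/pr_debug() in the file picks the prefix up automatically, so the per-call PREFIX argument becomes redundant. A minimal illustration with an assumed prefix and module name.

/* pr_fmt() must be defined before the first include to take effect */
#define pr_fmt(fmt) "example: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init example_init(void)
{
        /* expands to printk(KERN_INFO "example: " "loaded\n") */
        pr_info("loaded\n");
        return 0;
}

static void __exit example_exit(void)
{
        pr_info("unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");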
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index 8247fcdde079..fdef416c0ff6 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c | |||
@@ -127,11 +127,6 @@ int acpi_bus_generate_netlink_event(const char *device_class, | |||
127 | } | 127 | } |
128 | 128 | ||
129 | event = nla_data(attr); | 129 | event = nla_data(attr); |
130 | if (!event) { | ||
131 | nlmsg_free(skb); | ||
132 | return -EINVAL; | ||
133 | } | ||
134 | |||
135 | memset(event, 0, sizeof(struct acpi_genl_event)); | 130 | memset(event, 0, sizeof(struct acpi_genl_event)); |
136 | 131 | ||
137 | strcpy(event->device_class, device_class); | 132 | strcpy(event->device_class, device_class); |
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 41ade6570bc0..ba3da88cee45 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device) | |||
168 | acpi_device_name(device), acpi_device_bid(device), | 168 | acpi_device_name(device), acpi_device_bid(device), |
169 | !device->power.state ? "on" : "off"); | 169 | !device->power.state ? "on" : "off"); |
170 | 170 | ||
171 | end: | 171 | end: |
172 | return result; | 172 | return result; |
173 | } | 173 | } |
174 | 174 | ||
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 20f423337e1f..a29739c0ba79 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -26,11 +26,6 @@ | |||
26 | acpi_status acpi_os_initialize1(void); | 26 | acpi_status acpi_os_initialize1(void); |
27 | int init_acpi_device_notify(void); | 27 | int init_acpi_device_notify(void); |
28 | int acpi_scan_init(void); | 28 | int acpi_scan_init(void); |
29 | #ifdef CONFIG_ACPI_PCI_SLOT | ||
30 | void acpi_pci_slot_init(void); | ||
31 | #else | ||
32 | static inline void acpi_pci_slot_init(void) { } | ||
33 | #endif | ||
34 | void acpi_pci_root_init(void); | 29 | void acpi_pci_root_init(void); |
35 | void acpi_pci_link_init(void); | 30 | void acpi_pci_link_init(void); |
36 | void acpi_pci_root_hp_init(void); | 31 | void acpi_pci_root_hp_init(void); |
@@ -92,6 +87,7 @@ void acpi_device_add_finalize(struct acpi_device *device); | |||
92 | void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); | 87 | void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); |
93 | int acpi_bind_one(struct device *dev, acpi_handle handle); | 88 | int acpi_bind_one(struct device *dev, acpi_handle handle); |
94 | int acpi_unbind_one(struct device *dev); | 89 | int acpi_unbind_one(struct device *dev); |
90 | void acpi_bus_device_eject(void *data, u32 ost_src); | ||
95 | 91 | ||
96 | /* -------------------------------------------------------------------------- | 92 | /* -------------------------------------------------------------------------- |
97 | Power Resource | 93 | Power Resource |
@@ -169,9 +165,7 @@ int acpi_create_platform_device(struct acpi_device *adev, | |||
169 | Video | 165 | Video |
170 | -------------------------------------------------------------------------- */ | 166 | -------------------------------------------------------------------------- */ |
171 | #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) | 167 | #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) |
172 | bool acpi_video_backlight_quirks(void); | 168 | bool acpi_osi_is_win8(void); |
173 | #else | ||
174 | static inline bool acpi_video_backlight_quirks(void) { return false; } | ||
175 | #endif | 169 | #endif |
176 | 170 | ||
177 | #endif /* _ACPI_INTERNAL_H_ */ | 171 | #endif /* _ACPI_INTERNAL_H_ */ |
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 2e82e5d76930..a2343a1d9e0b 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm) | |||
73 | { | 73 | { |
74 | int node = pxm_to_node_map[pxm]; | 74 | int node = pxm_to_node_map[pxm]; |
75 | 75 | ||
76 | if (node < 0) { | 76 | if (node == NUMA_NO_NODE) { |
77 | if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) | 77 | if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) |
78 | return NUMA_NO_NODE; | 78 | return NUMA_NO_NODE; |
79 | node = first_unset_node(nodes_found_map); | 79 | node = first_unset_node(nodes_found_map); |
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h) | |||
334 | 334 | ||
335 | int acpi_get_node(acpi_handle *handle) | 335 | int acpi_get_node(acpi_handle *handle) |
336 | { | 336 | { |
337 | int pxm, node = -1; | 337 | int pxm, node = NUMA_NO_NODE; |
338 | 338 | ||
339 | pxm = acpi_get_pxm(handle); | 339 | pxm = acpi_get_pxm(handle); |
340 | if (pxm >= 0 && pxm < MAX_PXM_DOMAINS) | 340 | if (pxm >= 0 && pxm < MAX_PXM_DOMAINS) |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index e5f416c7f66e..54a20ff4b864 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -61,7 +61,6 @@ struct acpi_os_dpc { | |||
61 | acpi_osd_exec_callback function; | 61 | acpi_osd_exec_callback function; |
62 | void *context; | 62 | void *context; |
63 | struct work_struct work; | 63 | struct work_struct work; |
64 | int wait; | ||
65 | }; | 64 | }; |
66 | 65 | ||
67 | #ifdef CONFIG_ACPI_CUSTOM_DSDT | 66 | #ifdef CONFIG_ACPI_CUSTOM_DSDT |
@@ -569,8 +568,10 @@ static const char * const table_sigs[] = { | |||
569 | 568 | ||
570 | #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) | 569 | #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) |
571 | 570 | ||
572 | /* Must not increase 10 or needs code modification below */ | 571 | #define ACPI_OVERRIDE_TABLES 64 |
573 | #define ACPI_OVERRIDE_TABLES 10 | 572 | static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES]; |
573 | |||
574 | #define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT) | ||
574 | 575 | ||
575 | void __init acpi_initrd_override(void *data, size_t size) | 576 | void __init acpi_initrd_override(void *data, size_t size) |
576 | { | 577 | { |
@@ -579,8 +580,6 @@ void __init acpi_initrd_override(void *data, size_t size) | |||
579 | struct acpi_table_header *table; | 580 | struct acpi_table_header *table; |
580 | char cpio_path[32] = "kernel/firmware/acpi/"; | 581 | char cpio_path[32] = "kernel/firmware/acpi/"; |
581 | struct cpio_data file; | 582 | struct cpio_data file; |
582 | struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES]; | ||
583 | char *p; | ||
584 | 583 | ||
585 | if (data == NULL || size == 0) | 584 | if (data == NULL || size == 0) |
586 | return; | 585 | return; |
@@ -625,8 +624,8 @@ void __init acpi_initrd_override(void *data, size_t size) | |||
625 | table->signature, cpio_path, file.name, table->length); | 624 | table->signature, cpio_path, file.name, table->length); |
626 | 625 | ||
627 | all_tables_size += table->length; | 626 | all_tables_size += table->length; |
628 | early_initrd_files[table_nr].data = file.data; | 627 | acpi_initrd_files[table_nr].data = file.data; |
629 | early_initrd_files[table_nr].size = file.size; | 628 | acpi_initrd_files[table_nr].size = file.size; |
630 | table_nr++; | 629 | table_nr++; |
631 | } | 630 | } |
632 | if (table_nr == 0) | 631 | if (table_nr == 0) |
@@ -652,14 +651,34 @@ void __init acpi_initrd_override(void *data, size_t size) | |||
652 | memblock_reserve(acpi_tables_addr, all_tables_size); | 651 | memblock_reserve(acpi_tables_addr, all_tables_size); |
653 | arch_reserve_mem_area(acpi_tables_addr, all_tables_size); | 652 | arch_reserve_mem_area(acpi_tables_addr, all_tables_size); |
654 | 653 | ||
655 | p = early_ioremap(acpi_tables_addr, all_tables_size); | 654 | /* |
656 | 655 | * early_ioremap only can remap 256k one time. If we map all | |
656 | * tables one time, we will hit the limit. Need to map chunks | ||
657 | * one by one during copying the same as that in relocate_initrd(). | ||
658 | */ | ||
657 | for (no = 0; no < table_nr; no++) { | 659 | for (no = 0; no < table_nr; no++) { |
658 | memcpy(p + total_offset, early_initrd_files[no].data, | 660 | unsigned char *src_p = acpi_initrd_files[no].data; |
659 | early_initrd_files[no].size); | 661 | phys_addr_t size = acpi_initrd_files[no].size; |
660 | total_offset += early_initrd_files[no].size; | 662 | phys_addr_t dest_addr = acpi_tables_addr + total_offset; |
663 | phys_addr_t slop, clen; | ||
664 | char *dest_p; | ||
665 | |||
666 | total_offset += size; | ||
667 | |||
668 | while (size) { | ||
669 | slop = dest_addr & ~PAGE_MASK; | ||
670 | clen = size; | ||
671 | if (clen > MAP_CHUNK_SIZE - slop) | ||
672 | clen = MAP_CHUNK_SIZE - slop; | ||
673 | dest_p = early_ioremap(dest_addr & PAGE_MASK, | ||
674 | clen + slop); | ||
675 | memcpy(dest_p + slop, src_p, clen); | ||
676 | early_iounmap(dest_p, clen + slop); | ||
677 | src_p += clen; | ||
678 | dest_addr += clen; | ||
679 | size -= clen; | ||
680 | } | ||
661 | } | 681 | } |
662 | early_iounmap(p, all_tables_size); | ||
663 | } | 682 | } |
664 | #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */ | 683 | #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */ |
665 | 684 | ||
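The copy loop above works around the limited early_ioremap() fixmap window (NR_FIX_BTMAPS pages, 256 KB on x86) by mapping at most MAP_CHUNK_SIZE bytes at a time and carrying the destination's offset into its first page as "slop". A stand-alone sketch of just that chunking arithmetic, with memcpy() standing in for the map/copy/unmap sequence and illustrative constants.

/*
 * Stand-alone sketch of the chunking arithmetic used above; the constants
 * assume 4 KB pages and a 256 KB mapping window.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define MAP_CHUNK_SIZE  (64 * PAGE_SIZE)        /* 256 KB fixmap window */

static void chunked_copy(uint8_t *dest, const uint8_t *src, size_t size)
{
        while (size) {
                /* offset of the destination inside its page ("slop") */
                uintptr_t slop = (uintptr_t)dest & ~PAGE_MASK;
                size_t clen = size;

                /* never cover more than one window, slop included */
                if (clen > MAP_CHUNK_SIZE - slop)
                        clen = MAP_CHUNK_SIZE - slop;

                /* kernel code maps (dest & PAGE_MASK, clen + slop) here ... */
                memcpy(dest, src, clen);
                /* ... and unmaps the window again before the next chunk */

                src  += clen;
                dest += clen;
                size -= clen;
        }
}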
@@ -820,7 +839,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler) | |||
820 | 839 | ||
821 | void acpi_os_sleep(u64 ms) | 840 | void acpi_os_sleep(u64 ms) |
822 | { | 841 | { |
823 | schedule_timeout_interruptible(msecs_to_jiffies(ms)); | 842 | msleep(ms); |
824 | } | 843 | } |
825 | 844 | ||
826 | void acpi_os_stall(u32 us) | 845 | void acpi_os_stall(u32 us) |
@@ -1067,9 +1086,6 @@ static void acpi_os_execute_deferred(struct work_struct *work) | |||
1067 | { | 1086 | { |
1068 | struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); | 1087 | struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); |
1069 | 1088 | ||
1070 | if (dpc->wait) | ||
1071 | acpi_os_wait_events_complete(); | ||
1072 | |||
1073 | dpc->function(dpc->context); | 1089 | dpc->function(dpc->context); |
1074 | kfree(dpc); | 1090 | kfree(dpc); |
1075 | } | 1091 | } |
@@ -1089,8 +1105,8 @@ static void acpi_os_execute_deferred(struct work_struct *work) | |||
1089 | * | 1105 | * |
1090 | ******************************************************************************/ | 1106 | ******************************************************************************/ |
1091 | 1107 | ||
1092 | static acpi_status __acpi_os_execute(acpi_execute_type type, | 1108 | acpi_status acpi_os_execute(acpi_execute_type type, |
1093 | acpi_osd_exec_callback function, void *context, int hp) | 1109 | acpi_osd_exec_callback function, void *context) |
1094 | { | 1110 | { |
1095 | acpi_status status = AE_OK; | 1111 | acpi_status status = AE_OK; |
1096 | struct acpi_os_dpc *dpc; | 1112 | struct acpi_os_dpc *dpc; |
@@ -1117,20 +1133,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
1117 | dpc->context = context; | 1133 | dpc->context = context; |
1118 | 1134 | ||
1119 | /* | 1135 | /* |
1120 | * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq | ||
1121 | * because the hotplug code may call driver .remove() functions, | ||
1122 | * which invoke flush_scheduled_work/acpi_os_wait_events_complete | ||
1123 | * to flush these workqueues. | ||
1124 | * | ||
1125 | * To prevent lockdep from complaining unnecessarily, make sure that | 1136 | * To prevent lockdep from complaining unnecessarily, make sure that |
1126 | * there is a different static lockdep key for each workqueue by using | 1137 | * there is a different static lockdep key for each workqueue by using |
1127 | * INIT_WORK() for each of them separately. | 1138 | * INIT_WORK() for each of them separately. |
1128 | */ | 1139 | */ |
1129 | if (hp) { | 1140 | if (type == OSL_NOTIFY_HANDLER) { |
1130 | queue = kacpi_hotplug_wq; | ||
1131 | dpc->wait = 1; | ||
1132 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | ||
1133 | } else if (type == OSL_NOTIFY_HANDLER) { | ||
1134 | queue = kacpi_notify_wq; | 1141 | queue = kacpi_notify_wq; |
1135 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | 1142 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
1136 | } else { | 1143 | } else { |
@@ -1155,28 +1162,59 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
1155 | } | 1162 | } |
1156 | return status; | 1163 | return status; |
1157 | } | 1164 | } |
1165 | EXPORT_SYMBOL(acpi_os_execute); | ||
1158 | 1166 | ||
1159 | acpi_status acpi_os_execute(acpi_execute_type type, | 1167 | void acpi_os_wait_events_complete(void) |
1160 | acpi_osd_exec_callback function, void *context) | ||
1161 | { | 1168 | { |
1162 | return __acpi_os_execute(type, function, context, 0); | 1169 | flush_workqueue(kacpid_wq); |
1170 | flush_workqueue(kacpi_notify_wq); | ||
1163 | } | 1171 | } |
1164 | EXPORT_SYMBOL(acpi_os_execute); | ||
1165 | 1172 | ||
1166 | acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, | 1173 | struct acpi_hp_work { |
1167 | void *context) | 1174 | struct work_struct work; |
1175 | acpi_hp_callback func; | ||
1176 | void *data; | ||
1177 | u32 src; | ||
1178 | }; | ||
1179 | |||
1180 | static void acpi_hotplug_work_fn(struct work_struct *work) | ||
1168 | { | 1181 | { |
1169 | return __acpi_os_execute(0, function, context, 1); | 1182 | struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work); |
1183 | |||
1184 | acpi_os_wait_events_complete(); | ||
1185 | hpw->func(hpw->data, hpw->src); | ||
1186 | kfree(hpw); | ||
1170 | } | 1187 | } |
1171 | EXPORT_SYMBOL(acpi_os_hotplug_execute); | ||
1172 | 1188 | ||
1173 | void acpi_os_wait_events_complete(void) | 1189 | acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src) |
1174 | { | 1190 | { |
1175 | flush_workqueue(kacpid_wq); | 1191 | struct acpi_hp_work *hpw; |
1176 | flush_workqueue(kacpi_notify_wq); | 1192 | |
1193 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
1194 | "Scheduling function [%p(%p, %u)] for deferred execution.\n", | ||
1195 | func, data, src)); | ||
1196 | |||
1197 | hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); | ||
1198 | if (!hpw) | ||
1199 | return AE_NO_MEMORY; | ||
1200 | |||
1201 | INIT_WORK(&hpw->work, acpi_hotplug_work_fn); | ||
1202 | hpw->func = func; | ||
1203 | hpw->data = data; | ||
1204 | hpw->src = src; | ||
1205 | /* | ||
1206 | * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because | ||
1207 | * the hotplug code may call driver .remove() functions, which may | ||
1208 | * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush | ||
1209 | * these workqueues. | ||
1210 | */ | ||
1211 | if (!queue_work(kacpi_hotplug_wq, &hpw->work)) { | ||
1212 | kfree(hpw); | ||
1213 | return AE_ERROR; | ||
1214 | } | ||
1215 | return AE_OK; | ||
1177 | } | 1216 | } |
1178 | 1217 | ||
1179 | EXPORT_SYMBOL(acpi_os_wait_events_complete); | ||
1180 | 1218 | ||
1181 | acpi_status | 1219 | acpi_status |
1182 | acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) | 1220 | acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) |
@@ -1335,7 +1373,7 @@ static int __init acpi_os_name_setup(char *str) | |||
1335 | if (!str || !*str) | 1373 | if (!str || !*str) |
1336 | return 0; | 1374 | return 0; |
1337 | 1375 | ||
1338 | for (; count-- && str && *str; str++) { | 1376 | for (; count-- && *str; str++) { |
1339 | if (isalnum(*str) || *str == ' ' || *str == ':') | 1377 | if (isalnum(*str) || *str == ' ' || *str == ':') |
1340 | *p++ = *str; | 1378 | *p++ = *str; |
1341 | else if (*str == '\'' || *str == '"') | 1379 | else if (*str == '\'' || *str == '"') |
@@ -1825,25 +1863,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, | |||
1825 | { | 1863 | { |
1826 | __acpi_os_prepare_extended_sleep = func; | 1864 | __acpi_os_prepare_extended_sleep = func; |
1827 | } | 1865 | } |
1828 | |||
1829 | |||
1830 | void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context, | ||
1831 | void (*func)(struct work_struct *work)) | ||
1832 | { | ||
1833 | struct acpi_hp_work *hp_work; | ||
1834 | int ret; | ||
1835 | |||
1836 | hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL); | ||
1837 | if (!hp_work) | ||
1838 | return; | ||
1839 | |||
1840 | hp_work->handle = handle; | ||
1841 | hp_work->type = type; | ||
1842 | hp_work->context = context; | ||
1843 | |||
1844 | INIT_WORK(&hp_work->work, func); | ||
1845 | ret = queue_work(kacpi_hotplug_wq, &hp_work->work); | ||
1846 | if (!ret) | ||
1847 | kfree(hp_work); | ||
1848 | } | ||
1849 | EXPORT_SYMBOL_GPL(alloc_acpi_hp_work); | ||
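acpi_hotplug_execute() introduced above replaces both acpi_os_hotplug_execute() and alloc_acpi_hp_work(): the caller hands over a void (*)(void *, u32) callback, a data pointer and an event/source code, and the helper queues the work on kacpi_hotplug_wq, where flushing the ordinary ACPI workqueues is safe. A hedged sketch of a notify handler converted to this pattern, in the style of the dock and pci_root changes in this series; the example_* names are illustrative and the header placement is assumed.

/*
 * Sketch of a notify handler converted to acpi_hotplug_execute(); the
 * helper and its signature are the ones introduced above.
 */
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>

static void example_deferred_cb(void *data, u32 event)
{
        struct acpi_device *adev = data;

        /*
         * Runs from kacpi_hotplug_wq, so it may sleep and may flush the
         * ordinary ACPI workqueues without deadlocking.
         */
        acpi_scan_lock_acquire();
        dev_info(&adev->dev, "deferred hotplug event 0x%x\n", event);
        acpi_scan_lock_release();
}

static void example_notify_handler(acpi_handle handle, u32 event, void *data)
{
        if (event != ACPI_NOTIFY_BUS_CHECK &&
            event != ACPI_NOTIFY_DEVICE_CHECK &&
            event != ACPI_NOTIFY_EJECT_REQUEST)
                return;

        /* queues example_deferred_cb(data, event) on the hotplug workqueue */
        acpi_hotplug_execute(example_deferred_cb, data, event);
}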
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d3874f425653..417876bce854 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #include <acpi/acpi_drivers.h> | 39 | #include <acpi/acpi_drivers.h> |
40 | #include <acpi/apei.h> | 40 | #include <acpi/apei.h> |
41 | 41 | ||
42 | #include "internal.h" | ||
43 | |||
42 | #define PREFIX "ACPI: " | 44 | #define PREFIX "ACPI: " |
43 | 45 | ||
44 | #define _COMPONENT ACPI_PCI_COMPONENT | 46 | #define _COMPONENT ACPI_PCI_COMPONENT |
@@ -590,39 +592,10 @@ static void handle_root_bridge_insertion(acpi_handle handle) | |||
590 | acpi_handle_err(handle, "cannot add bridge to acpi list\n"); | 592 | acpi_handle_err(handle, "cannot add bridge to acpi list\n"); |
591 | } | 593 | } |
592 | 594 | ||
593 | static void handle_root_bridge_removal(struct acpi_device *device) | 595 | static void hotplug_event_root(void *data, u32 type) |
594 | { | ||
595 | acpi_status status; | ||
596 | struct acpi_eject_event *ej_event; | ||
597 | |||
598 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); | ||
599 | if (!ej_event) { | ||
600 | /* Inform firmware the hot-remove operation has error */ | ||
601 | (void) acpi_evaluate_hotplug_ost(device->handle, | ||
602 | ACPI_NOTIFY_EJECT_REQUEST, | ||
603 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, | ||
604 | NULL); | ||
605 | return; | ||
606 | } | ||
607 | |||
608 | ej_event->device = device; | ||
609 | ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; | ||
610 | |||
611 | status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); | ||
612 | if (ACPI_FAILURE(status)) | ||
613 | kfree(ej_event); | ||
614 | } | ||
615 | |||
616 | static void _handle_hotplug_event_root(struct work_struct *work) | ||
617 | { | 596 | { |
597 | acpi_handle handle = data; | ||
618 | struct acpi_pci_root *root; | 598 | struct acpi_pci_root *root; |
619 | struct acpi_hp_work *hp_work; | ||
620 | acpi_handle handle; | ||
621 | u32 type; | ||
622 | |||
623 | hp_work = container_of(work, struct acpi_hp_work, work); | ||
624 | handle = hp_work->handle; | ||
625 | type = hp_work->type; | ||
626 | 599 | ||
627 | acpi_scan_lock_acquire(); | 600 | acpi_scan_lock_acquire(); |
628 | 601 | ||
@@ -652,9 +625,15 @@ static void _handle_hotplug_event_root(struct work_struct *work) | |||
652 | /* request device eject */ | 625 | /* request device eject */ |
653 | acpi_handle_printk(KERN_DEBUG, handle, | 626 | acpi_handle_printk(KERN_DEBUG, handle, |
654 | "Device eject notify on %s\n", __func__); | 627 | "Device eject notify on %s\n", __func__); |
655 | if (root) | 628 | if (!root) |
656 | handle_root_bridge_removal(root->device); | 629 | break; |
657 | break; | 630 | |
631 | get_device(&root->device->dev); | ||
632 | |||
633 | acpi_scan_lock_release(); | ||
634 | |||
635 | acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST); | ||
636 | return; | ||
658 | default: | 637 | default: |
659 | acpi_handle_warn(handle, | 638 | acpi_handle_warn(handle, |
660 | "notify_handler: unknown event type 0x%x\n", | 639 | "notify_handler: unknown event type 0x%x\n", |
@@ -663,14 +642,12 @@ static void _handle_hotplug_event_root(struct work_struct *work) | |||
663 | } | 642 | } |
664 | 643 | ||
665 | acpi_scan_lock_release(); | 644 | acpi_scan_lock_release(); |
666 | kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ | ||
667 | } | 645 | } |
668 | 646 | ||
669 | static void handle_hotplug_event_root(acpi_handle handle, u32 type, | 647 | static void handle_hotplug_event_root(acpi_handle handle, u32 type, |
670 | void *context) | 648 | void *context) |
671 | { | 649 | { |
672 | alloc_acpi_hp_work(handle, type, context, | 650 | acpi_hotplug_execute(hotplug_event_root, handle, type); |
673 | _handle_hotplug_event_root); | ||
674 | } | 651 | } |
675 | 652 | ||
676 | static acpi_status __init | 653 | static acpi_status __init |
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 04a13784dd20..6a5b152ad4d0 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
@@ -8,289 +8,17 @@ | |||
8 | #include <acpi/acpi_bus.h> | 8 | #include <acpi/acpi_bus.h> |
9 | #include <acpi/acpi_drivers.h> | 9 | #include <acpi/acpi_drivers.h> |
10 | 10 | ||
11 | #ifdef CONFIG_X86 | ||
12 | #include <linux/mc146818rtc.h> | ||
13 | #endif | ||
14 | |||
15 | #include "sleep.h" | 11 | #include "sleep.h" |
16 | 12 | ||
17 | #define _COMPONENT ACPI_SYSTEM_COMPONENT | 13 | #define _COMPONENT ACPI_SYSTEM_COMPONENT |
18 | 14 | ||
19 | /* | 15 | /* |
20 | * this file provides support for: | 16 | * this file provides support for: |
21 | * /proc/acpi/alarm | ||
22 | * /proc/acpi/wakeup | 17 | * /proc/acpi/wakeup |
23 | */ | 18 | */ |
24 | 19 | ||
25 | ACPI_MODULE_NAME("sleep") | 20 | ACPI_MODULE_NAME("sleep") |
26 | 21 | ||
27 | #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86) | ||
28 | /* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */ | ||
29 | #else | ||
30 | #define HAVE_ACPI_LEGACY_ALARM | ||
31 | #endif | ||
32 | |||
33 | #ifdef HAVE_ACPI_LEGACY_ALARM | ||
34 | |||
35 | static u32 cmos_bcd_read(int offset, int rtc_control); | ||
36 | |||
37 | static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) | ||
38 | { | ||
39 | u32 sec, min, hr; | ||
40 | u32 day, mo, yr, cent = 0; | ||
41 | u32 today = 0; | ||
42 | unsigned char rtc_control = 0; | ||
43 | unsigned long flags; | ||
44 | |||
45 | spin_lock_irqsave(&rtc_lock, flags); | ||
46 | |||
47 | rtc_control = CMOS_READ(RTC_CONTROL); | ||
48 | sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control); | ||
49 | min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control); | ||
50 | hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control); | ||
51 | |||
52 | /* If we ever get an FACP with proper values... */ | ||
53 | if (acpi_gbl_FADT.day_alarm) { | ||
54 | /* ACPI spec: only low 6 its should be cared */ | ||
55 | day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F; | ||
56 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
57 | day = bcd2bin(day); | ||
58 | } else | ||
59 | day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); | ||
60 | if (acpi_gbl_FADT.month_alarm) | ||
61 | mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control); | ||
62 | else { | ||
63 | mo = cmos_bcd_read(RTC_MONTH, rtc_control); | ||
64 | today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); | ||
65 | } | ||
66 | if (acpi_gbl_FADT.century) | ||
67 | cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control); | ||
68 | |||
69 | yr = cmos_bcd_read(RTC_YEAR, rtc_control); | ||
70 | |||
71 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
72 | |||
73 | /* we're trusting the FADT (see above) */ | ||
74 | if (!acpi_gbl_FADT.century) | ||
75 | /* If we're not trusting the FADT, we should at least make it | ||
76 | * right for _this_ century... ehm, what is _this_ century? | ||
77 | * | ||
78 | * TBD: | ||
79 | * ASAP: find piece of code in the kernel, e.g. star tracker driver, | ||
80 | * which we can trust to determine the century correctly. Atom | ||
81 | * watch driver would be nice, too... | ||
82 | * | ||
83 | * if that has not happened, change for first release in 2050: | ||
84 | * if (yr<50) | ||
85 | * yr += 2100; | ||
86 | * else | ||
87 | * yr += 2000; // current line of code | ||
88 | * | ||
89 | * if that has not happened either, please do on 2099/12/31:23:59:59 | ||
90 | * s/2000/2100 | ||
91 | * | ||
92 | */ | ||
93 | yr += 2000; | ||
94 | else | ||
95 | yr += cent * 100; | ||
96 | |||
97 | /* | ||
98 | * Show correct dates for alarms up to a month into the future. | ||
99 | * This solves issues for nearly all situations with the common | ||
100 | * 30-day alarm clocks in PC hardware. | ||
101 | */ | ||
102 | if (day < today) { | ||
103 | if (mo < 12) { | ||
104 | mo += 1; | ||
105 | } else { | ||
106 | mo = 1; | ||
107 | yr += 1; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | seq_printf(seq, "%4.4u-", yr); | ||
112 | (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); | ||
113 | (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day); | ||
114 | (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr); | ||
115 | (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min); | ||
116 | (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file) | ||
122 | { | ||
123 | return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode)); | ||
124 | } | ||
125 | |||
126 | static int get_date_field(char **p, u32 * value) | ||
127 | { | ||
128 | char *next = NULL; | ||
129 | char *string_end = NULL; | ||
130 | int result = -EINVAL; | ||
131 | |||
132 | /* | ||
133 | * Try to find delimeter, only to insert null. The end of the | ||
134 | * string won't have one, but is still valid. | ||
135 | */ | ||
136 | if (*p == NULL) | ||
137 | return result; | ||
138 | |||
139 | next = strpbrk(*p, "- :"); | ||
140 | if (next) | ||
141 | *next++ = '\0'; | ||
142 | |||
143 | *value = simple_strtoul(*p, &string_end, 10); | ||
144 | |||
145 | /* Signal success if we got a good digit */ | ||
146 | if (string_end != *p) | ||
147 | result = 0; | ||
148 | |||
149 | if (next) | ||
150 | *p = next; | ||
151 | else | ||
152 | *p = NULL; | ||
153 | |||
154 | return result; | ||
155 | } | ||
156 | |||
157 | /* Read a possibly BCD register, always return binary */ | ||
158 | static u32 cmos_bcd_read(int offset, int rtc_control) | ||
159 | { | ||
160 | u32 val = CMOS_READ(offset); | ||
161 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
162 | val = bcd2bin(val); | ||
163 | return val; | ||
164 | } | ||
165 | |||
166 | /* Write binary value into possibly BCD register */ | ||
167 | static void cmos_bcd_write(u32 val, int offset, int rtc_control) | ||
168 | { | ||
169 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
170 | val = bin2bcd(val); | ||
171 | CMOS_WRITE(val, offset); | ||
172 | } | ||
173 | |||
174 | static ssize_t | ||
175 | acpi_system_write_alarm(struct file *file, | ||
176 | const char __user * buffer, size_t count, loff_t * ppos) | ||
177 | { | ||
178 | int result = 0; | ||
179 | char alarm_string[30] = { '\0' }; | ||
180 | char *p = alarm_string; | ||
181 | u32 sec, min, hr, day, mo, yr; | ||
182 | int adjust = 0; | ||
183 | unsigned char rtc_control = 0; | ||
184 | |||
185 | if (count > sizeof(alarm_string) - 1) | ||
186 | return -EINVAL; | ||
187 | |||
188 | if (copy_from_user(alarm_string, buffer, count)) | ||
189 | return -EFAULT; | ||
190 | |||
191 | alarm_string[count] = '\0'; | ||
192 | |||
193 | /* check for time adjustment */ | ||
194 | if (alarm_string[0] == '+') { | ||
195 | p++; | ||
196 | adjust = 1; | ||
197 | } | ||
198 | |||
199 | if ((result = get_date_field(&p, &yr))) | ||
200 | goto end; | ||
201 | if ((result = get_date_field(&p, &mo))) | ||
202 | goto end; | ||
203 | if ((result = get_date_field(&p, &day))) | ||
204 | goto end; | ||
205 | if ((result = get_date_field(&p, &hr))) | ||
206 | goto end; | ||
207 | if ((result = get_date_field(&p, &min))) | ||
208 | goto end; | ||
209 | if ((result = get_date_field(&p, &sec))) | ||
210 | goto end; | ||
211 | |||
212 | spin_lock_irq(&rtc_lock); | ||
213 | |||
214 | rtc_control = CMOS_READ(RTC_CONTROL); | ||
215 | |||
216 | if (adjust) { | ||
217 | yr += cmos_bcd_read(RTC_YEAR, rtc_control); | ||
218 | mo += cmos_bcd_read(RTC_MONTH, rtc_control); | ||
219 | day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); | ||
220 | hr += cmos_bcd_read(RTC_HOURS, rtc_control); | ||
221 | min += cmos_bcd_read(RTC_MINUTES, rtc_control); | ||
222 | sec += cmos_bcd_read(RTC_SECONDS, rtc_control); | ||
223 | } | ||
224 | |||
225 | spin_unlock_irq(&rtc_lock); | ||
226 | |||
227 | if (sec > 59) { | ||
228 | min += sec/60; | ||
229 | sec = sec%60; | ||
230 | } | ||
231 | if (min > 59) { | ||
232 | hr += min/60; | ||
233 | min = min%60; | ||
234 | } | ||
235 | if (hr > 23) { | ||
236 | day += hr/24; | ||
237 | hr = hr%24; | ||
238 | } | ||
239 | if (day > 31) { | ||
240 | mo += day/32; | ||
241 | day = day%32; | ||
242 | } | ||
243 | if (mo > 12) { | ||
244 | yr += mo/13; | ||
245 | mo = mo%13; | ||
246 | } | ||
247 | |||
248 | spin_lock_irq(&rtc_lock); | ||
249 | /* | ||
250 | * Disable alarm interrupt before setting alarm timer or else | ||
251 | * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs | ||
252 | */ | ||
253 | rtc_control &= ~RTC_AIE; | ||
254 | CMOS_WRITE(rtc_control, RTC_CONTROL); | ||
255 | CMOS_READ(RTC_INTR_FLAGS); | ||
256 | |||
257 | /* write the fields the rtc knows about */ | ||
258 | cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control); | ||
259 | cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control); | ||
260 | cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control); | ||
261 | |||
262 | /* | ||
263 | * If the system supports an enhanced alarm it will have non-zero | ||
264 | * offsets into the CMOS RAM here -- which for some reason are pointing | ||
265 | * to the RTC area of memory. | ||
266 | */ | ||
267 | if (acpi_gbl_FADT.day_alarm) | ||
268 | cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control); | ||
269 | if (acpi_gbl_FADT.month_alarm) | ||
270 | cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control); | ||
271 | if (acpi_gbl_FADT.century) { | ||
272 | if (adjust) | ||
273 | yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100; | ||
274 | cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control); | ||
275 | } | ||
276 | /* enable the rtc alarm interrupt */ | ||
277 | rtc_control |= RTC_AIE; | ||
278 | CMOS_WRITE(rtc_control, RTC_CONTROL); | ||
279 | CMOS_READ(RTC_INTR_FLAGS); | ||
280 | |||
281 | spin_unlock_irq(&rtc_lock); | ||
282 | |||
283 | acpi_clear_event(ACPI_EVENT_RTC); | ||
284 | acpi_enable_event(ACPI_EVENT_RTC, 0); | ||
285 | |||
286 | *ppos += count; | ||
287 | |||
288 | result = 0; | ||
289 | end: | ||
290 | return result ? result : count; | ||
291 | } | ||
292 | #endif /* HAVE_ACPI_LEGACY_ALARM */ | ||
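
The removed cmos_bcd_read()/cmos_bcd_write() helpers above hide whether the RTC keeps its fields in BCD or plain binary (the RTC_DM_BINARY bit of the control register decides). A minimal stand-alone sketch of the same conversion, assuming the usual 8-bit BCD encoding the CMOS registers use:

#include <stdio.h>

/* Same arithmetic as the kernel's bcd2bin()/bin2bcd() for 8-bit values. */
static unsigned int bcd2bin(unsigned char val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
        return ((val / 10) << 4) + val % 10;
}

int main(void)
{
        printf("bcd 0x59 -> %u\n", bcd2bin(0x59));    /* prints 59 */
        printf("bin 23   -> 0x%02x\n", bin2bcd(23));  /* prints 0x23 */
        return 0;
}
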
293 | |||
294 | static int | 22 | static int |
295 | acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | 23 | acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) |
296 | { | 24 | { |
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = { | |||
417 | .release = single_release, | 145 | .release = single_release, |
418 | }; | 146 | }; |
419 | 147 | ||
420 | #ifdef HAVE_ACPI_LEGACY_ALARM | ||
421 | static const struct file_operations acpi_system_alarm_fops = { | ||
422 | .owner = THIS_MODULE, | ||
423 | .open = acpi_system_alarm_open_fs, | ||
424 | .read = seq_read, | ||
425 | .write = acpi_system_write_alarm, | ||
426 | .llseek = seq_lseek, | ||
427 | .release = single_release, | ||
428 | }; | ||
429 | |||
430 | static u32 rtc_handler(void *context) | ||
431 | { | ||
432 | acpi_clear_event(ACPI_EVENT_RTC); | ||
433 | acpi_disable_event(ACPI_EVENT_RTC, 0); | ||
434 | |||
435 | return ACPI_INTERRUPT_HANDLED; | ||
436 | } | ||
437 | #endif /* HAVE_ACPI_LEGACY_ALARM */ | ||
438 | |||
439 | int __init acpi_sleep_proc_init(void) | 148 | int __init acpi_sleep_proc_init(void) |
440 | { | 149 | { |
441 | #ifdef HAVE_ACPI_LEGACY_ALARM | ||
442 | /* 'alarm' [R/W] */ | ||
443 | proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR, | ||
444 | acpi_root_dir, &acpi_system_alarm_fops); | ||
445 | |||
446 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); | ||
447 | /* | ||
448 | * Disable the RTC event after installing RTC handler. | ||
449 | * Only when RTC alarm is set will it be enabled. | ||
450 | */ | ||
451 | acpi_clear_event(ACPI_EVENT_RTC); | ||
452 | acpi_disable_event(ACPI_EVENT_RTC, 0); | ||
453 | #endif /* HAVE_ACPI_LEGACY_ALARM */ | ||
454 | |||
455 | /* 'wakeup device' [R/W] */ | 150 | /* 'wakeup device' [R/W] */ |
456 | proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR, | 151 | proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR, |
457 | acpi_root_dir, &acpi_system_wakeup_device_fops); | 152 | acpi_root_dir, &acpi_system_wakeup_device_fops); |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index cf34d903f4fb..b3171f30b319 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -162,16 +162,23 @@ exit: | |||
162 | return apic_id; | 162 | return apic_id; |
163 | } | 163 | } |
164 | 164 | ||
165 | int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | 165 | int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) |
166 | { | 166 | { |
167 | #ifdef CONFIG_SMP | 167 | int apic_id; |
168 | int i; | ||
169 | #endif | ||
170 | int apic_id = -1; | ||
171 | 168 | ||
172 | apic_id = map_mat_entry(handle, type, acpi_id); | 169 | apic_id = map_mat_entry(handle, type, acpi_id); |
173 | if (apic_id == -1) | 170 | if (apic_id == -1) |
174 | apic_id = map_madt_entry(type, acpi_id); | 171 | apic_id = map_madt_entry(type, acpi_id); |
172 | |||
173 | return apic_id; | ||
174 | } | ||
175 | |||
176 | int acpi_map_cpuid(int apic_id, u32 acpi_id) | ||
177 | { | ||
178 | #ifdef CONFIG_SMP | ||
179 | int i; | ||
180 | #endif | ||
181 | |||
175 | if (apic_id == -1) { | 182 | if (apic_id == -1) { |
176 | /* | 183 | /* |
177 | * On UP processor, there is no _MAT or MADT table. | 184 | * On UP processor, there is no _MAT or MADT table. |
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | |||
211 | #endif | 218 | #endif |
212 | return -1; | 219 | return -1; |
213 | } | 220 | } |
221 | |||
222 | int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | ||
223 | { | ||
224 | int apic_id; | ||
225 | |||
226 | apic_id = acpi_get_apicid(handle, type, acpi_id); | ||
227 | |||
228 | return acpi_map_cpuid(apic_id, acpi_id); | ||
229 | } | ||
214 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); | 230 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); |
215 | 231 | ||
216 | static bool __init processor_physically_present(acpi_handle handle) | 232 | static bool __init processor_physically_present(acpi_handle handle) |
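
Splitting acpi_get_cpuid() into acpi_get_apicid() and acpi_map_cpuid() lets a caller keep the intermediate APIC id around instead of repeating the _MAT/MADT lookup. A hedged sketch of a caller using the two halves explicitly (the example_* name is illustrative, not part of this patch):

/* Sketch only: mirrors what the new acpi_get_cpuid() wrapper does,
 * but keeps apic_id available for the caller's own bookkeeping. */
static int example_resolve_cpu(acpi_handle handle, int type, u32 acpi_id)
{
        int apic_id, cpu;

        apic_id = acpi_get_apicid(handle, type, acpi_id);  /* _MAT first, then MADT */
        cpu = acpi_map_cpuid(apic_id, acpi_id);            /* APIC id -> logical CPU, or -1 */
        if (cpu < 0)
                pr_debug("no logical CPU for ACPI id %u\n", acpi_id);
        return cpu;
}
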
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index e534ba66d5b8..146ab7e2b81d 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
153 | return NOTIFY_OK; | 153 | return NOTIFY_OK; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct notifier_block __refdata acpi_cpu_notifier = | 156 | static struct notifier_block __refdata acpi_cpu_notifier = { |
157 | { | ||
158 | .notifier_call = acpi_cpu_soft_notify, | 157 | .notifier_call = acpi_cpu_soft_notify, |
159 | }; | 158 | }; |
160 | 159 | ||
@@ -172,7 +171,6 @@ static int __acpi_processor_start(struct acpi_device *device) | |||
172 | 171 | ||
173 | #ifdef CONFIG_CPU_FREQ | 172 | #ifdef CONFIG_CPU_FREQ |
174 | acpi_processor_ppc_has_changed(pr, 0); | 173 | acpi_processor_ppc_has_changed(pr, 0); |
175 | acpi_processor_load_module(pr); | ||
176 | #endif | 174 | #endif |
177 | acpi_processor_get_throttling_info(pr); | 175 | acpi_processor_get_throttling_info(pr); |
178 | 176 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c7414a545a4f..644516d9bde6 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -265,9 +265,6 @@ static void tsc_check_state(int state) { return; } | |||
265 | static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) | 265 | static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) |
266 | { | 266 | { |
267 | 267 | ||
268 | if (!pr) | ||
269 | return -EINVAL; | ||
270 | |||
271 | if (!pr->pblk) | 268 | if (!pr->pblk) |
272 | return -ENODEV; | 269 | return -ENODEV; |
273 | 270 | ||
@@ -1050,12 +1047,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr) | |||
1050 | if (disabled_by_idle_boot_param()) | 1047 | if (disabled_by_idle_boot_param()) |
1051 | return 0; | 1048 | return 0; |
1052 | 1049 | ||
1053 | if (!pr) | 1050 | if (nocst) |
1054 | return -EINVAL; | ||
1055 | |||
1056 | if (nocst) { | ||
1057 | return -ENODEV; | 1051 | return -ENODEV; |
1058 | } | ||
1059 | 1052 | ||
1060 | if (!pr->flags.power_setup_done) | 1053 | if (!pr->flags.power_setup_done) |
1061 | return -ENODEV; | 1054 | return -ENODEV; |
@@ -1082,9 +1075,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1082 | if (disabled_by_idle_boot_param()) | 1075 | if (disabled_by_idle_boot_param()) |
1083 | return 0; | 1076 | return 0; |
1084 | 1077 | ||
1085 | if (!pr) | ||
1086 | return -EINVAL; | ||
1087 | |||
1088 | if (nocst) | 1078 | if (nocst) |
1089 | return -ENODEV; | 1079 | return -ENODEV; |
1090 | 1080 | ||
@@ -1157,9 +1147,6 @@ int acpi_processor_power_init(struct acpi_processor *pr) | |||
1157 | first_run++; | 1147 | first_run++; |
1158 | } | 1148 | } |
1159 | 1149 | ||
1160 | if (!pr) | ||
1161 | return -EINVAL; | ||
1162 | |||
1163 | if (acpi_gbl_FADT.cst_control && !nocst) { | 1150 | if (acpi_gbl_FADT.cst_control && !nocst) { |
1164 | status = | 1151 | status = |
1165 | acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); | 1152 | acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 51d7948611da..60a7c28fc167 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -235,28 +235,6 @@ void acpi_processor_ppc_exit(void) | |||
235 | acpi_processor_ppc_status &= ~PPC_REGISTERED; | 235 | acpi_processor_ppc_status &= ~PPC_REGISTERED; |
236 | } | 236 | } |
237 | 237 | ||
238 | /* | ||
239 | * Do a quick check if the systems looks like it should use ACPI | ||
240 | * cpufreq. We look at a _PCT method being available, but don't | ||
241 | * do a whole lot of sanity checks. | ||
242 | */ | ||
243 | void acpi_processor_load_module(struct acpi_processor *pr) | ||
244 | { | ||
245 | static int requested; | ||
246 | acpi_status status = 0; | ||
247 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
248 | |||
249 | if (!arch_has_acpi_pdc() || requested) | ||
250 | return; | ||
251 | status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); | ||
252 | if (!ACPI_FAILURE(status)) { | ||
253 | printk(KERN_INFO PREFIX "Requesting acpi_cpufreq\n"); | ||
254 | request_module_nowait("acpi_cpufreq"); | ||
255 | requested = 1; | ||
256 | } | ||
257 | kfree(buffer.pointer); | ||
258 | } | ||
259 | |||
260 | static int acpi_processor_get_performance_control(struct acpi_processor *pr) | 238 | static int acpi_processor_get_performance_control(struct acpi_processor *pr) |
261 | { | 239 | { |
262 | int result = 0; | 240 | int result = 0; |
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index aef7e1cd1e5d..d465ae6cdd00 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -30,12 +30,6 @@ | |||
30 | #include <linux/moduleparam.h> | 30 | #include <linux/moduleparam.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | 32 | ||
33 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
34 | #include <linux/proc_fs.h> | ||
35 | #include <linux/seq_file.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #endif | ||
38 | |||
39 | #include <linux/acpi.h> | 33 | #include <linux/acpi.h> |
40 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
41 | #include <linux/jiffies.h> | 35 | #include <linux/jiffies.h> |
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000; | |||
67 | module_param(cache_time, uint, 0644); | 61 | module_param(cache_time, uint, 0644); |
68 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 62 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
69 | 63 | ||
70 | extern struct proc_dir_entry *acpi_lock_ac_dir(void); | ||
71 | extern struct proc_dir_entry *acpi_lock_battery_dir(void); | ||
72 | extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); | ||
73 | extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); | ||
74 | |||
75 | #define MAX_SBS_BAT 4 | 64 | #define MAX_SBS_BAT 4 |
76 | #define ACPI_SBS_BLOCK_MAX 32 | 65 | #define ACPI_SBS_BLOCK_MAX 32 |
77 | 66 | ||
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids); | |||
84 | struct acpi_battery { | 73 | struct acpi_battery { |
85 | struct power_supply bat; | 74 | struct power_supply bat; |
86 | struct acpi_sbs *sbs; | 75 | struct acpi_sbs *sbs; |
87 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
88 | struct proc_dir_entry *proc_entry; | ||
89 | #endif | ||
90 | unsigned long update_time; | 76 | unsigned long update_time; |
91 | char name[8]; | 77 | char name[8]; |
92 | char manufacturer_name[ACPI_SBS_BLOCK_MAX]; | 78 | char manufacturer_name[ACPI_SBS_BLOCK_MAX]; |
@@ -119,9 +105,6 @@ struct acpi_sbs { | |||
119 | struct acpi_device *device; | 105 | struct acpi_device *device; |
120 | struct acpi_smb_hc *hc; | 106 | struct acpi_smb_hc *hc; |
121 | struct mutex lock; | 107 | struct mutex lock; |
122 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
123 | struct proc_dir_entry *charger_entry; | ||
124 | #endif | ||
125 | struct acpi_battery battery[MAX_SBS_BAT]; | 108 | struct acpi_battery battery[MAX_SBS_BAT]; |
126 | u8 batteries_supported:4; | 109 | u8 batteries_supported:4; |
127 | u8 manager_present:1; | 110 | u8 manager_present:1; |
@@ -482,261 +465,6 @@ static struct device_attribute alarm_attr = { | |||
482 | }; | 465 | }; |
483 | 466 | ||
484 | /* -------------------------------------------------------------------------- | 467 | /* -------------------------------------------------------------------------- |
485 | FS Interface (/proc/acpi) | ||
486 | -------------------------------------------------------------------------- */ | ||
487 | |||
488 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
489 | /* Generic Routines */ | ||
490 | static int | ||
491 | acpi_sbs_add_fs(struct proc_dir_entry **dir, | ||
492 | struct proc_dir_entry *parent_dir, | ||
493 | char *dir_name, | ||
494 | const struct file_operations *info_fops, | ||
495 | const struct file_operations *state_fops, | ||
496 | const struct file_operations *alarm_fops, void *data) | ||
497 | { | ||
498 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded," | ||
499 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
500 | if (!*dir) { | ||
501 | *dir = proc_mkdir(dir_name, parent_dir); | ||
502 | if (!*dir) { | ||
503 | return -ENODEV; | ||
504 | } | ||
505 | } | ||
506 | |||
507 | /* 'info' [R] */ | ||
508 | if (info_fops) | ||
509 | proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir, | ||
510 | info_fops, data); | ||
511 | |||
512 | /* 'state' [R] */ | ||
513 | if (state_fops) | ||
514 | proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir, | ||
515 | state_fops, data); | ||
516 | |||
517 | /* 'alarm' [R/W] */ | ||
518 | if (alarm_fops) | ||
519 | proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir, | ||
520 | alarm_fops, data); | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | /* Smart Battery Interface */ | ||
525 | static struct proc_dir_entry *acpi_battery_dir = NULL; | ||
526 | |||
527 | static inline char *acpi_battery_units(struct acpi_battery *battery) | ||
528 | { | ||
529 | return acpi_battery_mode(battery) ? " mW" : " mA"; | ||
530 | } | ||
531 | |||
532 | |||
533 | static int acpi_battery_read_info(struct seq_file *seq, void *offset) | ||
534 | { | ||
535 | struct acpi_battery *battery = seq->private; | ||
536 | struct acpi_sbs *sbs = battery->sbs; | ||
537 | int result = 0; | ||
538 | |||
539 | mutex_lock(&sbs->lock); | ||
540 | |||
541 | seq_printf(seq, "present: %s\n", | ||
542 | (battery->present) ? "yes" : "no"); | ||
543 | if (!battery->present) | ||
544 | goto end; | ||
545 | |||
546 | seq_printf(seq, "design capacity: %i%sh\n", | ||
547 | battery->design_capacity * acpi_battery_scale(battery), | ||
548 | acpi_battery_units(battery)); | ||
549 | seq_printf(seq, "last full capacity: %i%sh\n", | ||
550 | battery->full_charge_capacity * acpi_battery_scale(battery), | ||
551 | acpi_battery_units(battery)); | ||
552 | seq_printf(seq, "battery technology: rechargeable\n"); | ||
553 | seq_printf(seq, "design voltage: %i mV\n", | ||
554 | battery->design_voltage * acpi_battery_vscale(battery)); | ||
555 | seq_printf(seq, "design capacity warning: unknown\n"); | ||
556 | seq_printf(seq, "design capacity low: unknown\n"); | ||
557 | seq_printf(seq, "cycle count: %i\n", battery->cycle_count); | ||
558 | seq_printf(seq, "capacity granularity 1: unknown\n"); | ||
559 | seq_printf(seq, "capacity granularity 2: unknown\n"); | ||
560 | seq_printf(seq, "model number: %s\n", battery->device_name); | ||
561 | seq_printf(seq, "serial number: %i\n", | ||
562 | battery->serial_number); | ||
563 | seq_printf(seq, "battery type: %s\n", | ||
564 | battery->device_chemistry); | ||
565 | seq_printf(seq, "OEM info: %s\n", | ||
566 | battery->manufacturer_name); | ||
567 | end: | ||
568 | mutex_unlock(&sbs->lock); | ||
569 | return result; | ||
570 | } | ||
571 | |||
572 | static int acpi_battery_info_open_fs(struct inode *inode, struct file *file) | ||
573 | { | ||
574 | return single_open(file, acpi_battery_read_info, PDE_DATA(inode)); | ||
575 | } | ||
576 | |||
577 | static int acpi_battery_read_state(struct seq_file *seq, void *offset) | ||
578 | { | ||
579 | struct acpi_battery *battery = seq->private; | ||
580 | struct acpi_sbs *sbs = battery->sbs; | ||
581 | int rate; | ||
582 | |||
583 | mutex_lock(&sbs->lock); | ||
584 | seq_printf(seq, "present: %s\n", | ||
585 | (battery->present) ? "yes" : "no"); | ||
586 | if (!battery->present) | ||
587 | goto end; | ||
588 | |||
589 | acpi_battery_get_state(battery); | ||
590 | seq_printf(seq, "capacity state: %s\n", | ||
591 | (battery->state & 0x0010) ? "critical" : "ok"); | ||
592 | seq_printf(seq, "charging state: %s\n", | ||
593 | (battery->rate_now < 0) ? "discharging" : | ||
594 | ((battery->rate_now > 0) ? "charging" : "charged")); | ||
595 | rate = abs(battery->rate_now) * acpi_battery_ipscale(battery); | ||
596 | rate *= (acpi_battery_mode(battery))?(battery->voltage_now * | ||
597 | acpi_battery_vscale(battery)/1000):1; | ||
598 | seq_printf(seq, "present rate: %d%s\n", rate, | ||
599 | acpi_battery_units(battery)); | ||
600 | seq_printf(seq, "remaining capacity: %i%sh\n", | ||
601 | battery->capacity_now * acpi_battery_scale(battery), | ||
602 | acpi_battery_units(battery)); | ||
603 | seq_printf(seq, "present voltage: %i mV\n", | ||
604 | battery->voltage_now * acpi_battery_vscale(battery)); | ||
605 | |||
606 | end: | ||
607 | mutex_unlock(&sbs->lock); | ||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int acpi_battery_state_open_fs(struct inode *inode, struct file *file) | ||
612 | { | ||
613 | return single_open(file, acpi_battery_read_state, PDE_DATA(inode)); | ||
614 | } | ||
615 | |||
616 | static int acpi_battery_read_alarm(struct seq_file *seq, void *offset) | ||
617 | { | ||
618 | struct acpi_battery *battery = seq->private; | ||
619 | struct acpi_sbs *sbs = battery->sbs; | ||
620 | int result = 0; | ||
621 | |||
622 | mutex_lock(&sbs->lock); | ||
623 | |||
624 | if (!battery->present) { | ||
625 | seq_printf(seq, "present: no\n"); | ||
626 | goto end; | ||
627 | } | ||
628 | |||
629 | acpi_battery_get_alarm(battery); | ||
630 | seq_printf(seq, "alarm: "); | ||
631 | if (battery->alarm_capacity) | ||
632 | seq_printf(seq, "%i%sh\n", | ||
633 | battery->alarm_capacity * | ||
634 | acpi_battery_scale(battery), | ||
635 | acpi_battery_units(battery)); | ||
636 | else | ||
637 | seq_printf(seq, "disabled\n"); | ||
638 | end: | ||
639 | mutex_unlock(&sbs->lock); | ||
640 | return result; | ||
641 | } | ||
642 | |||
643 | static ssize_t | ||
644 | acpi_battery_write_alarm(struct file *file, const char __user * buffer, | ||
645 | size_t count, loff_t * ppos) | ||
646 | { | ||
647 | struct seq_file *seq = file->private_data; | ||
648 | struct acpi_battery *battery = seq->private; | ||
649 | struct acpi_sbs *sbs = battery->sbs; | ||
650 | char alarm_string[12] = { '\0' }; | ||
651 | int result = 0; | ||
652 | mutex_lock(&sbs->lock); | ||
653 | if (!battery->present) { | ||
654 | result = -ENODEV; | ||
655 | goto end; | ||
656 | } | ||
657 | if (count > sizeof(alarm_string) - 1) { | ||
658 | result = -EINVAL; | ||
659 | goto end; | ||
660 | } | ||
661 | if (copy_from_user(alarm_string, buffer, count)) { | ||
662 | result = -EFAULT; | ||
663 | goto end; | ||
664 | } | ||
665 | alarm_string[count] = 0; | ||
666 | battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) / | ||
667 | acpi_battery_scale(battery); | ||
668 | acpi_battery_set_alarm(battery); | ||
669 | end: | ||
670 | mutex_unlock(&sbs->lock); | ||
671 | if (result) | ||
672 | return result; | ||
673 | return count; | ||
674 | } | ||
675 | |||
676 | static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file) | ||
677 | { | ||
678 | return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode)); | ||
679 | } | ||
680 | |||
681 | static const struct file_operations acpi_battery_info_fops = { | ||
682 | .open = acpi_battery_info_open_fs, | ||
683 | .read = seq_read, | ||
684 | .llseek = seq_lseek, | ||
685 | .release = single_release, | ||
686 | .owner = THIS_MODULE, | ||
687 | }; | ||
688 | |||
689 | static const struct file_operations acpi_battery_state_fops = { | ||
690 | .open = acpi_battery_state_open_fs, | ||
691 | .read = seq_read, | ||
692 | .llseek = seq_lseek, | ||
693 | .release = single_release, | ||
694 | .owner = THIS_MODULE, | ||
695 | }; | ||
696 | |||
697 | static const struct file_operations acpi_battery_alarm_fops = { | ||
698 | .open = acpi_battery_alarm_open_fs, | ||
699 | .read = seq_read, | ||
700 | .write = acpi_battery_write_alarm, | ||
701 | .llseek = seq_lseek, | ||
702 | .release = single_release, | ||
703 | .owner = THIS_MODULE, | ||
704 | }; | ||
705 | |||
706 | /* Legacy AC Adapter Interface */ | ||
707 | |||
708 | static struct proc_dir_entry *acpi_ac_dir = NULL; | ||
709 | |||
710 | static int acpi_ac_read_state(struct seq_file *seq, void *offset) | ||
711 | { | ||
712 | |||
713 | struct acpi_sbs *sbs = seq->private; | ||
714 | |||
715 | mutex_lock(&sbs->lock); | ||
716 | |||
717 | seq_printf(seq, "state: %s\n", | ||
718 | sbs->charger_present ? "on-line" : "off-line"); | ||
719 | |||
720 | mutex_unlock(&sbs->lock); | ||
721 | return 0; | ||
722 | } | ||
723 | |||
724 | static int acpi_ac_state_open_fs(struct inode *inode, struct file *file) | ||
725 | { | ||
726 | return single_open(file, acpi_ac_read_state, PDE_DATA(inode)); | ||
727 | } | ||
728 | |||
729 | static const struct file_operations acpi_ac_state_fops = { | ||
730 | .open = acpi_ac_state_open_fs, | ||
731 | .read = seq_read, | ||
732 | .llseek = seq_lseek, | ||
733 | .release = single_release, | ||
734 | .owner = THIS_MODULE, | ||
735 | }; | ||
736 | |||
737 | #endif | ||
738 | |||
739 | /* -------------------------------------------------------------------------- | ||
740 | Driver Interface | 468 | Driver Interface |
741 | -------------------------------------------------------------------------- */ | 469 | -------------------------------------------------------------------------- */ |
742 | static int acpi_battery_read(struct acpi_battery *battery) | 470 | static int acpi_battery_read(struct acpi_battery *battery) |
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) | |||
781 | return result; | 509 | return result; |
782 | 510 | ||
783 | sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id); | 511 | sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id); |
784 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
785 | acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir, | ||
786 | battery->name, &acpi_battery_info_fops, | ||
787 | &acpi_battery_state_fops, &acpi_battery_alarm_fops, | ||
788 | battery); | ||
789 | #endif | ||
790 | battery->bat.name = battery->name; | 512 | battery->bat.name = battery->name; |
791 | battery->bat.type = POWER_SUPPLY_TYPE_BATTERY; | 513 | battery->bat.type = POWER_SUPPLY_TYPE_BATTERY; |
792 | if (!acpi_battery_mode(battery)) { | 514 | if (!acpi_battery_mode(battery)) { |
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) | |||
822 | device_remove_file(battery->bat.dev, &alarm_attr); | 544 | device_remove_file(battery->bat.dev, &alarm_attr); |
823 | power_supply_unregister(&battery->bat); | 545 | power_supply_unregister(&battery->bat); |
824 | } | 546 | } |
825 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
826 | proc_remove(battery->proc_entry); | ||
827 | battery->proc_entry = NULL; | ||
828 | #endif | ||
829 | } | 547 | } |
830 | 548 | ||
831 | static int acpi_charger_add(struct acpi_sbs *sbs) | 549 | static int acpi_charger_add(struct acpi_sbs *sbs) |
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs) | |||
835 | result = acpi_ac_get_present(sbs); | 553 | result = acpi_ac_get_present(sbs); |
836 | if (result) | 554 | if (result) |
837 | goto end; | 555 | goto end; |
838 | #ifdef CONFIG_ACPI_PROCFS_POWER | 556 | |
839 | result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir, | ||
840 | ACPI_AC_DIR_NAME, NULL, | ||
841 | &acpi_ac_state_fops, NULL, sbs); | ||
842 | if (result) | ||
843 | goto end; | ||
844 | #endif | ||
845 | sbs->charger.name = "sbs-charger"; | 557 | sbs->charger.name = "sbs-charger"; |
846 | sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; | 558 | sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; |
847 | sbs->charger.properties = sbs_ac_props; | 559 | sbs->charger.properties = sbs_ac_props; |
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs) | |||
859 | { | 571 | { |
860 | if (sbs->charger.dev) | 572 | if (sbs->charger.dev) |
861 | power_supply_unregister(&sbs->charger); | 573 | power_supply_unregister(&sbs->charger); |
862 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
863 | proc_remove(sbs->charger_entry); | ||
864 | sbs->charger_entry = NULL; | ||
865 | #endif | ||
866 | } | 574 | } |
867 | 575 | ||
868 | static void acpi_sbs_callback(void *context) | 576 | static void acpi_sbs_callback(void *context) |
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device) | |||
950 | return 0; | 658 | return 0; |
951 | } | 659 | } |
952 | 660 | ||
953 | static void acpi_sbs_rmdirs(void) | ||
954 | { | ||
955 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
956 | if (acpi_ac_dir) { | ||
957 | acpi_unlock_ac_dir(acpi_ac_dir); | ||
958 | acpi_ac_dir = NULL; | ||
959 | } | ||
960 | if (acpi_battery_dir) { | ||
961 | acpi_unlock_battery_dir(acpi_battery_dir); | ||
962 | acpi_battery_dir = NULL; | ||
963 | } | ||
964 | #endif | ||
965 | } | ||
966 | |||
967 | #ifdef CONFIG_PM_SLEEP | 661 | #ifdef CONFIG_PM_SLEEP |
968 | static int acpi_sbs_resume(struct device *dev) | 662 | static int acpi_sbs_resume(struct device *dev) |
969 | { | 663 | { |
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void) | |||
995 | 689 | ||
996 | if (acpi_disabled) | 690 | if (acpi_disabled) |
997 | return -ENODEV; | 691 | return -ENODEV; |
998 | #ifdef CONFIG_ACPI_PROCFS_POWER | 692 | |
999 | acpi_ac_dir = acpi_lock_ac_dir(); | ||
1000 | if (!acpi_ac_dir) | ||
1001 | return -ENODEV; | ||
1002 | acpi_battery_dir = acpi_lock_battery_dir(); | ||
1003 | if (!acpi_battery_dir) { | ||
1004 | acpi_sbs_rmdirs(); | ||
1005 | return -ENODEV; | ||
1006 | } | ||
1007 | #endif | ||
1008 | result = acpi_bus_register_driver(&acpi_sbs_driver); | 693 | result = acpi_bus_register_driver(&acpi_sbs_driver); |
1009 | if (result < 0) { | 694 | if (result < 0) |
1010 | acpi_sbs_rmdirs(); | ||
1011 | return -ENODEV; | 695 | return -ENODEV; |
1012 | } | 696 | |
1013 | return 0; | 697 | return 0; |
1014 | } | 698 | } |
1015 | 699 | ||
1016 | static void __exit acpi_sbs_exit(void) | 700 | static void __exit acpi_sbs_exit(void) |
1017 | { | 701 | { |
1018 | acpi_bus_unregister_driver(&acpi_sbs_driver); | 702 | acpi_bus_unregister_driver(&acpi_sbs_driver); |
1019 | acpi_sbs_rmdirs(); | ||
1020 | return; | 703 | return; |
1021 | } | 704 | } |
1022 | 705 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fee8a297c7d9..55f9dedbbf9f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -125,8 +125,8 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha | |||
125 | } | 125 | } |
126 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); | 126 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); |
127 | 127 | ||
128 | static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl, | 128 | static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data, |
129 | void *data, void **ret_p) | 129 | void **ret_p) |
130 | { | 130 | { |
131 | struct acpi_device *device = NULL; | 131 | struct acpi_device *device = NULL; |
132 | struct acpi_device_physical_node *pn; | 132 | struct acpi_device_physical_node *pn; |
@@ -136,6 +136,11 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl, | |||
136 | if (acpi_bus_get_device(handle, &device)) | 136 | if (acpi_bus_get_device(handle, &device)) |
137 | return AE_OK; | 137 | return AE_OK; |
138 | 138 | ||
139 | if (device->handler && !device->handler->hotplug.enabled) { | ||
140 | *ret_p = &device->dev; | ||
141 | return AE_SUPPORT; | ||
142 | } | ||
143 | |||
139 | mutex_lock(&device->physical_node_lock); | 144 | mutex_lock(&device->physical_node_lock); |
140 | 145 | ||
141 | list_for_each_entry(pn, &device->physical_node_list, node) { | 146 | list_for_each_entry(pn, &device->physical_node_list, node) { |
@@ -168,8 +173,8 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl, | |||
168 | return status; | 173 | return status; |
169 | } | 174 | } |
170 | 175 | ||
171 | static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl, | 176 | static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data, |
172 | void *data, void **ret_p) | 177 | void **ret_p) |
173 | { | 178 | { |
174 | struct acpi_device *device = NULL; | 179 | struct acpi_device *device = NULL; |
175 | struct acpi_device_physical_node *pn; | 180 | struct acpi_device_physical_node *pn; |
@@ -214,26 +219,32 @@ static int acpi_scan_hot_remove(struct acpi_device *device) | |||
214 | * If the first pass is successful, the second one isn't needed, though. | 219 | * If the first pass is successful, the second one isn't needed, though. |
215 | */ | 220 | */ |
216 | errdev = NULL; | 221 | errdev = NULL; |
217 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, | 222 | status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, |
218 | NULL, acpi_bus_offline_companions, | 223 | NULL, acpi_bus_offline, (void *)false, |
219 | (void *)false, (void **)&errdev); | 224 | (void **)&errdev); |
220 | acpi_bus_offline_companions(handle, 0, (void *)false, (void **)&errdev); | 225 | if (status == AE_SUPPORT) { |
226 | dev_warn(errdev, "Offline disabled.\n"); | ||
227 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, | ||
228 | acpi_bus_online, NULL, NULL, NULL); | ||
229 | put_device(&device->dev); | ||
230 | return -EPERM; | ||
231 | } | ||
232 | acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev); | ||
221 | if (errdev) { | 233 | if (errdev) { |
222 | errdev = NULL; | 234 | errdev = NULL; |
223 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, | 235 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, |
224 | NULL, acpi_bus_offline_companions, | 236 | NULL, acpi_bus_offline, (void *)true, |
225 | (void *)true , (void **)&errdev); | 237 | (void **)&errdev); |
226 | if (!errdev || acpi_force_hot_remove) | 238 | if (!errdev || acpi_force_hot_remove) |
227 | acpi_bus_offline_companions(handle, 0, (void *)true, | 239 | acpi_bus_offline(handle, 0, (void *)true, |
228 | (void **)&errdev); | 240 | (void **)&errdev); |
229 | 241 | ||
230 | if (errdev && !acpi_force_hot_remove) { | 242 | if (errdev && !acpi_force_hot_remove) { |
231 | dev_warn(errdev, "Offline failed.\n"); | 243 | dev_warn(errdev, "Offline failed.\n"); |
232 | acpi_bus_online_companions(handle, 0, NULL, NULL); | 244 | acpi_bus_online(handle, 0, NULL, NULL); |
233 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, | 245 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, |
234 | ACPI_UINT32_MAX, | 246 | ACPI_UINT32_MAX, acpi_bus_online, |
235 | acpi_bus_online_companions, NULL, | 247 | NULL, NULL, NULL); |
236 | NULL, NULL); | ||
237 | put_device(&device->dev); | 248 | put_device(&device->dev); |
238 | return -EBUSY; | 249 | return -EBUSY; |
239 | } | 250 | } |
@@ -274,10 +285,10 @@ static int acpi_scan_hot_remove(struct acpi_device *device) | |||
274 | return 0; | 285 | return 0; |
275 | } | 286 | } |
276 | 287 | ||
277 | static void acpi_bus_device_eject(void *context) | 288 | void acpi_bus_device_eject(void *data, u32 ost_src) |
278 | { | 289 | { |
279 | acpi_handle handle = context; | 290 | struct acpi_device *device = data; |
280 | struct acpi_device *device = NULL; | 291 | acpi_handle handle = device->handle; |
281 | struct acpi_scan_handler *handler; | 292 | struct acpi_scan_handler *handler; |
282 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; | 293 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; |
283 | int error; | 294 | int error; |
@@ -285,38 +296,41 @@ static void acpi_bus_device_eject(void *context) | |||
285 | lock_device_hotplug(); | 296 | lock_device_hotplug(); |
286 | mutex_lock(&acpi_scan_lock); | 297 | mutex_lock(&acpi_scan_lock); |
287 | 298 | ||
288 | acpi_bus_get_device(handle, &device); | ||
289 | if (!device) | ||
290 | goto err_out; | ||
291 | |||
292 | handler = device->handler; | 299 | handler = device->handler; |
293 | if (!handler || !handler->hotplug.enabled) { | 300 | if (!handler || !handler->hotplug.enabled) { |
294 | ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; | 301 | put_device(&device->dev); |
295 | goto err_out; | 302 | goto err_support; |
296 | } | 303 | } |
297 | acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, | 304 | |
298 | ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); | 305 | if (ost_src == ACPI_NOTIFY_EJECT_REQUEST) |
306 | acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, | ||
307 | ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); | ||
308 | |||
299 | if (handler->hotplug.mode == AHM_CONTAINER) | 309 | if (handler->hotplug.mode == AHM_CONTAINER) |
300 | kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); | 310 | kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); |
301 | 311 | ||
302 | get_device(&device->dev); | ||
303 | error = acpi_scan_hot_remove(device); | 312 | error = acpi_scan_hot_remove(device); |
304 | if (error) | 313 | if (error == -EPERM) { |
314 | goto err_support; | ||
315 | } else if (error) { | ||
305 | goto err_out; | 316 | goto err_out; |
317 | } | ||
306 | 318 | ||
307 | out: | 319 | out: |
308 | mutex_unlock(&acpi_scan_lock); | 320 | mutex_unlock(&acpi_scan_lock); |
309 | unlock_device_hotplug(); | 321 | unlock_device_hotplug(); |
310 | return; | 322 | return; |
311 | 323 | ||
324 | err_support: | ||
325 | ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; | ||
312 | err_out: | 326 | err_out: |
313 | acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code, | 327 | acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL); |
314 | NULL); | ||
315 | goto out; | 328 | goto out; |
316 | } | 329 | } |
317 | 330 | ||
318 | static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source) | 331 | static void acpi_scan_bus_device_check(void *data, u32 ost_source) |
319 | { | 332 | { |
333 | acpi_handle handle = data; | ||
320 | struct acpi_device *device = NULL; | 334 | struct acpi_device *device = NULL; |
321 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; | 335 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; |
322 | int error; | 336 | int error; |
@@ -331,8 +345,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source) | |||
331 | goto out; | 345 | goto out; |
332 | } | 346 | } |
333 | } | 347 | } |
334 | acpi_evaluate_hotplug_ost(handle, ost_source, | ||
335 | ACPI_OST_SC_INSERT_IN_PROGRESS, NULL); | ||
336 | error = acpi_bus_scan(handle); | 348 | error = acpi_bus_scan(handle); |
337 | if (error) { | 349 | if (error) { |
338 | acpi_handle_warn(handle, "Namespace scan failure\n"); | 350 | acpi_handle_warn(handle, "Namespace scan failure\n"); |
@@ -353,18 +365,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source) | |||
353 | unlock_device_hotplug(); | 365 | unlock_device_hotplug(); |
354 | } | 366 | } |
355 | 367 | ||
356 | static void acpi_scan_bus_check(void *context) | ||
357 | { | ||
358 | acpi_scan_bus_device_check((acpi_handle)context, | ||
359 | ACPI_NOTIFY_BUS_CHECK); | ||
360 | } | ||
361 | |||
362 | static void acpi_scan_device_check(void *context) | ||
363 | { | ||
364 | acpi_scan_bus_device_check((acpi_handle)context, | ||
365 | ACPI_NOTIFY_DEVICE_CHECK); | ||
366 | } | ||
367 | |||
368 | static void acpi_hotplug_unsupported(acpi_handle handle, u32 type) | 368 | static void acpi_hotplug_unsupported(acpi_handle handle, u32 type) |
369 | { | 369 | { |
370 | u32 ost_status; | 370 | u32 ost_status; |
@@ -395,8 +395,8 @@ static void acpi_hotplug_unsupported(acpi_handle handle, u32 type) | |||
395 | 395 | ||
396 | static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) | 396 | static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) |
397 | { | 397 | { |
398 | acpi_osd_exec_callback callback; | ||
399 | struct acpi_scan_handler *handler = data; | 398 | struct acpi_scan_handler *handler = data; |
399 | struct acpi_device *adev; | ||
400 | acpi_status status; | 400 | acpi_status status; |
401 | 401 | ||
402 | if (!handler->hotplug.enabled) | 402 | if (!handler->hotplug.enabled) |
@@ -405,56 +405,35 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) | |||
405 | switch (type) { | 405 | switch (type) { |
406 | case ACPI_NOTIFY_BUS_CHECK: | 406 | case ACPI_NOTIFY_BUS_CHECK: |
407 | acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); | 407 | acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); |
408 | callback = acpi_scan_bus_check; | ||
409 | break; | 408 | break; |
410 | case ACPI_NOTIFY_DEVICE_CHECK: | 409 | case ACPI_NOTIFY_DEVICE_CHECK: |
411 | acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); | 410 | acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); |
412 | callback = acpi_scan_device_check; | ||
413 | break; | 411 | break; |
414 | case ACPI_NOTIFY_EJECT_REQUEST: | 412 | case ACPI_NOTIFY_EJECT_REQUEST: |
415 | acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); | 413 | acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); |
416 | callback = acpi_bus_device_eject; | 414 | status = acpi_bus_get_device(handle, &adev); |
417 | break; | 415 | if (ACPI_FAILURE(status)) |
416 | goto err_out; | ||
417 | |||
418 | get_device(&adev->dev); | ||
419 | status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type); | ||
420 | if (ACPI_SUCCESS(status)) | ||
421 | return; | ||
422 | |||
423 | put_device(&adev->dev); | ||
424 | goto err_out; | ||
418 | default: | 425 | default: |
419 | /* non-hotplug event; possibly handled by other handler */ | 426 | /* non-hotplug event; possibly handled by other handler */ |
420 | return; | 427 | return; |
421 | } | 428 | } |
422 | status = acpi_os_hotplug_execute(callback, handle); | 429 | status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type); |
423 | if (ACPI_FAILURE(status)) | 430 | if (ACPI_SUCCESS(status)) |
424 | acpi_evaluate_hotplug_ost(handle, type, | 431 | return; |
425 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, | ||
426 | NULL); | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * acpi_bus_hot_remove_device: hot-remove a device and its children | ||
431 | * @context: struct acpi_eject_event pointer (freed in this func) | ||
432 | * | ||
433 | * Hot-remove a device and its children. This function frees up the | ||
434 | * memory space passed by arg context, so that the caller may call | ||
435 | * this function asynchronously through acpi_os_hotplug_execute(). | ||
436 | */ | ||
437 | void acpi_bus_hot_remove_device(void *context) | ||
438 | { | ||
439 | struct acpi_eject_event *ej_event = context; | ||
440 | struct acpi_device *device = ej_event->device; | ||
441 | acpi_handle handle = device->handle; | ||
442 | int error; | ||
443 | |||
444 | lock_device_hotplug(); | ||
445 | mutex_lock(&acpi_scan_lock); | ||
446 | |||
447 | error = acpi_scan_hot_remove(device); | ||
448 | if (error && handle) | ||
449 | acpi_evaluate_hotplug_ost(handle, ej_event->event, | ||
450 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, | ||
451 | NULL); | ||
452 | 432 | ||
453 | mutex_unlock(&acpi_scan_lock); | 433 | err_out: |
454 | unlock_device_hotplug(); | 434 | acpi_evaluate_hotplug_ost(handle, type, |
455 | kfree(context); | 435 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); |
456 | } | 436 | } |
457 | EXPORT_SYMBOL(acpi_bus_hot_remove_device); | ||
458 | 437 | ||
459 | static ssize_t real_power_state_show(struct device *dev, | 438 | static ssize_t real_power_state_show(struct device *dev, |
460 | struct device_attribute *attr, char *buf) | 439 | struct device_attribute *attr, char *buf) |
@@ -487,10 +466,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
487 | const char *buf, size_t count) | 466 | const char *buf, size_t count) |
488 | { | 467 | { |
489 | struct acpi_device *acpi_device = to_acpi_device(d); | 468 | struct acpi_device *acpi_device = to_acpi_device(d); |
490 | struct acpi_eject_event *ej_event; | ||
491 | acpi_object_type not_used; | 469 | acpi_object_type not_used; |
492 | acpi_status status; | 470 | acpi_status status; |
493 | int ret; | ||
494 | 471 | ||
495 | if (!count || buf[0] != '1') | 472 | if (!count || buf[0] != '1') |
496 | return -EINVAL; | 473 | return -EINVAL; |
@@ -503,28 +480,18 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
503 | if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) | 480 | if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) |
504 | return -ENODEV; | 481 | return -ENODEV; |
505 | 482 | ||
506 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); | ||
507 | if (!ej_event) { | ||
508 | ret = -ENOMEM; | ||
509 | goto err_out; | ||
510 | } | ||
511 | acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, | 483 | acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, |
512 | ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); | 484 | ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); |
513 | ej_event->device = acpi_device; | ||
514 | ej_event->event = ACPI_OST_EC_OSPM_EJECT; | ||
515 | get_device(&acpi_device->dev); | 485 | get_device(&acpi_device->dev); |
516 | status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); | 486 | status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device, |
487 | ACPI_OST_EC_OSPM_EJECT); | ||
517 | if (ACPI_SUCCESS(status)) | 488 | if (ACPI_SUCCESS(status)) |
518 | return count; | 489 | return count; |
519 | 490 | ||
520 | put_device(&acpi_device->dev); | 491 | put_device(&acpi_device->dev); |
521 | kfree(ej_event); | ||
522 | ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN; | ||
523 | |||
524 | err_out: | ||
525 | acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, | 492 | acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, |
526 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); | 493 | ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); |
527 | return ret; | 494 | return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN; |
528 | } | 495 | } |
529 | 496 | ||
530 | static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); | 497 | static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); |
@@ -1676,7 +1643,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, | |||
1676 | 1643 | ||
1677 | void acpi_device_add_finalize(struct acpi_device *device) | 1644 | void acpi_device_add_finalize(struct acpi_device *device) |
1678 | { | 1645 | { |
1679 | device->flags.match_driver = true; | ||
1680 | dev_set_uevent_suppress(&device->dev, false); | 1646 | dev_set_uevent_suppress(&device->dev, false); |
1681 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); | 1647 | kobject_uevent(&device->dev.kobj, KOBJ_ADD); |
1682 | } | 1648 | } |
@@ -1915,8 +1881,12 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used, | |||
1915 | return AE_OK; | 1881 | return AE_OK; |
1916 | 1882 | ||
1917 | ret = acpi_scan_attach_handler(device); | 1883 | ret = acpi_scan_attach_handler(device); |
1918 | if (ret) | 1884 | if (ret < 0) |
1919 | return ret > 0 ? AE_OK : AE_CTRL_DEPTH; | 1885 | return AE_CTRL_DEPTH; |
1886 | |||
1887 | device->flags.match_driver = true; | ||
1888 | if (ret > 0) | ||
1889 | return AE_OK; | ||
1920 | 1890 | ||
1921 | ret = device_attach(&device->dev); | 1891 | ret = device_attach(&device->dev); |
1922 | return ret >= 0 ? AE_OK : AE_CTRL_DEPTH; | 1892 | return ret >= 0 ? AE_OK : AE_CTRL_DEPTH; |
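
The scan.c hotplug paths now funnel their work through acpi_hotplug_execute(), whose callbacks take (void *data, u32 ost_src) so the _OST source event travels with the deferred job. A hedged sketch of that pattern, assuming the acpi_hotplug_execute() helper introduced elsewhere in this series and the standard _OST status codes; the example_* callback is hypothetical:

/* Sketch of the deferred-hotplug pattern used above; only the _OST
 * reporting convention is shown, the real work is elided. */
static void example_deferred_check(void *data, u32 ost_src)
{
        acpi_handle handle = data;

        /* ... perform the rescan/eject work under acpi_scan_lock ... */
        acpi_evaluate_hotplug_ost(handle, ost_src, ACPI_OST_SC_SUCCESS, NULL);
}

static void example_notify(acpi_handle handle, u32 type, void *context)
{
        acpi_status status;

        status = acpi_hotplug_execute(example_deferred_check, handle, type);
        if (ACPI_FAILURE(status))
                acpi_evaluate_hotplug_ost(handle, type,
                                          ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
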
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 05306a59aedc..db5293650f62 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj, | |||
564 | acpi_event_status status; | 564 | acpi_event_status status; |
565 | acpi_handle handle; | 565 | acpi_handle handle; |
566 | int result = 0; | 566 | int result = 0; |
567 | unsigned long tmp; | ||
567 | 568 | ||
568 | if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) { | 569 | if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) { |
569 | int i; | 570 | int i; |
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj, | |||
596 | else if (!strcmp(buf, "clear\n") && | 597 | else if (!strcmp(buf, "clear\n") && |
597 | (status & ACPI_EVENT_FLAG_SET)) | 598 | (status & ACPI_EVENT_FLAG_SET)) |
598 | result = acpi_clear_gpe(handle, index); | 599 | result = acpi_clear_gpe(handle, index); |
600 | else if (!kstrtoul(buf, 0, &tmp)) | ||
601 | all_counters[index].count = tmp; | ||
599 | else | 602 | else |
600 | all_counters[index].count = strtoul(buf, NULL, 0); | 603 | result = -EINVAL; |
601 | } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { | 604 | } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { |
602 | int event = index - num_gpes; | 605 | int event = index - num_gpes; |
603 | if (!strcmp(buf, "disable\n") && | 606 | if (!strcmp(buf, "disable\n") && |
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj, | |||
609 | else if (!strcmp(buf, "clear\n") && | 612 | else if (!strcmp(buf, "clear\n") && |
610 | (status & ACPI_EVENT_FLAG_SET)) | 613 | (status & ACPI_EVENT_FLAG_SET)) |
611 | result = acpi_clear_event(event); | 614 | result = acpi_clear_event(event); |
615 | else if (!kstrtoul(buf, 0, &tmp)) | ||
616 | all_counters[index].count = tmp; | ||
612 | else | 617 | else |
613 | all_counters[index].count = strtoul(buf, NULL, 0); | 618 | result = -EINVAL; |
614 | } else | 619 | } else |
615 | all_counters[index].count = strtoul(buf, NULL, 0); | 620 | all_counters[index].count = strtoul(buf, NULL, 0); |
616 | 621 | ||
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, | |||
762 | if (!hotplug_kobj) | 767 | if (!hotplug_kobj) |
763 | goto err_out; | 768 | goto err_out; |
764 | 769 | ||
765 | kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype); | 770 | error = kobject_init_and_add(&hotplug->kobj, |
766 | error = kobject_set_name(&hotplug->kobj, "%s", name); | 771 | &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); |
767 | if (error) | ||
768 | goto err_out; | ||
769 | |||
770 | hotplug->kobj.parent = hotplug_kobj; | ||
771 | error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL); | ||
772 | if (error) | 772 | if (error) |
773 | goto err_out; | 773 | goto err_out; |
774 | 774 | ||
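
The counter_set() hunks above swap unchecked strtoul() for kstrtoul(), so malformed input now yields -EINVAL instead of silently becoming 0. A small sketch of the same pattern in a generic sysfs store handler; the attribute and backing variable are made up for illustration:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned long example_threshold;  /* hypothetical backing value */

static ssize_t threshold_store(struct kobject *kobj, struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 0, &val);   /* base 0: accepts 0x.., 0.., decimal */
        if (ret)
                return ret;             /* -EINVAL or -ERANGE, never a bogus 0 */

        example_threshold = val;
        return count;
}
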
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 6a0329340b42..e600b5dbfcb6 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) | |||
299 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 299 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
300 | "No critical threshold\n")); | 300 | "No critical threshold\n")); |
301 | } else if (tmp <= 2732) { | 301 | } else if (tmp <= 2732) { |
302 | printk(KERN_WARNING FW_BUG "Invalid critical threshold " | 302 | pr_warn(FW_BUG "Invalid critical threshold (%llu)\n", |
303 | "(%llu)\n", tmp); | 303 | tmp); |
304 | tz->trips.critical.flags.valid = 0; | 304 | tz->trips.critical.flags.valid = 0; |
305 | } else { | 305 | } else { |
306 | tz->trips.critical.flags.valid = 1; | 306 | tz->trips.critical.flags.valid = 1; |
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) | |||
317 | * Allow override critical threshold | 317 | * Allow override critical threshold |
318 | */ | 318 | */ |
319 | if (crt_k > tz->trips.critical.temperature) | 319 | if (crt_k > tz->trips.critical.temperature) |
320 | printk(KERN_WARNING PREFIX | 320 | pr_warn(PREFIX "Critical threshold %d C\n", |
321 | "Critical threshold %d C\n", crt); | 321 | crt); |
322 | tz->trips.critical.temperature = crt_k; | 322 | tz->trips.critical.temperature = crt_k; |
323 | } | 323 | } |
324 | } | 324 | } |
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) | |||
390 | status = acpi_evaluate_reference(tz->device->handle, "_PSL", | 390 | status = acpi_evaluate_reference(tz->device->handle, "_PSL", |
391 | NULL, &devices); | 391 | NULL, &devices); |
392 | if (ACPI_FAILURE(status)) { | 392 | if (ACPI_FAILURE(status)) { |
393 | printk(KERN_WARNING PREFIX | 393 | pr_warn(PREFIX "Invalid passive threshold\n"); |
394 | "Invalid passive threshold\n"); | ||
395 | tz->trips.passive.flags.valid = 0; | 394 | tz->trips.passive.flags.valid = 0; |
396 | } | 395 | } |
397 | else | 396 | else |
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) | |||
453 | status = acpi_evaluate_reference(tz->device->handle, | 452 | status = acpi_evaluate_reference(tz->device->handle, |
454 | name, NULL, &devices); | 453 | name, NULL, &devices); |
455 | if (ACPI_FAILURE(status)) { | 454 | if (ACPI_FAILURE(status)) { |
456 | printk(KERN_WARNING PREFIX | 455 | pr_warn(PREFIX "Invalid active%d threshold\n", |
457 | "Invalid active%d threshold\n", i); | 456 | i); |
458 | tz->trips.active[i].flags.valid = 0; | 457 | tz->trips.active[i].flags.valid = 0; |
459 | } | 458 | } |
460 | else | 459 | else |
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) | |||
505 | valid |= tz->trips.active[i].flags.valid; | 504 | valid |= tz->trips.active[i].flags.valid; |
506 | 505 | ||
507 | if (!valid) { | 506 | if (!valid) { |
508 | printk(KERN_WARNING FW_BUG "No valid trip found\n"); | 507 | pr_warn(FW_BUG "No valid trip found\n"); |
509 | return -ENODEV; | 508 | return -ENODEV; |
510 | } | 509 | } |
511 | return 0; | 510 | return 0; |
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) | |||
923 | acpi_bus_private_data_handler, | 922 | acpi_bus_private_data_handler, |
924 | tz->thermal_zone); | 923 | tz->thermal_zone); |
925 | if (ACPI_FAILURE(status)) { | 924 | if (ACPI_FAILURE(status)) { |
926 | printk(KERN_ERR PREFIX | 925 | pr_err(PREFIX "Error attaching device data\n"); |
927 | "Error attaching device data\n"); | ||
928 | return -ENODEV; | 926 | return -ENODEV; |
929 | } | 927 | } |
930 | 928 | ||
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device) | |||
1094 | if (result) | 1092 | if (result) |
1095 | goto free_memory; | 1093 | goto free_memory; |
1096 | 1094 | ||
1097 | printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", | 1095 | pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), |
1098 | acpi_device_name(device), acpi_device_bid(device), | 1096 | acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature)); |
1099 | KELVIN_TO_CELSIUS(tz->temperature)); | ||
1100 | goto end; | 1097 | goto end; |
1101 | 1098 | ||
1102 | free_memory: | 1099 | free_memory: |
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev) | |||
1159 | static int thermal_act(const struct dmi_system_id *d) { | 1156 | static int thermal_act(const struct dmi_system_id *d) { |
1160 | 1157 | ||
1161 | if (act == 0) { | 1158 | if (act == 0) { |
1162 | printk(KERN_NOTICE "ACPI: %s detected: " | 1159 | pr_notice(PREFIX "%s detected: " |
1163 | "disabling all active thermal trip points\n", d->ident); | 1160 | "disabling all active thermal trip points\n", d->ident); |
1164 | act = -1; | 1161 | act = -1; |
1165 | } | 1162 | } |
1166 | return 0; | 1163 | return 0; |
1167 | } | 1164 | } |
1168 | static int thermal_nocrt(const struct dmi_system_id *d) { | 1165 | static int thermal_nocrt(const struct dmi_system_id *d) { |
1169 | 1166 | ||
1170 | printk(KERN_NOTICE "ACPI: %s detected: " | 1167 | pr_notice(PREFIX "%s detected: " |
1171 | "disabling all critical thermal trip point actions.\n", d->ident); | 1168 | "disabling all critical thermal trip point actions.\n", d->ident); |
1172 | nocrt = 1; | 1169 | nocrt = 1; |
1173 | return 0; | 1170 | return 0; |
1174 | } | 1171 | } |
1175 | static int thermal_tzp(const struct dmi_system_id *d) { | 1172 | static int thermal_tzp(const struct dmi_system_id *d) { |
1176 | 1173 | ||
1177 | if (tzp == 0) { | 1174 | if (tzp == 0) { |
1178 | printk(KERN_NOTICE "ACPI: %s detected: " | 1175 | pr_notice(PREFIX "%s detected: " |
1179 | "enabling thermal zone polling\n", d->ident); | 1176 | "enabling thermal zone polling\n", d->ident); |
1180 | tzp = 300; /* 300 dS = 30 Seconds */ | 1177 | tzp = 300; /* 300 dS = 30 Seconds */ |
1181 | } | 1178 | } |
1182 | return 0; | 1179 | return 0; |
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) { | |||
1184 | static int thermal_psv(const struct dmi_system_id *d) { | 1181 | static int thermal_psv(const struct dmi_system_id *d) { |
1185 | 1182 | ||
1186 | if (psv == 0) { | 1183 | if (psv == 0) { |
1187 | printk(KERN_NOTICE "ACPI: %s detected: " | 1184 | pr_notice(PREFIX "%s detected: " |
1188 | "disabling all passive thermal trip points\n", d->ident); | 1185 | "disabling all passive thermal trip points\n", d->ident); |
1189 | psv = -1; | 1186 | psv = -1; |
1190 | } | 1187 | } |
1191 | return 0; | 1188 | return 0; |
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void) | |||
1238 | dmi_check_system(thermal_dmi_table); | 1235 | dmi_check_system(thermal_dmi_table); |
1239 | 1236 | ||
1240 | if (off) { | 1237 | if (off) { |
1241 | printk(KERN_NOTICE "ACPI: thermal control disabled\n"); | 1238 | pr_notice(PREFIX "thermal control disabled\n"); |
1242 | return -ENODEV; | 1239 | return -ENODEV; |
1243 | } | 1240 | } |
1244 | 1241 | ||
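
The thermal.c conversions are mechanical: pr_warn(PREFIX ...) expands to printk(KERN_WARNING PREFIX ...), and likewise for pr_notice()/pr_err()/pr_info(). A common alternative, not used by this patch since thermal.c keeps its explicit PREFIX macro, is to set the prefix once per file via pr_fmt(); a sketch:

/* Hypothetical alternative: prefix every pr_*() call in the file through
 * pr_fmt() instead of pasting PREFIX into each format string. */
#define pr_fmt(fmt) "ACPI: thermal: " fmt

#include <linux/printk.h>

static void example_report(unsigned long long tmp)
{
        pr_warn(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
        /* -> printk(KERN_WARNING "ACPI: thermal: [Firmware Bug]: Invalid ..."); */
}
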
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 552248b0005b..6d408bfbbb1d 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c | |||
@@ -121,7 +121,7 @@ acpi_extract_package(union acpi_object *package, | |||
121 | break; | 121 | break; |
122 | default: | 122 | default: |
123 | printk(KERN_WARNING PREFIX "Invalid package element" | 123 | printk(KERN_WARNING PREFIX "Invalid package element" |
124 | " [%d]: got number, expecing" | 124 | " [%d]: got number, expecting" |
125 | " [%c]\n", | 125 | " [%c]\n", |
126 | i, format_string[i]); | 126 | i, format_string[i]); |
127 | return AE_BAD_DATA; | 127 | return AE_BAD_DATA; |
@@ -148,7 +148,7 @@ acpi_extract_package(union acpi_object *package, | |||
148 | default: | 148 | default: |
149 | printk(KERN_WARNING PREFIX "Invalid package element" | 149 | printk(KERN_WARNING PREFIX "Invalid package element" |
150 | " [%d] got string/buffer," | 150 | " [%d] got string/buffer," |
151 | " expecing [%c]\n", | 151 | " expecting [%c]\n", |
152 | i, format_string[i]); | 152 | i, format_string[i]); |
153 | return AE_BAD_DATA; | 153 | return AE_BAD_DATA; |
154 | break; | 154 | break; |
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package, | |||
169 | /* | 169 | /* |
170 | * Validate output buffer. | 170 | * Validate output buffer. |
171 | */ | 171 | */ |
172 | if (buffer->length < size_required) { | 172 | if (buffer->length == ACPI_ALLOCATE_BUFFER) { |
173 | buffer->pointer = ACPI_ALLOCATE(size_required); | ||
174 | if (!buffer->pointer) | ||
175 | return AE_NO_MEMORY; | ||
173 | buffer->length = size_required; | 176 | buffer->length = size_required; |
174 | return AE_BUFFER_OVERFLOW; | 177 | memset(buffer->pointer, 0, size_required); |
175 | } else if (buffer->length != size_required || !buffer->pointer) { | 178 | } else { |
176 | return AE_BAD_PARAMETER; | 179 | if (buffer->length < size_required) { |
180 | buffer->length = size_required; | ||
181 | return AE_BUFFER_OVERFLOW; | ||
182 | } else if (buffer->length != size_required || | ||
183 | !buffer->pointer) { | ||
184 | return AE_BAD_PARAMETER; | ||
185 | } | ||
177 | } | 186 | } |
178 | 187 | ||
179 | head = buffer->pointer; | 188 | head = buffer->pointer; |
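With this change a caller can hand acpi_extract_package() an output buffer whose length is ACPI_ALLOCATE_BUFFER and let the helper size and allocate it. Below is a minimal sketch of that calling pattern, assuming the usual format-string convention ('N' packs each number as a u64) and that the allocation is released with ACPI_FREE(); demo_extract() is a hypothetical helper, not part of this patch:

#include <linux/acpi.h>
#include <linux/types.h>

/* Extract two integers from an already-evaluated ACPI package,
 * letting acpi_extract_package() allocate the output for us. */
static acpi_status demo_extract(union acpi_object *package,
				u64 *val1, u64 *val2)
{
	struct acpi_buffer format = { sizeof("NN"), "NN" };
	struct acpi_buffer state = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;
	u64 *data;

	status = acpi_extract_package(package, &format, &state);
	if (ACPI_FAILURE(status))
		return status;

	data = state.pointer;	/* two u64 values packed back to back */
	*val1 = data[0];
	*val2 = data[1];

	ACPI_FREE(state.pointer);	/* pairs with the ACPI_ALLOCATE() above */
	return AE_OK;
}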
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index aebcf6355df4..18dbdff4656e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -88,7 +88,16 @@ module_param(allow_duplicates, bool, 0644); | |||
88 | static bool use_bios_initial_backlight = 1; | 88 | static bool use_bios_initial_backlight = 1; |
89 | module_param(use_bios_initial_backlight, bool, 0644); | 89 | module_param(use_bios_initial_backlight, bool, 0644); |
90 | 90 | ||
91 | /* | ||
92 | * For Windows 8 systems: if set true and the GPU driver has | ||
93 | * registered a backlight interface, skip registering ACPI video's. | ||
94 | */ | ||
95 | static bool use_native_backlight = false; | ||
96 | module_param(use_native_backlight, bool, 0644); | ||
97 | |||
91 | static int register_count; | 98 | static int register_count; |
99 | static struct mutex video_list_lock; | ||
100 | static struct list_head video_bus_head; | ||
92 | static int acpi_video_bus_add(struct acpi_device *device); | 101 | static int acpi_video_bus_add(struct acpi_device *device); |
93 | static int acpi_video_bus_remove(struct acpi_device *device); | 102 | static int acpi_video_bus_remove(struct acpi_device *device); |
94 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event); | 103 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event); |
@@ -157,6 +166,7 @@ struct acpi_video_bus { | |||
157 | struct acpi_video_bus_flags flags; | 166 | struct acpi_video_bus_flags flags; |
158 | struct list_head video_device_list; | 167 | struct list_head video_device_list; |
159 | struct mutex device_list_lock; /* protects video_device_list */ | 168 | struct mutex device_list_lock; /* protects video_device_list */ |
169 | struct list_head entry; | ||
160 | struct input_dev *input; | 170 | struct input_dev *input; |
161 | char phys[32]; /* for input device */ | 171 | char phys[32]; /* for input device */ |
162 | struct notifier_block pm_nb; | 172 | struct notifier_block pm_nb; |
@@ -229,6 +239,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, | |||
229 | static int acpi_video_switch_brightness(struct acpi_video_device *device, | 239 | static int acpi_video_switch_brightness(struct acpi_video_device *device, |
230 | int event); | 240 | int event); |
231 | 241 | ||
242 | static bool acpi_video_verify_backlight_support(void) | ||
243 | { | ||
244 | if (acpi_osi_is_win8() && use_native_backlight && | ||
245 | backlight_device_registered(BACKLIGHT_RAW)) | ||
246 | return false; | ||
247 | return acpi_video_backlight_support(); | ||
248 | } | ||
249 | |||
232 | /* backlight device sysfs support */ | 250 | /* backlight device sysfs support */ |
233 | static int acpi_video_get_brightness(struct backlight_device *bd) | 251 | static int acpi_video_get_brightness(struct backlight_device *bd) |
234 | { | 252 | { |
@@ -830,9 +848,9 @@ acpi_video_init_brightness(struct acpi_video_device *device) | |||
830 | * or an index). Set the backlight to max_level in this case. | 848 | * or an index). Set the backlight to max_level in this case. |
831 | */ | 849 | */ |
832 | for (i = 2; i < br->count; i++) | 850 | for (i = 2; i < br->count; i++) |
833 | if (level_old == br->levels[i]) | 851 | if (level == br->levels[i]) |
834 | break; | 852 | break; |
835 | if (i == br->count) | 853 | if (i == br->count || !level) |
836 | level = max_level; | 854 | level = max_level; |
837 | } | 855 | } |
838 | 856 | ||
@@ -884,79 +902,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
884 | 902 | ||
885 | if (acpi_has_method(device->dev->handle, "_DDC")) | 903 | if (acpi_has_method(device->dev->handle, "_DDC")) |
886 | device->cap._DDC = 1; | 904 | device->cap._DDC = 1; |
887 | |||
888 | if (acpi_video_backlight_support()) { | ||
889 | struct backlight_properties props; | ||
890 | struct pci_dev *pdev; | ||
891 | acpi_handle acpi_parent; | ||
892 | struct device *parent = NULL; | ||
893 | int result; | ||
894 | static int count; | ||
895 | char *name; | ||
896 | |||
897 | result = acpi_video_init_brightness(device); | ||
898 | if (result) | ||
899 | return; | ||
900 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
901 | if (!name) | ||
902 | return; | ||
903 | count++; | ||
904 | |||
905 | acpi_get_parent(device->dev->handle, &acpi_parent); | ||
906 | |||
907 | pdev = acpi_get_pci_dev(acpi_parent); | ||
908 | if (pdev) { | ||
909 | parent = &pdev->dev; | ||
910 | pci_dev_put(pdev); | ||
911 | } | ||
912 | |||
913 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
914 | props.type = BACKLIGHT_FIRMWARE; | ||
915 | props.max_brightness = device->brightness->count - 3; | ||
916 | device->backlight = backlight_device_register(name, | ||
917 | parent, | ||
918 | device, | ||
919 | &acpi_backlight_ops, | ||
920 | &props); | ||
921 | kfree(name); | ||
922 | if (IS_ERR(device->backlight)) | ||
923 | return; | ||
924 | |||
925 | /* | ||
926 | * Save current brightness level in case we have to restore it | ||
927 | * before acpi_video_device_lcd_set_level() is called next time. | ||
928 | */ | ||
929 | device->backlight->props.brightness = | ||
930 | acpi_video_get_brightness(device->backlight); | ||
931 | |||
932 | device->cooling_dev = thermal_cooling_device_register("LCD", | ||
933 | device->dev, &video_cooling_ops); | ||
934 | if (IS_ERR(device->cooling_dev)) { | ||
935 | /* | ||
936 | * Set cooling_dev to NULL so we don't crash trying to | ||
937 | * free it. | ||
938 | * Also, why the hell we are returning early and | ||
939 | * not attempt to register video output if cooling | ||
940 | * device registration failed? | ||
941 | * -- dtor | ||
942 | */ | ||
943 | device->cooling_dev = NULL; | ||
944 | return; | ||
945 | } | ||
946 | |||
947 | dev_info(&device->dev->dev, "registered as cooling_device%d\n", | ||
948 | device->cooling_dev->id); | ||
949 | result = sysfs_create_link(&device->dev->dev.kobj, | ||
950 | &device->cooling_dev->device.kobj, | ||
951 | "thermal_cooling"); | ||
952 | if (result) | ||
953 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
954 | result = sysfs_create_link(&device->cooling_dev->device.kobj, | ||
955 | &device->dev->dev.kobj, "device"); | ||
956 | if (result) | ||
957 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
958 | |||
959 | } | ||
960 | } | 905 | } |
961 | 906 | ||
962 | /* | 907 | /* |
@@ -1143,13 +1088,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device, | |||
1143 | acpi_video_device_bind(video, data); | 1088 | acpi_video_device_bind(video, data); |
1144 | acpi_video_device_find_cap(data); | 1089 | acpi_video_device_find_cap(data); |
1145 | 1090 | ||
1146 | status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, | ||
1147 | acpi_video_device_notify, data); | ||
1148 | if (ACPI_FAILURE(status)) | ||
1149 | dev_err(&device->dev, "Error installing notify handler\n"); | ||
1150 | else | ||
1151 | data->flags.notify = 1; | ||
1152 | |||
1153 | mutex_lock(&video->device_list_lock); | 1091 | mutex_lock(&video->device_list_lock); |
1154 | list_add_tail(&data->entry, &video->video_device_list); | 1092 | list_add_tail(&data->entry, &video->video_device_list); |
1155 | mutex_unlock(&video->device_list_lock); | 1093 | mutex_unlock(&video->device_list_lock); |
@@ -1333,8 +1271,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event) | |||
1333 | unsigned long long level_current, level_next; | 1271 | unsigned long long level_current, level_next; |
1334 | int result = -EINVAL; | 1272 | int result = -EINVAL; |
1335 | 1273 | ||
1336 | /* no warning message if acpi_backlight=vendor is used */ | 1274 | /* no warning message if acpi_backlight=vendor or a quirk is used */ |
1337 | if (!acpi_video_backlight_support()) | 1275 | if (!acpi_video_verify_backlight_support()) |
1338 | return 0; | 1276 | return 0; |
1339 | 1277 | ||
1340 | if (!device->brightness) | 1278 | if (!device->brightness) |
@@ -1454,64 +1392,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, | |||
1454 | return status; | 1392 | return status; |
1455 | } | 1393 | } |
1456 | 1394 | ||
1457 | static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | ||
1458 | { | ||
1459 | acpi_status status; | ||
1460 | |||
1461 | if (!device || !device->video) | ||
1462 | return -ENOENT; | ||
1463 | |||
1464 | if (device->flags.notify) { | ||
1465 | status = acpi_remove_notify_handler(device->dev->handle, | ||
1466 | ACPI_DEVICE_NOTIFY, acpi_video_device_notify); | ||
1467 | if (ACPI_FAILURE(status)) | ||
1468 | dev_err(&device->dev->dev, | ||
1469 | "Can't remove video notify handler\n"); | ||
1470 | } | ||
1471 | |||
1472 | if (device->backlight) { | ||
1473 | backlight_device_unregister(device->backlight); | ||
1474 | device->backlight = NULL; | ||
1475 | } | ||
1476 | if (device->cooling_dev) { | ||
1477 | sysfs_remove_link(&device->dev->dev.kobj, | ||
1478 | "thermal_cooling"); | ||
1479 | sysfs_remove_link(&device->cooling_dev->device.kobj, | ||
1480 | "device"); | ||
1481 | thermal_cooling_device_unregister(device->cooling_dev); | ||
1482 | device->cooling_dev = NULL; | ||
1483 | } | ||
1484 | |||
1485 | return 0; | ||
1486 | } | ||
1487 | |||
1488 | static int acpi_video_bus_put_devices(struct acpi_video_bus *video) | ||
1489 | { | ||
1490 | int status; | ||
1491 | struct acpi_video_device *dev, *next; | ||
1492 | |||
1493 | mutex_lock(&video->device_list_lock); | ||
1494 | |||
1495 | list_for_each_entry_safe(dev, next, &video->video_device_list, entry) { | ||
1496 | |||
1497 | status = acpi_video_bus_put_one_device(dev); | ||
1498 | if (ACPI_FAILURE(status)) | ||
1499 | printk(KERN_WARNING PREFIX | ||
1500 | "hhuuhhuu bug in acpi video driver.\n"); | ||
1501 | |||
1502 | if (dev->brightness) { | ||
1503 | kfree(dev->brightness->levels); | ||
1504 | kfree(dev->brightness); | ||
1505 | } | ||
1506 | list_del(&dev->entry); | ||
1507 | kfree(dev); | ||
1508 | } | ||
1509 | |||
1510 | mutex_unlock(&video->device_list_lock); | ||
1511 | |||
1512 | return 0; | ||
1513 | } | ||
1514 | |||
1515 | /* acpi_video interface */ | 1395 | /* acpi_video interface */ |
1516 | 1396 | ||
1517 | /* | 1397 | /* |
@@ -1521,13 +1401,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video) | |||
1521 | static int acpi_video_bus_start_devices(struct acpi_video_bus *video) | 1401 | static int acpi_video_bus_start_devices(struct acpi_video_bus *video) |
1522 | { | 1402 | { |
1523 | return acpi_video_bus_DOS(video, 0, | 1403 | return acpi_video_bus_DOS(video, 0, |
1524 | acpi_video_backlight_quirks() ? 1 : 0); | 1404 | acpi_osi_is_win8() ? 1 : 0); |
1525 | } | 1405 | } |
1526 | 1406 | ||
1527 | static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) | 1407 | static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) |
1528 | { | 1408 | { |
1529 | return acpi_video_bus_DOS(video, 0, | 1409 | return acpi_video_bus_DOS(video, 0, |
1530 | acpi_video_backlight_quirks() ? 0 : 1); | 1410 | acpi_osi_is_win8() ? 0 : 1); |
1531 | } | 1411 | } |
1532 | 1412 | ||
1533 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event) | 1413 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event) |
@@ -1536,7 +1416,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) | |||
1536 | struct input_dev *input; | 1416 | struct input_dev *input; |
1537 | int keycode = 0; | 1417 | int keycode = 0; |
1538 | 1418 | ||
1539 | if (!video) | 1419 | if (!video || !video->input) |
1540 | return; | 1420 | return; |
1541 | 1421 | ||
1542 | input = video->input; | 1422 | input = video->input; |
@@ -1691,12 +1571,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context, | |||
1691 | return AE_OK; | 1571 | return AE_OK; |
1692 | } | 1572 | } |
1693 | 1573 | ||
1574 | static void acpi_video_dev_register_backlight(struct acpi_video_device *device) | ||
1575 | { | ||
1576 | if (acpi_video_verify_backlight_support()) { | ||
1577 | struct backlight_properties props; | ||
1578 | struct pci_dev *pdev; | ||
1579 | acpi_handle acpi_parent; | ||
1580 | struct device *parent = NULL; | ||
1581 | int result; | ||
1582 | static int count; | ||
1583 | char *name; | ||
1584 | |||
1585 | result = acpi_video_init_brightness(device); | ||
1586 | if (result) | ||
1587 | return; | ||
1588 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
1589 | if (!name) | ||
1590 | return; | ||
1591 | count++; | ||
1592 | |||
1593 | acpi_get_parent(device->dev->handle, &acpi_parent); | ||
1594 | |||
1595 | pdev = acpi_get_pci_dev(acpi_parent); | ||
1596 | if (pdev) { | ||
1597 | parent = &pdev->dev; | ||
1598 | pci_dev_put(pdev); | ||
1599 | } | ||
1600 | |||
1601 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1602 | props.type = BACKLIGHT_FIRMWARE; | ||
1603 | props.max_brightness = device->brightness->count - 3; | ||
1604 | device->backlight = backlight_device_register(name, | ||
1605 | parent, | ||
1606 | device, | ||
1607 | &acpi_backlight_ops, | ||
1608 | &props); | ||
1609 | kfree(name); | ||
1610 | if (IS_ERR(device->backlight)) | ||
1611 | return; | ||
1612 | |||
1613 | /* | ||
1614 | * Save current brightness level in case we have to restore it | ||
1615 | * before acpi_video_device_lcd_set_level() is called next time. | ||
1616 | */ | ||
1617 | device->backlight->props.brightness = | ||
1618 | acpi_video_get_brightness(device->backlight); | ||
1619 | |||
1620 | device->cooling_dev = thermal_cooling_device_register("LCD", | ||
1621 | device->dev, &video_cooling_ops); | ||
1622 | if (IS_ERR(device->cooling_dev)) { | ||
1623 | /* | ||
1624 | * Set cooling_dev to NULL so we don't crash trying to | ||
1625 | * free it. | ||
1626 | * Also, why the hell we are returning early and | ||
1627 | * not attempt to register video output if cooling | ||
1628 | * device registration failed? | ||
1629 | * -- dtor | ||
1630 | */ | ||
1631 | device->cooling_dev = NULL; | ||
1632 | return; | ||
1633 | } | ||
1634 | |||
1635 | dev_info(&device->dev->dev, "registered as cooling_device%d\n", | ||
1636 | device->cooling_dev->id); | ||
1637 | result = sysfs_create_link(&device->dev->dev.kobj, | ||
1638 | &device->cooling_dev->device.kobj, | ||
1639 | "thermal_cooling"); | ||
1640 | if (result) | ||
1641 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
1642 | result = sysfs_create_link(&device->cooling_dev->device.kobj, | ||
1643 | &device->dev->dev.kobj, "device"); | ||
1644 | if (result) | ||
1645 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
1646 | } | ||
1647 | } | ||
1648 | |||
1649 | static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) | ||
1650 | { | ||
1651 | struct acpi_video_device *dev; | ||
1652 | |||
1653 | mutex_lock(&video->device_list_lock); | ||
1654 | list_for_each_entry(dev, &video->video_device_list, entry) | ||
1655 | acpi_video_dev_register_backlight(dev); | ||
1656 | mutex_unlock(&video->device_list_lock); | ||
1657 | |||
1658 | video->pm_nb.notifier_call = acpi_video_resume; | ||
1659 | video->pm_nb.priority = 0; | ||
1660 | return register_pm_notifier(&video->pm_nb); | ||
1661 | } | ||
1662 | |||
1663 | static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device) | ||
1664 | { | ||
1665 | if (device->backlight) { | ||
1666 | backlight_device_unregister(device->backlight); | ||
1667 | device->backlight = NULL; | ||
1668 | } | ||
1669 | if (device->brightness) { | ||
1670 | kfree(device->brightness->levels); | ||
1671 | kfree(device->brightness); | ||
1672 | device->brightness = NULL; | ||
1673 | } | ||
1674 | if (device->cooling_dev) { | ||
1675 | sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling"); | ||
1676 | sysfs_remove_link(&device->cooling_dev->device.kobj, "device"); | ||
1677 | thermal_cooling_device_unregister(device->cooling_dev); | ||
1678 | device->cooling_dev = NULL; | ||
1679 | } | ||
1680 | } | ||
1681 | |||
1682 | static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video) | ||
1683 | { | ||
1684 | struct acpi_video_device *dev; | ||
1685 | int error = unregister_pm_notifier(&video->pm_nb); | ||
1686 | |||
1687 | mutex_lock(&video->device_list_lock); | ||
1688 | list_for_each_entry(dev, &video->video_device_list, entry) | ||
1689 | acpi_video_dev_unregister_backlight(dev); | ||
1690 | mutex_unlock(&video->device_list_lock); | ||
1691 | |||
1692 | return error; | ||
1693 | } | ||
1694 | |||
1695 | static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device) | ||
1696 | { | ||
1697 | acpi_status status; | ||
1698 | struct acpi_device *adev = device->dev; | ||
1699 | |||
1700 | status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, | ||
1701 | acpi_video_device_notify, device); | ||
1702 | if (ACPI_FAILURE(status)) | ||
1703 | dev_err(&adev->dev, "Error installing notify handler\n"); | ||
1704 | else | ||
1705 | device->flags.notify = 1; | ||
1706 | } | ||
1707 | |||
1708 | static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video) | ||
1709 | { | ||
1710 | struct input_dev *input; | ||
1711 | struct acpi_video_device *dev; | ||
1712 | int error; | ||
1713 | |||
1714 | video->input = input = input_allocate_device(); | ||
1715 | if (!input) { | ||
1716 | error = -ENOMEM; | ||
1717 | goto out; | ||
1718 | } | ||
1719 | |||
1720 | error = acpi_video_bus_start_devices(video); | ||
1721 | if (error) | ||
1722 | goto err_free_input; | ||
1723 | |||
1724 | snprintf(video->phys, sizeof(video->phys), | ||
1725 | "%s/video/input0", acpi_device_hid(video->device)); | ||
1726 | |||
1727 | input->name = acpi_device_name(video->device); | ||
1728 | input->phys = video->phys; | ||
1729 | input->id.bustype = BUS_HOST; | ||
1730 | input->id.product = 0x06; | ||
1731 | input->dev.parent = &video->device->dev; | ||
1732 | input->evbit[0] = BIT(EV_KEY); | ||
1733 | set_bit(KEY_SWITCHVIDEOMODE, input->keybit); | ||
1734 | set_bit(KEY_VIDEO_NEXT, input->keybit); | ||
1735 | set_bit(KEY_VIDEO_PREV, input->keybit); | ||
1736 | set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit); | ||
1737 | set_bit(KEY_BRIGHTNESSUP, input->keybit); | ||
1738 | set_bit(KEY_BRIGHTNESSDOWN, input->keybit); | ||
1739 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); | ||
1740 | set_bit(KEY_DISPLAY_OFF, input->keybit); | ||
1741 | |||
1742 | error = input_register_device(input); | ||
1743 | if (error) | ||
1744 | goto err_stop_dev; | ||
1745 | |||
1746 | mutex_lock(&video->device_list_lock); | ||
1747 | list_for_each_entry(dev, &video->video_device_list, entry) | ||
1748 | acpi_video_dev_add_notify_handler(dev); | ||
1749 | mutex_unlock(&video->device_list_lock); | ||
1750 | |||
1751 | return 0; | ||
1752 | |||
1753 | err_stop_dev: | ||
1754 | acpi_video_bus_stop_devices(video); | ||
1755 | err_free_input: | ||
1756 | input_free_device(input); | ||
1757 | video->input = NULL; | ||
1758 | out: | ||
1759 | return error; | ||
1760 | } | ||
1761 | |||
1762 | static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev) | ||
1763 | { | ||
1764 | if (dev->flags.notify) { | ||
1765 | acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY, | ||
1766 | acpi_video_device_notify); | ||
1767 | dev->flags.notify = 0; | ||
1768 | } | ||
1769 | } | ||
1770 | |||
1771 | static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) | ||
1772 | { | ||
1773 | struct acpi_video_device *dev; | ||
1774 | |||
1775 | mutex_lock(&video->device_list_lock); | ||
1776 | list_for_each_entry(dev, &video->video_device_list, entry) | ||
1777 | acpi_video_dev_remove_notify_handler(dev); | ||
1778 | mutex_unlock(&video->device_list_lock); | ||
1779 | |||
1780 | acpi_video_bus_stop_devices(video); | ||
1781 | input_unregister_device(video->input); | ||
1782 | video->input = NULL; | ||
1783 | } | ||
1784 | |||
1785 | static int acpi_video_bus_put_devices(struct acpi_video_bus *video) | ||
1786 | { | ||
1787 | struct acpi_video_device *dev, *next; | ||
1788 | |||
1789 | mutex_lock(&video->device_list_lock); | ||
1790 | list_for_each_entry_safe(dev, next, &video->video_device_list, entry) { | ||
1791 | list_del(&dev->entry); | ||
1792 | kfree(dev); | ||
1793 | } | ||
1794 | mutex_unlock(&video->device_list_lock); | ||
1795 | |||
1796 | return 0; | ||
1797 | } | ||
1798 | |||
1694 | static int instance; | 1799 | static int instance; |
1695 | 1800 | ||
1696 | static int acpi_video_bus_add(struct acpi_device *device) | 1801 | static int acpi_video_bus_add(struct acpi_device *device) |
1697 | { | 1802 | { |
1698 | struct acpi_video_bus *video; | 1803 | struct acpi_video_bus *video; |
1699 | struct input_dev *input; | ||
1700 | int error; | 1804 | int error; |
1701 | acpi_status status; | 1805 | acpi_status status; |
1702 | 1806 | ||
@@ -1748,62 +1852,24 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
1748 | if (error) | 1852 | if (error) |
1749 | goto err_put_video; | 1853 | goto err_put_video; |
1750 | 1854 | ||
1751 | video->input = input = input_allocate_device(); | ||
1752 | if (!input) { | ||
1753 | error = -ENOMEM; | ||
1754 | goto err_put_video; | ||
1755 | } | ||
1756 | |||
1757 | error = acpi_video_bus_start_devices(video); | ||
1758 | if (error) | ||
1759 | goto err_free_input_dev; | ||
1760 | |||
1761 | snprintf(video->phys, sizeof(video->phys), | ||
1762 | "%s/video/input0", acpi_device_hid(video->device)); | ||
1763 | |||
1764 | input->name = acpi_device_name(video->device); | ||
1765 | input->phys = video->phys; | ||
1766 | input->id.bustype = BUS_HOST; | ||
1767 | input->id.product = 0x06; | ||
1768 | input->dev.parent = &device->dev; | ||
1769 | input->evbit[0] = BIT(EV_KEY); | ||
1770 | set_bit(KEY_SWITCHVIDEOMODE, input->keybit); | ||
1771 | set_bit(KEY_VIDEO_NEXT, input->keybit); | ||
1772 | set_bit(KEY_VIDEO_PREV, input->keybit); | ||
1773 | set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit); | ||
1774 | set_bit(KEY_BRIGHTNESSUP, input->keybit); | ||
1775 | set_bit(KEY_BRIGHTNESSDOWN, input->keybit); | ||
1776 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); | ||
1777 | set_bit(KEY_DISPLAY_OFF, input->keybit); | ||
1778 | |||
1779 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", | 1855 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", |
1780 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), | 1856 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), |
1781 | video->flags.multihead ? "yes" : "no", | 1857 | video->flags.multihead ? "yes" : "no", |
1782 | video->flags.rom ? "yes" : "no", | 1858 | video->flags.rom ? "yes" : "no", |
1783 | video->flags.post ? "yes" : "no"); | 1859 | video->flags.post ? "yes" : "no"); |
1860 | mutex_lock(&video_list_lock); | ||
1861 | list_add_tail(&video->entry, &video_bus_head); | ||
1862 | mutex_unlock(&video_list_lock); | ||
1784 | 1863 | ||
1785 | video->pm_nb.notifier_call = acpi_video_resume; | 1864 | acpi_video_bus_register_backlight(video); |
1786 | video->pm_nb.priority = 0; | 1865 | acpi_video_bus_add_notify_handler(video); |
1787 | error = register_pm_notifier(&video->pm_nb); | ||
1788 | if (error) | ||
1789 | goto err_stop_video; | ||
1790 | |||
1791 | error = input_register_device(input); | ||
1792 | if (error) | ||
1793 | goto err_unregister_pm_notifier; | ||
1794 | 1866 | ||
1795 | return 0; | 1867 | return 0; |
1796 | 1868 | ||
1797 | err_unregister_pm_notifier: | 1869 | err_put_video: |
1798 | unregister_pm_notifier(&video->pm_nb); | ||
1799 | err_stop_video: | ||
1800 | acpi_video_bus_stop_devices(video); | ||
1801 | err_free_input_dev: | ||
1802 | input_free_device(input); | ||
1803 | err_put_video: | ||
1804 | acpi_video_bus_put_devices(video); | 1870 | acpi_video_bus_put_devices(video); |
1805 | kfree(video->attached_array); | 1871 | kfree(video->attached_array); |
1806 | err_free_video: | 1872 | err_free_video: |
1807 | kfree(video); | 1873 | kfree(video); |
1808 | device->driver_data = NULL; | 1874 | device->driver_data = NULL; |
1809 | 1875 | ||
@@ -1820,12 +1886,14 @@ static int acpi_video_bus_remove(struct acpi_device *device) | |||
1820 | 1886 | ||
1821 | video = acpi_driver_data(device); | 1887 | video = acpi_driver_data(device); |
1822 | 1888 | ||
1823 | unregister_pm_notifier(&video->pm_nb); | 1889 | acpi_video_bus_remove_notify_handler(video); |
1824 | 1890 | acpi_video_bus_unregister_backlight(video); | |
1825 | acpi_video_bus_stop_devices(video); | ||
1826 | acpi_video_bus_put_devices(video); | 1891 | acpi_video_bus_put_devices(video); |
1827 | 1892 | ||
1828 | input_unregister_device(video->input); | 1893 | mutex_lock(&video_list_lock); |
1894 | list_del(&video->entry); | ||
1895 | mutex_unlock(&video_list_lock); | ||
1896 | |||
1829 | kfree(video->attached_array); | 1897 | kfree(video->attached_array); |
1830 | kfree(video); | 1898 | kfree(video); |
1831 | 1899 | ||
@@ -1874,6 +1942,9 @@ int acpi_video_register(void) | |||
1874 | return 0; | 1942 | return 0; |
1875 | } | 1943 | } |
1876 | 1944 | ||
1945 | mutex_init(&video_list_lock); | ||
1946 | INIT_LIST_HEAD(&video_bus_head); | ||
1947 | |||
1877 | result = acpi_bus_register_driver(&acpi_video_bus); | 1948 | result = acpi_bus_register_driver(&acpi_video_bus); |
1878 | if (result < 0) | 1949 | if (result < 0) |
1879 | return -ENODEV; | 1950 | return -ENODEV; |
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 940edbf2fe8f..84875fd4c74f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { | |||
168 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), | 168 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), |
169 | }, | 169 | }, |
170 | }, | 170 | }, |
171 | { | ||
172 | .callback = video_detect_force_vendor, | ||
173 | .ident = "Lenovo Yoga 13", | ||
174 | .matches = { | ||
175 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
176 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), | ||
177 | }, | ||
178 | }, | ||
171 | { }, | 179 | { }, |
172 | }; | 180 | }; |
173 | 181 | ||
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void) | |||
233 | acpi_video_get_capabilities(NULL); | 241 | acpi_video_get_capabilities(NULL); |
234 | } | 242 | } |
235 | 243 | ||
236 | bool acpi_video_backlight_quirks(void) | 244 | bool acpi_osi_is_win8(void) |
237 | { | 245 | { |
238 | return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; | 246 | return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; |
239 | } | 247 | } |
240 | EXPORT_SYMBOL(acpi_video_backlight_quirks); | 248 | EXPORT_SYMBOL(acpi_osi_is_win8); |
241 | 249 | ||
242 | /* Promote the vendor interface instead of the generic video module. | 250 | /* Promote the vendor interface instead of the generic video module. |
243 | * This function allow DMI blacklists to be implemented by externals | 251 | * This function allow DMI blacklists to be implemented by externals |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 848ebbd25717..f48370dfc908 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev) | |||
44 | struct cpu *cpu = container_of(dev, struct cpu, dev); | 44 | struct cpu *cpu = container_of(dev, struct cpu, dev); |
45 | int cpuid = dev->id; | 45 | int cpuid = dev->id; |
46 | int from_nid, to_nid; | 46 | int from_nid, to_nid; |
47 | int ret = -ENODEV; | 47 | int ret; |
48 | |||
49 | cpu_hotplug_driver_lock(); | ||
50 | 48 | ||
51 | from_nid = cpu_to_node(cpuid); | 49 | from_nid = cpu_to_node(cpuid); |
52 | if (from_nid == NUMA_NO_NODE) | 50 | if (from_nid == NUMA_NO_NODE) |
53 | goto out; | 51 | return -ENODEV; |
54 | 52 | ||
55 | ret = cpu_up(cpuid); | 53 | ret = cpu_up(cpuid); |
56 | /* | 54 | /* |
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev) | |||
61 | if (from_nid != to_nid) | 59 | if (from_nid != to_nid) |
62 | change_cpu_under_node(cpu, from_nid, to_nid); | 60 | change_cpu_under_node(cpu, from_nid, to_nid); |
63 | 61 | ||
64 | out: | ||
65 | cpu_hotplug_driver_unlock(); | ||
66 | return ret; | 62 | return ret; |
67 | } | 63 | } |
68 | 64 | ||
69 | static int cpu_subsys_offline(struct device *dev) | 65 | static int cpu_subsys_offline(struct device *dev) |
70 | { | 66 | { |
71 | int ret; | 67 | return cpu_down(dev->id); |
72 | |||
73 | cpu_hotplug_driver_lock(); | ||
74 | ret = cpu_down(dev->id); | ||
75 | cpu_hotplug_driver_unlock(); | ||
76 | return ret; | ||
77 | } | 68 | } |
78 | 69 | ||
79 | void unregister_cpu(struct cpu *cpu) | 70 | void unregister_cpu(struct cpu *cpu) |
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev, | |||
93 | const char *buf, | 84 | const char *buf, |
94 | size_t count) | 85 | size_t count) |
95 | { | 86 | { |
96 | return arch_cpu_probe(buf, count); | 87 | ssize_t cnt; |
88 | int ret; | ||
89 | |||
90 | ret = lock_device_hotplug_sysfs(); | ||
91 | if (ret) | ||
92 | return ret; | ||
93 | |||
94 | cnt = arch_cpu_probe(buf, count); | ||
95 | |||
96 | unlock_device_hotplug(); | ||
97 | return cnt; | ||
97 | } | 98 | } |
98 | 99 | ||
99 | static ssize_t cpu_release_store(struct device *dev, | 100 | static ssize_t cpu_release_store(struct device *dev, |
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev, | |||
101 | const char *buf, | 102 | const char *buf, |
102 | size_t count) | 103 | size_t count) |
103 | { | 104 | { |
104 | return arch_cpu_release(buf, count); | 105 | ssize_t cnt; |
106 | int ret; | ||
107 | |||
108 | ret = lock_device_hotplug_sysfs(); | ||
109 | if (ret) | ||
110 | return ret; | ||
111 | |||
112 | cnt = arch_cpu_release(buf, count); | ||
113 | |||
114 | unlock_device_hotplug(); | ||
115 | return cnt; | ||
105 | } | 116 | } |
106 | 117 | ||
107 | static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); | 118 | static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 35fa36898916..06051767393f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev) | |||
499 | BUS_NOTIFY_UNBIND_DRIVER, | 499 | BUS_NOTIFY_UNBIND_DRIVER, |
500 | dev); | 500 | dev); |
501 | 501 | ||
502 | pm_runtime_put(dev); | 502 | pm_runtime_put_sync(dev); |
503 | 503 | ||
504 | if (dev->bus && dev->bus->remove) | 504 | if (dev->bus && dev->bus->remove) |
505 | dev->bus->remove(dev); | 505 | dev->bus->remove(dev); |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9f098a82cf04..ee039afe9078 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/suspend.h> | 30 | #include <linux/suspend.h> |
31 | #include <trace/events/power.h> | 31 | #include <trace/events/power.h> |
32 | #include <linux/cpuidle.h> | 32 | #include <linux/cpuidle.h> |
33 | #include <linux/timer.h> | ||
34 | |||
33 | #include "../base.h" | 35 | #include "../base.h" |
34 | #include "power.h" | 36 | #include "power.h" |
35 | 37 | ||
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, | |||
390 | return error; | 392 | return error; |
391 | } | 393 | } |
392 | 394 | ||
395 | #ifdef CONFIG_DPM_WATCHDOG | ||
396 | struct dpm_watchdog { | ||
397 | struct device *dev; | ||
398 | struct task_struct *tsk; | ||
399 | struct timer_list timer; | ||
400 | }; | ||
401 | |||
402 | #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ | ||
403 | struct dpm_watchdog wd | ||
404 | |||
405 | /** | ||
406 | * dpm_watchdog_handler - Driver suspend / resume watchdog handler. | ||
407 | * @data: Watchdog object address. | ||
408 | * | ||
409 | * Called when a driver has timed out suspending or resuming. | ||
410 | * There's not much we can do here to recover so panic() to | ||
411 | * capture a crash-dump in pstore. | ||
412 | */ | ||
413 | static void dpm_watchdog_handler(unsigned long data) | ||
414 | { | ||
415 | struct dpm_watchdog *wd = (void *)data; | ||
416 | |||
417 | dev_emerg(wd->dev, "**** DPM device timeout ****\n"); | ||
418 | show_stack(wd->tsk, NULL); | ||
419 | panic("%s %s: unrecoverable failure\n", | ||
420 | dev_driver_string(wd->dev), dev_name(wd->dev)); | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * dpm_watchdog_set - Enable pm watchdog for given device. | ||
425 | * @wd: Watchdog. Must be allocated on the stack. | ||
426 | * @dev: Device to handle. | ||
427 | */ | ||
428 | static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) | ||
429 | { | ||
430 | struct timer_list *timer = &wd->timer; | ||
431 | |||
432 | wd->dev = dev; | ||
433 | wd->tsk = current; | ||
434 | |||
435 | init_timer_on_stack(timer); | ||
436 | /* use same timeout value for both suspend and resume */ | ||
437 | timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; | ||
438 | timer->function = dpm_watchdog_handler; | ||
439 | timer->data = (unsigned long)wd; | ||
440 | add_timer(timer); | ||
441 | } | ||
442 | |||
443 | /** | ||
444 | * dpm_watchdog_clear - Disable suspend/resume watchdog. | ||
445 | * @wd: Watchdog to disable. | ||
446 | */ | ||
447 | static void dpm_watchdog_clear(struct dpm_watchdog *wd) | ||
448 | { | ||
449 | struct timer_list *timer = &wd->timer; | ||
450 | |||
451 | del_timer_sync(timer); | ||
452 | destroy_timer_on_stack(timer); | ||
453 | } | ||
454 | #else | ||
455 | #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) | ||
456 | #define dpm_watchdog_set(x, y) | ||
457 | #define dpm_watchdog_clear(x) | ||
458 | #endif | ||
459 | |||
393 | /*------------------------- Resume routines -------------------------*/ | 460 | /*------------------------- Resume routines -------------------------*/ |
394 | 461 | ||
395 | /** | 462 | /** |
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
576 | pm_callback_t callback = NULL; | 643 | pm_callback_t callback = NULL; |
577 | char *info = NULL; | 644 | char *info = NULL; |
578 | int error = 0; | 645 | int error = 0; |
646 | DECLARE_DPM_WATCHDOG_ON_STACK(wd); | ||
579 | 647 | ||
580 | TRACE_DEVICE(dev); | 648 | TRACE_DEVICE(dev); |
581 | TRACE_RESUME(0); | 649 | TRACE_RESUME(0); |
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
584 | goto Complete; | 652 | goto Complete; |
585 | 653 | ||
586 | dpm_wait(dev->parent, async); | 654 | dpm_wait(dev->parent, async); |
655 | dpm_watchdog_set(&wd, dev); | ||
587 | device_lock(dev); | 656 | device_lock(dev); |
588 | 657 | ||
589 | /* | 658 | /* |
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
642 | 711 | ||
643 | Unlock: | 712 | Unlock: |
644 | device_unlock(dev); | 713 | device_unlock(dev); |
714 | dpm_watchdog_clear(&wd); | ||
645 | 715 | ||
646 | Complete: | 716 | Complete: |
647 | complete_all(&dev->power.completion); | 717 | complete_all(&dev->power.completion); |
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
1060 | pm_callback_t callback = NULL; | 1130 | pm_callback_t callback = NULL; |
1061 | char *info = NULL; | 1131 | char *info = NULL; |
1062 | int error = 0; | 1132 | int error = 0; |
1133 | DECLARE_DPM_WATCHDOG_ON_STACK(wd); | ||
1063 | 1134 | ||
1064 | dpm_wait_for_children(dev, async); | 1135 | dpm_wait_for_children(dev, async); |
1065 | 1136 | ||
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
1083 | if (dev->power.syscore) | 1154 | if (dev->power.syscore) |
1084 | goto Complete; | 1155 | goto Complete; |
1085 | 1156 | ||
1157 | dpm_watchdog_set(&wd, dev); | ||
1086 | device_lock(dev); | 1158 | device_lock(dev); |
1087 | 1159 | ||
1088 | if (dev->pm_domain) { | 1160 | if (dev->pm_domain) { |
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
1139 | } | 1211 | } |
1140 | 1212 | ||
1141 | device_unlock(dev); | 1213 | device_unlock(dev); |
1214 | dpm_watchdog_clear(&wd); | ||
1142 | 1215 | ||
1143 | Complete: | 1216 | Complete: |
1144 | complete_all(&dev->power.completion); | 1217 | complete_all(&dev->power.completion); |
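The watchdog above is armed around each device's callback in __device_suspend()/device_resume(), so a callback that never returns now ends in a stack dump and panic instead of a silent hang. A hedged sketch of the kind of driver that would trip it, assuming CONFIG_DPM_WATCHDOG=y and a CONFIG_DPM_WATCHDOG_TIMEOUT shorter than the stall; the demo_* names are made up:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/pm.h>

/* A suspend callback that stalls long enough for the DPM watchdog
 * timer to fire, dump this task's stack and panic. */
static int demo_suspend(struct device *dev)
{
	ssleep(5 * 60);		/* far longer than the watchdog timeout */
	return 0;
}

static int demo_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
};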
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index ef89897c6043..fa4187418440 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/rculist.h> | 22 | #include <linux/rculist.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/opp.h> | 24 | #include <linux/pm_opp.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | 27 | ||
@@ -42,7 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * struct opp - Generic OPP description structure | 45 | * struct dev_pm_opp - Generic OPP description structure |
46 | * @node: opp list node. The nodes are maintained throughout the lifetime | 46 | * @node: opp list node. The nodes are maintained throughout the lifetime |
47 | * of boot. It is expected only an optimal set of OPPs are | 47 | * of boot. It is expected only an optimal set of OPPs are |
48 | * added to the library by the SoC framework. | 48 | * added to the library by the SoC framework. |
@@ -59,7 +59,7 @@ | |||
59 | * | 59 | * |
60 | * This structure stores the OPP information for a given device. | 60 | * This structure stores the OPP information for a given device. |
61 | */ | 61 | */ |
62 | struct opp { | 62 | struct dev_pm_opp { |
63 | struct list_head node; | 63 | struct list_head node; |
64 | 64 | ||
65 | bool available; | 65 | bool available; |
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | /** | 138 | /** |
139 | * opp_get_voltage() - Gets the voltage corresponding to an available opp | 139 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp |
140 | * @opp: opp for which voltage has to be returned for | 140 | * @opp: opp for which voltage has to be returned for |
141 | * | 141 | * |
142 | * Return voltage in micro volt corresponding to the opp, else | 142 | * Return voltage in micro volt corresponding to the opp, else |
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev) | |||
150 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | 150 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the |
151 | * pointer. | 151 | * pointer. |
152 | */ | 152 | */ |
153 | unsigned long opp_get_voltage(struct opp *opp) | 153 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) |
154 | { | 154 | { |
155 | struct opp *tmp_opp; | 155 | struct dev_pm_opp *tmp_opp; |
156 | unsigned long v = 0; | 156 | unsigned long v = 0; |
157 | 157 | ||
158 | tmp_opp = rcu_dereference(opp); | 158 | tmp_opp = rcu_dereference(opp); |
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp) | |||
163 | 163 | ||
164 | return v; | 164 | return v; |
165 | } | 165 | } |
166 | EXPORT_SYMBOL_GPL(opp_get_voltage); | 166 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); |
167 | 167 | ||
168 | /** | 168 | /** |
169 | * opp_get_freq() - Gets the frequency corresponding to an available opp | 169 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp |
170 | * @opp: opp for which frequency has to be returned for | 170 | * @opp: opp for which frequency has to be returned for |
171 | * | 171 | * |
172 | * Return frequency in hertz corresponding to the opp, else | 172 | * Return frequency in hertz corresponding to the opp, else |
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage); | |||
180 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | 180 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the |
181 | * pointer. | 181 | * pointer. |
182 | */ | 182 | */ |
183 | unsigned long opp_get_freq(struct opp *opp) | 183 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
184 | { | 184 | { |
185 | struct opp *tmp_opp; | 185 | struct dev_pm_opp *tmp_opp; |
186 | unsigned long f = 0; | 186 | unsigned long f = 0; |
187 | 187 | ||
188 | tmp_opp = rcu_dereference(opp); | 188 | tmp_opp = rcu_dereference(opp); |
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp) | |||
193 | 193 | ||
194 | return f; | 194 | return f; |
195 | } | 195 | } |
196 | EXPORT_SYMBOL_GPL(opp_get_freq); | 196 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
197 | 197 | ||
198 | /** | 198 | /** |
199 | * opp_get_opp_count() - Get number of opps available in the opp list | 199 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list |
200 | * @dev: device for which we do this operation | 200 | * @dev: device for which we do this operation |
201 | * | 201 | * |
202 | * This function returns the number of available opps if there are any, | 202 | * This function returns the number of available opps if there are any, |
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq); | |||
206 | * internally references two RCU protected structures: device_opp and opp which | 206 | * internally references two RCU protected structures: device_opp and opp which |
207 | * are safe as long as we are under a common RCU locked section. | 207 | * are safe as long as we are under a common RCU locked section. |
208 | */ | 208 | */ |
209 | int opp_get_opp_count(struct device *dev) | 209 | int dev_pm_opp_get_opp_count(struct device *dev) |
210 | { | 210 | { |
211 | struct device_opp *dev_opp; | 211 | struct device_opp *dev_opp; |
212 | struct opp *temp_opp; | 212 | struct dev_pm_opp *temp_opp; |
213 | int count = 0; | 213 | int count = 0; |
214 | 214 | ||
215 | dev_opp = find_device_opp(dev); | 215 | dev_opp = find_device_opp(dev); |
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev) | |||
226 | 226 | ||
227 | return count; | 227 | return count; |
228 | } | 228 | } |
229 | EXPORT_SYMBOL_GPL(opp_get_opp_count); | 229 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); |
230 | 230 | ||
231 | /** | 231 | /** |
232 | * opp_find_freq_exact() - search for an exact frequency | 232 | * dev_pm_opp_find_freq_exact() - search for an exact frequency |
233 | * @dev: device for which we do this operation | 233 | * @dev: device for which we do this operation |
234 | * @freq: frequency to search for | 234 | * @freq: frequency to search for |
235 | * @available: true/false - match for available opp | 235 | * @available: true/false - match for available opp |
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count); | |||
254 | * under the locked area. The pointer returned must be used prior to unlocking | 254 | * under the locked area. The pointer returned must be used prior to unlocking |
255 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 255 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
256 | */ | 256 | */ |
257 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | 257 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
258 | bool available) | 258 | unsigned long freq, |
259 | bool available) | ||
259 | { | 260 | { |
260 | struct device_opp *dev_opp; | 261 | struct device_opp *dev_opp; |
261 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 262 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
262 | 263 | ||
263 | dev_opp = find_device_opp(dev); | 264 | dev_opp = find_device_opp(dev); |
264 | if (IS_ERR(dev_opp)) { | 265 | if (IS_ERR(dev_opp)) { |
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
277 | 278 | ||
278 | return opp; | 279 | return opp; |
279 | } | 280 | } |
280 | EXPORT_SYMBOL_GPL(opp_find_freq_exact); | 281 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
281 | 282 | ||
282 | /** | 283 | /** |
283 | * opp_find_freq_ceil() - Search for an rounded ceil freq | 284 | * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq |
284 | * @dev: device for which we do this operation | 285 | * @dev: device for which we do this operation |
285 | * @freq: Start frequency | 286 | * @freq: Start frequency |
286 | * | 287 | * |
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact); | |||
300 | * under the locked area. The pointer returned must be used prior to unlocking | 301 | * under the locked area. The pointer returned must be used prior to unlocking |
301 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 302 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
302 | */ | 303 | */ |
303 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | 304 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, |
305 | unsigned long *freq) | ||
304 | { | 306 | { |
305 | struct device_opp *dev_opp; | 307 | struct device_opp *dev_opp; |
306 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 308 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
307 | 309 | ||
308 | if (!dev || !freq) { | 310 | if (!dev || !freq) { |
309 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 311 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
324 | 326 | ||
325 | return opp; | 327 | return opp; |
326 | } | 328 | } |
327 | EXPORT_SYMBOL_GPL(opp_find_freq_ceil); | 329 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); |
328 | 330 | ||
329 | /** | 331 | /** |
330 | * opp_find_freq_floor() - Search for a rounded floor freq | 332 | * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq |
331 | * @dev: device for which we do this operation | 333 | * @dev: device for which we do this operation |
332 | * @freq: Start frequency | 334 | * @freq: Start frequency |
333 | * | 335 | * |
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil); | |||
347 | * under the locked area. The pointer returned must be used prior to unlocking | 349 | * under the locked area. The pointer returned must be used prior to unlocking |
348 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 350 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
349 | */ | 351 | */ |
350 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | 352 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, |
353 | unsigned long *freq) | ||
351 | { | 354 | { |
352 | struct device_opp *dev_opp; | 355 | struct device_opp *dev_opp; |
353 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 356 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
354 | 357 | ||
355 | if (!dev || !freq) { | 358 | if (!dev || !freq) { |
356 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 359 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
375 | 378 | ||
376 | return opp; | 379 | return opp; |
377 | } | 380 | } |
378 | EXPORT_SYMBOL_GPL(opp_find_freq_floor); | 381 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
379 | 382 | ||
380 | /** | 383 | /** |
381 | * opp_add() - Add an OPP table from a table definitions | 384 | * dev_pm_opp_add() - Add an OPP table from a table definitions |
382 | * @dev: device for which we do this operation | 385 | * @dev: device for which we do this operation |
383 | * @freq: Frequency in Hz for this OPP | 386 | * @freq: Frequency in Hz for this OPP |
384 | * @u_volt: Voltage in uVolts for this OPP | 387 | * @u_volt: Voltage in uVolts for this OPP |
385 | * | 388 | * |
386 | * This function adds an opp definition to the opp list and returns status. | 389 | * This function adds an opp definition to the opp list and returns status. |
387 | * The opp is made available by default and it can be controlled using | 390 | * The opp is made available by default and it can be controlled using |
388 | * opp_enable/disable functions. | 391 | * dev_pm_opp_enable/disable functions. |
389 | * | 392 | * |
390 | * Locking: The internal device_opp and opp structures are RCU protected. | 393 | * Locking: The internal device_opp and opp structures are RCU protected. |
391 | * Hence this function internally uses RCU updater strategy with mutex locks | 394 | * Hence this function internally uses RCU updater strategy with mutex locks |
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor); | |||
393 | * that this function is *NOT* called under RCU protection or in contexts where | 396 | * that this function is *NOT* called under RCU protection or in contexts where |
394 | * mutex cannot be locked. | 397 | * mutex cannot be locked. |
395 | */ | 398 | */ |
396 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 399 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
397 | { | 400 | { |
398 | struct device_opp *dev_opp = NULL; | 401 | struct device_opp *dev_opp = NULL; |
399 | struct opp *opp, *new_opp; | 402 | struct dev_pm_opp *opp, *new_opp; |
400 | struct list_head *head; | 403 | struct list_head *head; |
401 | 404 | ||
402 | /* allocate new OPP node */ | 405 | /* allocate new OPP node */ |
403 | new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); | 406 | new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); |
404 | if (!new_opp) { | 407 | if (!new_opp) { |
405 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); | 408 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); |
406 | return -ENOMEM; | 409 | return -ENOMEM; |
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
460 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); | 463 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); |
461 | return 0; | 464 | return 0; |
462 | } | 465 | } |
463 | EXPORT_SYMBOL_GPL(opp_add); | 466 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
464 | 467 | ||
465 | /** | 468 | /** |
466 | * opp_set_availability() - helper to set the availability of an opp | 469 | * opp_set_availability() - helper to set the availability of an opp |
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
485 | bool availability_req) | 488 | bool availability_req) |
486 | { | 489 | { |
487 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); | 490 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); |
488 | struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | 491 | struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); |
489 | int r = 0; | 492 | int r = 0; |
490 | 493 | ||
491 | /* keep the node allocated */ | 494 | /* keep the node allocated */ |
492 | new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); | 495 | new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); |
493 | if (!new_opp) { | 496 | if (!new_opp) { |
494 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); | 497 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); |
495 | return -ENOMEM; | 498 | return -ENOMEM; |
@@ -552,13 +555,13 @@ unlock: | |||
552 | } | 555 | } |
553 | 556 | ||
554 | /** | 557 | /** |
555 | * opp_enable() - Enable a specific OPP | 558 | * dev_pm_opp_enable() - Enable a specific OPP |
556 | * @dev: device for which we do this operation | 559 | * @dev: device for which we do this operation |
557 | * @freq: OPP frequency to enable | 560 | * @freq: OPP frequency to enable |
558 | * | 561 | * |
559 | * Enables a provided opp. If the operation is valid, this returns 0, else the | 562 | * Enables a provided opp. If the operation is valid, this returns 0, else the |
560 | * corresponding error value. It is meant to be used for users an OPP available | 563 | * corresponding error value. It is meant to be used for users an OPP available |
561 | * after being temporarily made unavailable with opp_disable. | 564 | * after being temporarily made unavailable with dev_pm_opp_disable. |
562 | * | 565 | * |
563 | * Locking: The internal device_opp and opp structures are RCU protected. | 566 | * Locking: The internal device_opp and opp structures are RCU protected. |
564 | * Hence this function indirectly uses RCU and mutex locks to keep the | 567 | * Hence this function indirectly uses RCU and mutex locks to keep the |
@@ -566,21 +569,21 @@ unlock: | |||
566 | * this function is *NOT* called under RCU protection or in contexts where | 569 | * this function is *NOT* called under RCU protection or in contexts where |
567 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 570 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
568 | */ | 571 | */ |
569 | int opp_enable(struct device *dev, unsigned long freq) | 572 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
570 | { | 573 | { |
571 | return opp_set_availability(dev, freq, true); | 574 | return opp_set_availability(dev, freq, true); |
572 | } | 575 | } |
573 | EXPORT_SYMBOL_GPL(opp_enable); | 576 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); |
574 | 577 | ||
575 | /** | 578 | /** |
576 | * opp_disable() - Disable a specific OPP | 579 | * dev_pm_opp_disable() - Disable a specific OPP |
577 | * @dev: device for which we do this operation | 580 | * @dev: device for which we do this operation |
578 | * @freq: OPP frequency to disable | 581 | * @freq: OPP frequency to disable |
579 | * | 582 | * |
580 | * Disables a provided opp. If the operation is valid, this returns | 583 | * Disables a provided opp. If the operation is valid, this returns |
581 | * 0, else the corresponding error value. It is meant to be a temporary | 584 | * 0, else the corresponding error value. It is meant to be a temporary |
582 | * control by users to make this OPP not available until the circumstances are | 585 | * control by users to make this OPP not available until the circumstances are |
583 | * right to make it available again (with a call to opp_enable). | 586 | * right to make it available again (with a call to dev_pm_opp_enable). |
584 | * | 587 | * |
585 | * Locking: The internal device_opp and opp structures are RCU protected. | 588 | * Locking: The internal device_opp and opp structures are RCU protected. |
586 | * Hence this function indirectly uses RCU and mutex locks to keep the | 589 | * Hence this function indirectly uses RCU and mutex locks to keep the |
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable); | |||
588 | * this function is *NOT* called under RCU protection or in contexts where | 591 | * this function is *NOT* called under RCU protection or in contexts where |
589 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 592 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
590 | */ | 593 | */ |
591 | int opp_disable(struct device *dev, unsigned long freq) | 594 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
592 | { | 595 | { |
593 | return opp_set_availability(dev, freq, false); | 596 | return opp_set_availability(dev, freq, false); |
594 | } | 597 | } |
595 | EXPORT_SYMBOL_GPL(opp_disable); | 598 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
596 | 599 | ||
597 | #ifdef CONFIG_CPU_FREQ | 600 | #ifdef CONFIG_CPU_FREQ |
598 | /** | 601 | /** |
599 | * opp_init_cpufreq_table() - create a cpufreq table for a device | 602 | * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device |
600 | * @dev: device for which we do this operation | 603 | * @dev: device for which we do this operation |
601 | * @table: Cpufreq table returned back to caller | 604 | * @table: Cpufreq table returned back to caller |
602 | * | 605 | * |
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable); | |||
619 | * Callers should ensure that this function is *NOT* called under RCU protection | 622 | * Callers should ensure that this function is *NOT* called under RCU protection |
620 | * or in contexts where mutex locking cannot be used. | 623 | * or in contexts where mutex locking cannot be used. |
621 | */ | 624 | */ |
622 | int opp_init_cpufreq_table(struct device *dev, | 625 | int dev_pm_opp_init_cpufreq_table(struct device *dev, |
623 | struct cpufreq_frequency_table **table) | 626 | struct cpufreq_frequency_table **table) |
624 | { | 627 | { |
625 | struct device_opp *dev_opp; | 628 | struct device_opp *dev_opp; |
626 | struct opp *opp; | 629 | struct dev_pm_opp *opp; |
627 | struct cpufreq_frequency_table *freq_table; | 630 | struct cpufreq_frequency_table *freq_table; |
628 | int i = 0; | 631 | int i = 0; |
629 | 632 | ||
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev, | |||
639 | } | 642 | } |
640 | 643 | ||
641 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * | 644 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * |
642 | (opp_get_opp_count(dev) + 1), GFP_KERNEL); | 645 | (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL); |
643 | if (!freq_table) { | 646 | if (!freq_table) { |
644 | mutex_unlock(&dev_opp_list_lock); | 647 | mutex_unlock(&dev_opp_list_lock); |
645 | dev_warn(dev, "%s: Unable to allocate frequency table\n", | 648 | dev_warn(dev, "%s: Unable to allocate frequency table\n", |
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev, | |||
663 | 666 | ||
664 | return 0; | 667 | return 0; |
665 | } | 668 | } |
666 | EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); | 669 | EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); |
667 | 670 | ||
668 | /** | 671 | /** |
669 | * opp_free_cpufreq_table() - free the cpufreq table | 672 | * dev_pm_opp_free_cpufreq_table() - free the cpufreq table |
670 | * @dev: device for which we do this operation | 673 | * @dev: device for which we do this operation |
671 | * @table: table to free | 674 | * @table: table to free |
672 | * | 675 | * |
673 | * Free up the table allocated by opp_init_cpufreq_table | 676 | * Free up the table allocated by dev_pm_opp_init_cpufreq_table |
674 | */ | 677 | */ |
675 | void opp_free_cpufreq_table(struct device *dev, | 678 | void dev_pm_opp_free_cpufreq_table(struct device *dev, |
676 | struct cpufreq_frequency_table **table) | 679 | struct cpufreq_frequency_table **table) |
677 | { | 680 | { |
678 | if (!table) | 681 | if (!table) |
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev, | |||
681 | kfree(*table); | 684 | kfree(*table); |
682 | *table = NULL; | 685 | *table = NULL; |
683 | } | 686 | } |
684 | EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); | 687 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); |
685 | #endif /* CONFIG_CPU_FREQ */ | 688 | #endif /* CONFIG_CPU_FREQ */ |
686 | 689 | ||
687 | /** | 690 | /** |
688 | * opp_get_notifier() - find notifier_head of the device with opp | 691 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
689 | * @dev: device pointer used to lookup device OPPs. | 692 | * @dev: device pointer used to lookup device OPPs. |
690 | */ | 693 | */ |
691 | struct srcu_notifier_head *opp_get_notifier(struct device *dev) | 694 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) |
692 | { | 695 | { |
693 | struct device_opp *dev_opp = find_device_opp(dev); | 696 | struct device_opp *dev_opp = find_device_opp(dev); |
694 | 697 | ||
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev) | |||
732 | unsigned long freq = be32_to_cpup(val++) * 1000; | 735 | unsigned long freq = be32_to_cpup(val++) * 1000; |
733 | unsigned long volt = be32_to_cpup(val++); | 736 | unsigned long volt = be32_to_cpup(val++); |
734 | 737 | ||
735 | if (opp_add(dev, freq, volt)) { | 738 | if (dev_pm_opp_add(dev, freq, volt)) { |
736 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 739 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
737 | __func__, freq); | 740 | __func__, freq); |
738 | continue; | 741 | continue; |
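The hunks above complete the rename of the OPP library entry points from opp_*() to dev_pm_opp_*() (and of struct opp to struct dev_pm_opp). A minimal consumer-side sketch of the renamed calls, assuming illustrative frequencies and voltages and with error paths trimmed:

#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

/* Sketch only: the OPP values below are invented for illustration. */
static int example_setup_opps(struct device *dev)
{
        struct cpufreq_frequency_table *table;
        int ret;

        /* Register two OPPs: 800 MHz @ 1.0 V and 1 GHz @ 1.1 V */
        ret = dev_pm_opp_add(dev, 800000000, 1000000);
        if (ret)
                return ret;
        ret = dev_pm_opp_add(dev, 1000000000, 1100000);
        if (ret)
                return ret;

        /* Temporarily take the fastest OPP out of service */
        dev_pm_opp_disable(dev, 1000000000);

        /* Build a cpufreq table from the OPPs that are still available */
        ret = dev_pm_opp_init_cpufreq_table(dev, &table);
        if (ret)
                return ret;

        /* ... hand "table" to cpufreq, use it, then release it ... */
        dev_pm_opp_free_cpufreq_table(dev, &table);

        /* Make the fast OPP selectable again once conditions allow */
        return dev_pm_opp_enable(dev, 1000000000);
}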
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 268a35097578..72e00e66ecc5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) | |||
258 | * Check if the device's runtime PM status allows it to be suspended. If | 258 | * Check if the device's runtime PM status allows it to be suspended. If |
259 | * another idle notification has been started earlier, return immediately. If | 259 | * another idle notification has been started earlier, return immediately. If |
260 | * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise | 260 | * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise |
261 | * run the ->runtime_idle() callback directly. | 261 | * run the ->runtime_idle() callback directly. If the ->runtime_idle callback |
262 | * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag. | ||
262 | * | 263 | * |
263 | * This function must be called under dev->power.lock with interrupts disabled. | 264 | * This function must be called under dev->power.lock with interrupts disabled. |
264 | */ | 265 | */ |
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
331 | 332 | ||
332 | out: | 333 | out: |
333 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 334 | trace_rpm_return_int(dev, _THIS_IP_, retval); |
334 | return retval ? retval : rpm_suspend(dev, rpmflags); | 335 | return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); |
335 | } | 336 | } |
336 | 337 | ||
337 | /** | 338 | /** |
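With the rpm_idle() change above, a missing ->runtime_idle() callback or one that returns 0 makes the core fall through to rpm_suspend() with RPM_AUTO, i.e. the autosuspend path rather than an immediate suspend. A sketch of the driver side, where every example_* name is a hypothetical stub:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        return 0;       /* power the hardware down here */
}

static int example_runtime_resume(struct device *dev)
{
        return 0;       /* power it back up here */
}

static int example_runtime_idle(struct device *dev)
{
        /*
         * Returning 0 (or not providing this callback at all) now lets
         * the PM core suspend the device via the RPM_AUTO path, so any
         * autosuspend delay set with pm_runtime_set_autosuspend_delay()
         * is honoured.
         */
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume,
                           example_runtime_idle)
};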
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 534fcb825153..38093e272377 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -17,15 +17,11 @@ config CPU_FREQ | |||
17 | 17 | ||
18 | if CPU_FREQ | 18 | if CPU_FREQ |
19 | 19 | ||
20 | config CPU_FREQ_TABLE | ||
21 | tristate | ||
22 | |||
23 | config CPU_FREQ_GOV_COMMON | 20 | config CPU_FREQ_GOV_COMMON |
24 | bool | 21 | bool |
25 | 22 | ||
26 | config CPU_FREQ_STAT | 23 | config CPU_FREQ_STAT |
27 | tristate "CPU frequency translation statistics" | 24 | tristate "CPU frequency translation statistics" |
28 | select CPU_FREQ_TABLE | ||
29 | default y | 25 | default y |
30 | help | 26 | help |
31 | This driver exports CPU frequency statistics information through sysfs | 27 | This driver exports CPU frequency statistics information through sysfs |
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE | |||
143 | 139 | ||
144 | config CPU_FREQ_GOV_ONDEMAND | 140 | config CPU_FREQ_GOV_ONDEMAND |
145 | tristate "'ondemand' cpufreq policy governor" | 141 | tristate "'ondemand' cpufreq policy governor" |
146 | select CPU_FREQ_TABLE | ||
147 | select CPU_FREQ_GOV_COMMON | 142 | select CPU_FREQ_GOV_COMMON |
148 | help | 143 | help |
149 | 'ondemand' - This driver adds a dynamic cpufreq policy governor. | 144 | 'ondemand' - This driver adds a dynamic cpufreq policy governor. |
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE | |||
187 | config GENERIC_CPUFREQ_CPU0 | 182 | config GENERIC_CPUFREQ_CPU0 |
188 | tristate "Generic CPU0 cpufreq driver" | 183 | tristate "Generic CPU0 cpufreq driver" |
189 | depends on HAVE_CLK && REGULATOR && PM_OPP && OF | 184 | depends on HAVE_CLK && REGULATOR && PM_OPP && OF |
190 | select CPU_FREQ_TABLE | ||
191 | help | 185 | help |
192 | This adds a generic cpufreq driver for CPU0 frequency management. | 186 | This adds a generic cpufreq driver for CPU0 frequency management. |
193 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) | 187 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) |
@@ -223,7 +217,6 @@ depends on IA64 | |||
223 | 217 | ||
224 | config IA64_ACPI_CPUFREQ | 218 | config IA64_ACPI_CPUFREQ |
225 | tristate "ACPI Processor P-States driver" | 219 | tristate "ACPI Processor P-States driver" |
226 | select CPU_FREQ_TABLE | ||
227 | depends on ACPI_PROCESSOR | 220 | depends on ACPI_PROCESSOR |
228 | help | 221 | help |
229 | This driver adds a CPUFreq driver which utilizes the ACPI | 222 | This driver adds a CPUFreq driver which utilizes the ACPI |
@@ -240,7 +233,6 @@ depends on MIPS | |||
240 | 233 | ||
241 | config LOONGSON2_CPUFREQ | 234 | config LOONGSON2_CPUFREQ |
242 | tristate "Loongson2 CPUFreq Driver" | 235 | tristate "Loongson2 CPUFreq Driver" |
243 | select CPU_FREQ_TABLE | ||
244 | help | 236 | help |
245 | This option adds a CPUFreq driver for loongson processors which | 237 | This option adds a CPUFreq driver for loongson processors which |
246 | support software configurable cpu frequency. | 238 | support software configurable cpu frequency. |
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers" | |||
262 | depends on SPARC64 | 254 | depends on SPARC64 |
263 | config SPARC_US3_CPUFREQ | 255 | config SPARC_US3_CPUFREQ |
264 | tristate "UltraSPARC-III CPU Frequency driver" | 256 | tristate "UltraSPARC-III CPU Frequency driver" |
265 | select CPU_FREQ_TABLE | ||
266 | help | 257 | help |
267 | This adds the CPUFreq driver for UltraSPARC-III processors. | 258 | This adds the CPUFreq driver for UltraSPARC-III processors. |
268 | 259 | ||
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ | |||
272 | 263 | ||
273 | config SPARC_US2E_CPUFREQ | 264 | config SPARC_US2E_CPUFREQ |
274 | tristate "UltraSPARC-IIe CPU Frequency driver" | 265 | tristate "UltraSPARC-IIe CPU Frequency driver" |
275 | select CPU_FREQ_TABLE | ||
276 | help | 266 | help |
277 | This adds the CPUFreq driver for UltraSPARC-IIe processors. | 267 | This adds the CPUFreq driver for UltraSPARC-IIe processors. |
278 | 268 | ||
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling" | |||
285 | depends on SUPERH | 275 | depends on SUPERH |
286 | config SH_CPU_FREQ | 276 | config SH_CPU_FREQ |
287 | tristate "SuperH CPU Frequency driver" | 277 | tristate "SuperH CPU Frequency driver" |
288 | select CPU_FREQ_TABLE | ||
289 | help | 278 | help |
290 | This adds the cpufreq driver for SuperH. Any CPU that supports | 279 | This adds the cpufreq driver for SuperH. Any CPU that supports |
291 | clock rate rounding through the clock framework can use this | 280 | clock rate rounding through the clock framework can use this |
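Dropping CPU_FREQ_TABLE means freq_table.o is now built into the cpufreq core whenever CPU_FREQ is set (see the Makefile hunk further down), so the table helpers are always available and the per-driver "select CPU_FREQ_TABLE" lines removed throughout the rest of this diff become unnecessary. A hedged sketch of a driver relying on one of those helpers, with example_* names and frequencies invented for illustration:

#include <linux/cpufreq.h>

/* Frequencies are in kHz, as usual for cpufreq tables. */
static struct cpufreq_frequency_table example_freq_table[] = {
        { .driver_data = 0, .frequency = 200000 },      /* 200 MHz */
        { .driver_data = 1, .frequency = 400000 },      /* 400 MHz */
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /*
         * Validates the table, sets the policy->cpuinfo limits and
         * registers the table with the core, replacing the separate
         * cpufreq_frequency_table_get_attr() call (compare the
         * acpi-cpufreq hunks below). No extra Kconfig select is needed.
         */
        return cpufreq_table_validate_and_show(policy, example_freq_table);
}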
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 0fa204b244bd..ce52ed949249 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -5,7 +5,6 @@ | |||
5 | config ARM_BIG_LITTLE_CPUFREQ | 5 | config ARM_BIG_LITTLE_CPUFREQ |
6 | tristate "Generic ARM big LITTLE CPUfreq driver" | 6 | tristate "Generic ARM big LITTLE CPUfreq driver" |
7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK | 7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK |
8 | select CPU_FREQ_TABLE | ||
9 | help | 8 | help |
10 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. | 9 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. |
11 | 10 | ||
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ | |||
18 | 17 | ||
19 | config ARM_EXYNOS_CPUFREQ | 18 | config ARM_EXYNOS_CPUFREQ |
20 | bool | 19 | bool |
21 | select CPU_FREQ_TABLE | ||
22 | 20 | ||
23 | config ARM_EXYNOS4210_CPUFREQ | 21 | config ARM_EXYNOS4210_CPUFREQ |
24 | bool "SAMSUNG EXYNOS4210" | 22 | bool "SAMSUNG EXYNOS4210" |
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ | |||
58 | depends on SOC_EXYNOS5440 | 56 | depends on SOC_EXYNOS5440 |
59 | depends on HAVE_CLK && PM_OPP && OF | 57 | depends on HAVE_CLK && PM_OPP && OF |
60 | default y | 58 | default y |
61 | select CPU_FREQ_TABLE | ||
62 | help | 59 | help |
63 | This adds the CPUFreq driver for Samsung EXYNOS5440 | 60 | This adds the CPUFreq driver for Samsung EXYNOS5440 |
64 | SoC. The nature of exynos5440 clock controller is | 61 | SoC. The nature of exynos5440 clock controller is |
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ | |||
85 | tristate "Freescale i.MX6Q cpufreq support" | 82 | tristate "Freescale i.MX6Q cpufreq support" |
86 | depends on SOC_IMX6Q | 83 | depends on SOC_IMX6Q |
87 | depends on REGULATOR_ANATOP | 84 | depends on REGULATOR_ANATOP |
88 | select CPU_FREQ_TABLE | ||
89 | help | 85 | help |
90 | This adds cpufreq driver support for Freescale i.MX6Q SOC. | 86 | This adds cpufreq driver support for Freescale i.MX6Q SOC. |
91 | 87 | ||
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR | |||
101 | 97 | ||
102 | config ARM_KIRKWOOD_CPUFREQ | 98 | config ARM_KIRKWOOD_CPUFREQ |
103 | def_bool ARCH_KIRKWOOD && OF | 99 | def_bool ARCH_KIRKWOOD && OF |
104 | select CPU_FREQ_TABLE | ||
105 | help | 100 | help |
106 | This adds the CPUFreq driver for Marvell Kirkwood | 101 | This adds the CPUFreq driver for Marvell Kirkwood |
107 | SoCs. | 102 | SoCs. |
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ | |||
110 | bool "TI OMAP2+" | 105 | bool "TI OMAP2+" |
111 | depends on ARCH_OMAP2PLUS | 106 | depends on ARCH_OMAP2PLUS |
112 | default ARCH_OMAP2PLUS | 107 | default ARCH_OMAP2PLUS |
113 | select CPU_FREQ_TABLE | ||
114 | 108 | ||
115 | config ARM_S3C_CPUFREQ | 109 | config ARM_S3C_CPUFREQ |
116 | bool | 110 | bool |
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ | |||
165 | config ARM_S3C2416_CPUFREQ | 159 | config ARM_S3C2416_CPUFREQ |
166 | bool "S3C2416 CPU Frequency scaling support" | 160 | bool "S3C2416 CPU Frequency scaling support" |
167 | depends on CPU_S3C2416 | 161 | depends on CPU_S3C2416 |
168 | select CPU_FREQ_TABLE | ||
169 | help | 162 | help |
170 | This adds the CPUFreq driver for the Samsung S3C2416 and | 163 | This adds the CPUFreq driver for the Samsung S3C2416 and |
171 | S3C2450 SoC. The S3C2416 supports changing the rate of the | 164 | S3C2450 SoC. The S3C2416 supports changing the rate of the |
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ | |||
196 | config ARM_S3C64XX_CPUFREQ | 189 | config ARM_S3C64XX_CPUFREQ |
197 | bool "Samsung S3C64XX" | 190 | bool "Samsung S3C64XX" |
198 | depends on CPU_S3C6410 | 191 | depends on CPU_S3C6410 |
199 | select CPU_FREQ_TABLE | ||
200 | default y | 192 | default y |
201 | help | 193 | help |
202 | This adds the CPUFreq driver for Samsung S3C6410 SoC. | 194 | This adds the CPUFreq driver for Samsung S3C6410 SoC. |
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ | |||
206 | config ARM_S5PV210_CPUFREQ | 198 | config ARM_S5PV210_CPUFREQ |
207 | bool "Samsung S5PV210 and S5PC110" | 199 | bool "Samsung S5PV210 and S5PC110" |
208 | depends on CPU_S5PV210 | 200 | depends on CPU_S5PV210 |
209 | select CPU_FREQ_TABLE | ||
210 | default y | 201 | default y |
211 | help | 202 | help |
212 | This adds the CPUFreq driver for Samsung S5PV210 and | 203 | This adds the CPUFreq driver for Samsung S5PV210 and |
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ | |||
223 | config ARM_SPEAR_CPUFREQ | 214 | config ARM_SPEAR_CPUFREQ |
224 | bool "SPEAr CPUFreq support" | 215 | bool "SPEAr CPUFreq support" |
225 | depends on PLAT_SPEAR | 216 | depends on PLAT_SPEAR |
226 | select CPU_FREQ_TABLE | ||
227 | default y | 217 | default y |
228 | help | 218 | help |
229 | This adds the CPUFreq driver support for SPEAr SOCs. | 219 | This adds the CPUFreq driver support for SPEAr SOCs. |
@@ -231,7 +221,14 @@ config ARM_SPEAR_CPUFREQ | |||
231 | config ARM_TEGRA_CPUFREQ | 221 | config ARM_TEGRA_CPUFREQ |
232 | bool "TEGRA CPUFreq support" | 222 | bool "TEGRA CPUFreq support" |
233 | depends on ARCH_TEGRA | 223 | depends on ARCH_TEGRA |
234 | select CPU_FREQ_TABLE | ||
235 | default y | 224 | default y |
236 | help | 225 | help |
237 | This adds the CPUFreq driver support for TEGRA SOCs. | 226 | This adds the CPUFreq driver support for TEGRA SOCs. |
227 | |||
228 | config ARM_VEXPRESS_SPC_CPUFREQ | ||
229 | tristate "Versatile Express SPC based CPUfreq driver" | ||
230 | select ARM_BIG_LITTLE_CPUFREQ | ||
231 | depends on ARCH_VEXPRESS_SPC | ||
232 | help | ||
233 | This adds the CPUfreq driver support for Versatile Express | ||
234 | big.LITTLE platforms using SPC for power management. | ||
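The new ARM_VEXPRESS_SPC_CPUFREQ option selects ARM_BIG_LITTLE_CPUFREQ, so the driver it builds is expected to be thin glue that hands a set of cpufreq_arm_bL_ops to the generic big.LITTLE driver reworked later in this diff. A rough sketch of such glue under stated assumptions: of_init_opp_table() and a fixed latency merely stand in for the real SPC hooks, and all example_* names are hypothetical:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include "arm_big_little.h"

static int example_spc_init_opp_table(struct device *cpu_dev)
{
        /* Stand-in: populate this CPU's OPPs, here from the device tree */
        return of_init_opp_table(cpu_dev);
}

static int example_spc_get_transition_latency(struct device *cpu_dev)
{
        return 1000000;         /* 1 ms, illustrative value in ns */
}

static struct cpufreq_arm_bL_ops example_spc_ops = {
        .name                   = "example-spc",
        .init_opp_table         = example_spc_init_opp_table,
        .get_transition_latency = example_spc_get_transition_latency,
};

static int __init example_spc_cpufreq_init(void)
{
        return bL_cpufreq_register(&example_spc_ops);
}
module_init(example_spc_cpufreq_init);

static void __exit example_spc_cpufreq_exit(void)
{
        bL_cpufreq_unregister(&example_spc_ops);
}
module_exit(example_spc_cpufreq_exit);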
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 25ca9db62e09..ca0021a96e19 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc | |||
@@ -1,7 +1,6 @@ | |||
1 | config CPU_FREQ_CBE | 1 | config CPU_FREQ_CBE |
2 | tristate "CBE frequency scaling" | 2 | tristate "CBE frequency scaling" |
3 | depends on CBE_RAS && PPC_CELL | 3 | depends on CBE_RAS && PPC_CELL |
4 | select CPU_FREQ_TABLE | ||
5 | default m | 4 | default m |
6 | help | 5 | help |
7 | This adds the cpufreq driver for Cell BE processors. | 6 | This adds the cpufreq driver for Cell BE processors. |
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI | |||
20 | config CPU_FREQ_MAPLE | 19 | config CPU_FREQ_MAPLE |
21 | bool "Support for Maple 970FX Evaluation Board" | 20 | bool "Support for Maple 970FX Evaluation Board" |
22 | depends on PPC_MAPLE | 21 | depends on PPC_MAPLE |
23 | select CPU_FREQ_TABLE | ||
24 | help | 22 | help |
25 | This adds support for frequency switching on Maple 970FX | 23 | This adds support for frequency switching on Maple 970FX |
26 | Evaluation Board and compatible boards (IBM JS2x blades). | 24 | Evaluation Board and compatible boards (IBM JS2x blades). |
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE | |||
28 | config PPC_CORENET_CPUFREQ | 26 | config PPC_CORENET_CPUFREQ |
29 | tristate "CPU frequency scaling driver for Freescale E500MC SoCs" | 27 | tristate "CPU frequency scaling driver for Freescale E500MC SoCs" |
30 | depends on PPC_E500MC && OF && COMMON_CLK | 28 | depends on PPC_E500MC && OF && COMMON_CLK |
31 | select CPU_FREQ_TABLE | ||
32 | select CLK_PPC_CORENET | 29 | select CLK_PPC_CORENET |
33 | help | 30 | help |
34 | This adds the CPUFreq driver support for Freescale e500mc, | 31 | This adds the CPUFreq driver support for Freescale e500mc, |
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ | |||
38 | config CPU_FREQ_PMAC | 35 | config CPU_FREQ_PMAC |
39 | bool "Support for Apple PowerBooks" | 36 | bool "Support for Apple PowerBooks" |
40 | depends on ADB_PMU && PPC32 | 37 | depends on ADB_PMU && PPC32 |
41 | select CPU_FREQ_TABLE | ||
42 | help | 38 | help |
43 | This adds support for frequency switching on Apple PowerBooks, | 39 | This adds support for frequency switching on Apple PowerBooks, |
44 | this currently includes some models of iBook & Titanium | 40 | this currently includes some models of iBook & Titanium |
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC | |||
47 | config CPU_FREQ_PMAC64 | 43 | config CPU_FREQ_PMAC64 |
48 | bool "Support for some Apple G5s" | 44 | bool "Support for some Apple G5s" |
49 | depends on PPC_PMAC && PPC64 | 45 | depends on PPC_PMAC && PPC64 |
50 | select CPU_FREQ_TABLE | ||
51 | help | 46 | help |
52 | This adds support for frequency switching on Apple iMac G5, | 47 | This adds support for frequency switching on Apple iMac G5, |
53 | and some of the more recent desktop G5 machines as well. | 48 | and some of the more recent desktop G5 machines as well. |
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64 | |||
55 | config PPC_PASEMI_CPUFREQ | 50 | config PPC_PASEMI_CPUFREQ |
56 | bool "Support for PA Semi PWRficient" | 51 | bool "Support for PA Semi PWRficient" |
57 | depends on PPC_PASEMI | 52 | depends on PPC_PASEMI |
58 | select CPU_FREQ_TABLE | ||
59 | default y | 53 | default y |
60 | help | 54 | help |
61 | This adds the support for frequency switching on PA Semi | 55 | This adds the support for frequency switching on PA Semi |
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index e2b6eabef221..6897ad85b046 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 | |||
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ | |||
31 | 31 | ||
32 | config X86_ACPI_CPUFREQ | 32 | config X86_ACPI_CPUFREQ |
33 | tristate "ACPI Processor P-States driver" | 33 | tristate "ACPI Processor P-States driver" |
34 | select CPU_FREQ_TABLE | ||
35 | depends on ACPI_PROCESSOR | 34 | depends on ACPI_PROCESSOR |
36 | help | 35 | help |
37 | This driver adds a CPUFreq driver which utilizes the ACPI | 36 | This driver adds a CPUFreq driver which utilizes the ACPI |
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB | |||
60 | 59 | ||
61 | config ELAN_CPUFREQ | 60 | config ELAN_CPUFREQ |
62 | tristate "AMD Elan SC400 and SC410" | 61 | tristate "AMD Elan SC400 and SC410" |
63 | select CPU_FREQ_TABLE | ||
64 | depends on MELAN | 62 | depends on MELAN |
65 | ---help--- | 63 | ---help--- |
66 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 | 64 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 |
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ | |||
76 | 74 | ||
77 | config SC520_CPUFREQ | 75 | config SC520_CPUFREQ |
78 | tristate "AMD Elan SC520" | 76 | tristate "AMD Elan SC520" |
79 | select CPU_FREQ_TABLE | ||
80 | depends on MELAN | 77 | depends on MELAN |
81 | ---help--- | 78 | ---help--- |
82 | This adds the CPUFreq driver for AMD Elan SC520 processor. | 79 | This adds the CPUFreq driver for AMD Elan SC520 processor. |
@@ -88,7 +85,6 @@ config SC520_CPUFREQ | |||
88 | 85 | ||
89 | config X86_POWERNOW_K6 | 86 | config X86_POWERNOW_K6 |
90 | tristate "AMD Mobile K6-2/K6-3 PowerNow!" | 87 | tristate "AMD Mobile K6-2/K6-3 PowerNow!" |
91 | select CPU_FREQ_TABLE | ||
92 | depends on X86_32 | 88 | depends on X86_32 |
93 | help | 89 | help |
94 | This adds the CPUFreq driver for mobile AMD K6-2+ and mobile | 90 | This adds the CPUFreq driver for mobile AMD K6-2+ and mobile |
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6 | |||
100 | 96 | ||
101 | config X86_POWERNOW_K7 | 97 | config X86_POWERNOW_K7 |
102 | tristate "AMD Mobile Athlon/Duron PowerNow!" | 98 | tristate "AMD Mobile Athlon/Duron PowerNow!" |
103 | select CPU_FREQ_TABLE | ||
104 | depends on X86_32 | 99 | depends on X86_32 |
105 | help | 100 | help |
106 | This adds the CPUFreq driver for mobile AMD K7 mobile processors. | 101 | This adds the CPUFreq driver for mobile AMD K7 mobile processors. |
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI | |||
118 | 113 | ||
119 | config X86_POWERNOW_K8 | 114 | config X86_POWERNOW_K8 |
120 | tristate "AMD Opteron/Athlon64 PowerNow!" | 115 | tristate "AMD Opteron/Athlon64 PowerNow!" |
121 | select CPU_FREQ_TABLE | ||
122 | depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ | 116 | depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ |
123 | help | 117 | help |
124 | This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. | 118 | This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. |
@@ -132,7 +126,6 @@ config X86_POWERNOW_K8 | |||
132 | config X86_AMD_FREQ_SENSITIVITY | 126 | config X86_AMD_FREQ_SENSITIVITY |
133 | tristate "AMD frequency sensitivity feedback powersave bias" | 127 | tristate "AMD frequency sensitivity feedback powersave bias" |
134 | depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD | 128 | depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD |
135 | select CPU_FREQ_TABLE | ||
136 | help | 129 | help |
137 | This adds AMD-specific powersave bias function to the ondemand | 130 | This adds AMD-specific powersave bias function to the ondemand |
138 | governor, which allows it to make more power-conscious frequency | 131 | governor, which allows it to make more power-conscious frequency |
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD | |||
160 | 153 | ||
161 | config X86_SPEEDSTEP_CENTRINO | 154 | config X86_SPEEDSTEP_CENTRINO |
162 | tristate "Intel Enhanced SpeedStep (deprecated)" | 155 | tristate "Intel Enhanced SpeedStep (deprecated)" |
163 | select CPU_FREQ_TABLE | ||
164 | select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 | 156 | select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 |
165 | depends on X86_32 || (X86_64 && ACPI_PROCESSOR) | 157 | depends on X86_32 || (X86_64 && ACPI_PROCESSOR) |
166 | help | 158 | help |
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE | |||
190 | 182 | ||
191 | config X86_SPEEDSTEP_ICH | 183 | config X86_SPEEDSTEP_ICH |
192 | tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" | 184 | tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" |
193 | select CPU_FREQ_TABLE | ||
194 | depends on X86_32 | 185 | depends on X86_32 |
195 | help | 186 | help |
196 | This adds the CPUFreq driver for certain mobile Intel Pentium III | 187 | This adds the CPUFreq driver for certain mobile Intel Pentium III |
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH | |||
204 | 195 | ||
205 | config X86_SPEEDSTEP_SMI | 196 | config X86_SPEEDSTEP_SMI |
206 | tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" | 197 | tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" |
207 | select CPU_FREQ_TABLE | ||
208 | depends on X86_32 | 198 | depends on X86_32 |
209 | help | 199 | help |
210 | This adds the CPUFreq driver for certain mobile Intel Pentium III | 200 | This adds the CPUFreq driver for certain mobile Intel Pentium III |
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI | |||
217 | 207 | ||
218 | config X86_P4_CLOCKMOD | 208 | config X86_P4_CLOCKMOD |
219 | tristate "Intel Pentium 4 clock modulation" | 209 | tristate "Intel Pentium 4 clock modulation" |
220 | select CPU_FREQ_TABLE | ||
221 | help | 210 | help |
222 | This adds the CPUFreq driver for Intel Pentium 4 / XEON | 211 | This adds the CPUFreq driver for Intel Pentium 4 / XEON |
223 | processors. When enabled it will lower CPU temperature by skipping | 212 | processors. When enabled it will lower CPU temperature by skipping |
@@ -259,7 +248,6 @@ config X86_LONGRUN | |||
259 | 248 | ||
260 | config X86_LONGHAUL | 249 | config X86_LONGHAUL |
261 | tristate "VIA Cyrix III Longhaul" | 250 | tristate "VIA Cyrix III Longhaul" |
262 | select CPU_FREQ_TABLE | ||
263 | depends on X86_32 && ACPI_PROCESSOR | 251 | depends on X86_32 && ACPI_PROCESSOR |
264 | help | 252 | help |
265 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, | 253 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, |
@@ -272,7 +260,6 @@ config X86_LONGHAUL | |||
272 | 260 | ||
273 | config X86_E_POWERSAVER | 261 | config X86_E_POWERSAVER |
274 | tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" | 262 | tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" |
275 | select CPU_FREQ_TABLE | ||
276 | depends on X86_32 && ACPI_PROCESSOR | 263 | depends on X86_32 && ACPI_PROCESSOR |
277 | help | 264 | help |
278 | This adds the CPUFreq driver for VIA C7 processors. However, this driver | 265 | This adds the CPUFreq driver for VIA C7 processors. However, this driver |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ad5866c2ada0..74945652dd7a 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # CPUfreq core | 1 | # CPUfreq core |
2 | obj-$(CONFIG_CPU_FREQ) += cpufreq.o | 2 | obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o |
3 | # CPUfreq stats | 3 | # CPUfreq stats |
4 | obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o | 4 | obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o |
5 | 5 | ||
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o | |||
11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | 11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o |
12 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o | 12 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o |
13 | 13 | ||
14 | # CPUfreq cross-arch helpers | ||
15 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | ||
16 | |||
17 | obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o | 14 | obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o |
18 | 15 | ||
19 | ################################################################################## | 16 | ################################################################################## |
@@ -77,6 +74,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o | |||
77 | obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o | 74 | obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o |
78 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o | 75 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o |
79 | obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o | 76 | obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o |
77 | obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o | ||
80 | 78 | ||
81 | ################################################################################## | 79 | ################################################################################## |
82 | # PowerPC platform drivers | 80 | # PowerPC platform drivers |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 506fd23c7550..caf41ebea184 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -424,34 +424,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, | |||
424 | } | 424 | } |
425 | 425 | ||
426 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | 426 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
427 | unsigned int target_freq, unsigned int relation) | 427 | unsigned int index) |
428 | { | 428 | { |
429 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 429 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
430 | struct acpi_processor_performance *perf; | 430 | struct acpi_processor_performance *perf; |
431 | struct cpufreq_freqs freqs; | ||
432 | struct drv_cmd cmd; | 431 | struct drv_cmd cmd; |
433 | unsigned int next_state = 0; /* Index into freq_table */ | ||
434 | unsigned int next_perf_state = 0; /* Index into perf table */ | 432 | unsigned int next_perf_state = 0; /* Index into perf table */ |
435 | int result = 0; | 433 | int result = 0; |
436 | 434 | ||
437 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | ||
438 | |||
439 | if (unlikely(data == NULL || | 435 | if (unlikely(data == NULL || |
440 | data->acpi_data == NULL || data->freq_table == NULL)) { | 436 | data->acpi_data == NULL || data->freq_table == NULL)) { |
441 | return -ENODEV; | 437 | return -ENODEV; |
442 | } | 438 | } |
443 | 439 | ||
444 | perf = data->acpi_data; | 440 | perf = data->acpi_data; |
445 | result = cpufreq_frequency_table_target(policy, | 441 | next_perf_state = data->freq_table[index].driver_data; |
446 | data->freq_table, | ||
447 | target_freq, | ||
448 | relation, &next_state); | ||
449 | if (unlikely(result)) { | ||
450 | result = -ENODEV; | ||
451 | goto out; | ||
452 | } | ||
453 | |||
454 | next_perf_state = data->freq_table[next_state].driver_data; | ||
455 | if (perf->state == next_perf_state) { | 442 | if (perf->state == next_perf_state) { |
456 | if (unlikely(data->resume)) { | 443 | if (unlikely(data->resume)) { |
457 | pr_debug("Called after resume, resetting to P%d\n", | 444 | pr_debug("Called after resume, resetting to P%d\n", |
@@ -492,23 +479,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
492 | else | 479 | else |
493 | cmd.mask = cpumask_of(policy->cpu); | 480 | cmd.mask = cpumask_of(policy->cpu); |
494 | 481 | ||
495 | freqs.old = perf->states[perf->state].core_frequency * 1000; | ||
496 | freqs.new = data->freq_table[next_state].frequency; | ||
497 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
498 | |||
499 | drv_write(&cmd); | 482 | drv_write(&cmd); |
500 | 483 | ||
501 | if (acpi_pstate_strict) { | 484 | if (acpi_pstate_strict) { |
502 | if (!check_freqs(cmd.mask, freqs.new, data)) { | 485 | if (!check_freqs(cmd.mask, data->freq_table[index].frequency, |
486 | data)) { | ||
503 | pr_debug("acpi_cpufreq_target failed (%d)\n", | 487 | pr_debug("acpi_cpufreq_target failed (%d)\n", |
504 | policy->cpu); | 488 | policy->cpu); |
505 | result = -EAGAIN; | 489 | result = -EAGAIN; |
506 | freqs.new = freqs.old; | ||
507 | } | 490 | } |
508 | } | 491 | } |
509 | 492 | ||
510 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
511 | |||
512 | if (!result) | 493 | if (!result) |
513 | perf->state = next_perf_state; | 494 | perf->state = next_perf_state; |
514 | 495 | ||
@@ -516,15 +497,6 @@ out: | |||
516 | return result; | 497 | return result; |
517 | } | 498 | } |
518 | 499 | ||
519 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | ||
520 | { | ||
521 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | ||
522 | |||
523 | pr_debug("acpi_cpufreq_verify\n"); | ||
524 | |||
525 | return cpufreq_frequency_table_verify(policy, data->freq_table); | ||
526 | } | ||
527 | |||
528 | static unsigned long | 500 | static unsigned long |
529 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | 501 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) |
530 | { | 502 | { |
@@ -837,7 +809,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
837 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; | 809 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; |
838 | perf->state = 0; | 810 | perf->state = 0; |
839 | 811 | ||
840 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 812 | result = cpufreq_table_validate_and_show(policy, data->freq_table); |
841 | if (result) | 813 | if (result) |
842 | goto err_freqfree; | 814 | goto err_freqfree; |
843 | 815 | ||
@@ -846,12 +818,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
846 | 818 | ||
847 | switch (perf->control_register.space_id) { | 819 | switch (perf->control_register.space_id) { |
848 | case ACPI_ADR_SPACE_SYSTEM_IO: | 820 | case ACPI_ADR_SPACE_SYSTEM_IO: |
849 | /* Current speed is unknown and not detectable by IO port */ | 821 | /* |
822 | * The core will not set policy->cur, because | ||
823 | * cpufreq_driver->get is NULL, so we need to set it here. | ||
824 | * However, we have to guess it, because the current speed is | ||
825 | * unknown and not detectable via IO ports. | ||
826 | */ | ||
850 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | 827 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); |
851 | break; | 828 | break; |
852 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 829 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
853 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; | 830 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; |
854 | policy->cur = get_cur_freq_on_cpu(cpu); | ||
855 | break; | 831 | break; |
856 | default: | 832 | default: |
857 | break; | 833 | break; |
@@ -868,8 +844,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
868 | (u32) perf->states[i].power, | 844 | (u32) perf->states[i].power, |
869 | (u32) perf->states[i].transition_latency); | 845 | (u32) perf->states[i].transition_latency); |
870 | 846 | ||
871 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
872 | |||
873 | /* | 847 | /* |
874 | * the first call to ->target() should result in us actually | 848 | * the first call to ->target() should result in us actually |
875 | * writing something to the appropriate registers. | 849 | * writing something to the appropriate registers. |
@@ -929,8 +903,8 @@ static struct freq_attr *acpi_cpufreq_attr[] = { | |||
929 | }; | 903 | }; |
930 | 904 | ||
931 | static struct cpufreq_driver acpi_cpufreq_driver = { | 905 | static struct cpufreq_driver acpi_cpufreq_driver = { |
932 | .verify = acpi_cpufreq_verify, | 906 | .verify = cpufreq_generic_frequency_table_verify, |
933 | .target = acpi_cpufreq_target, | 907 | .target_index = acpi_cpufreq_target, |
934 | .bios_limit = acpi_processor_get_bios_limit, | 908 | .bios_limit = acpi_processor_get_bios_limit, |
935 | .init = acpi_cpufreq_cpu_init, | 909 | .init = acpi_cpufreq_cpu_init, |
936 | .exit = acpi_cpufreq_cpu_exit, | 910 | .exit = acpi_cpufreq_cpu_exit, |
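The acpi-cpufreq conversion above shows the general shape of the new ->target_index() interface: the cpufreq core resolves the requested frequency to a table index and issues the PRECHANGE/POSTCHANGE notifications itself, so the driver only programs the hardware for freq_table[index] and .verify collapses to the generic table helper. A minimal sketch of that shape, with the example_* names standing in for driver-specific code:

#include <linux/cpufreq.h>

/* Filled in ->init() from firmware tables in a real driver (not shown). */
static struct cpufreq_frequency_table *example_freq_table;

static int example_write_pstate(unsigned int cpu, unsigned int khz)
{
        return 0;       /* stand-in for the MSR/IO write a real driver does */
}

static int example_target_index(struct cpufreq_policy *policy,
                                unsigned int index)
{
        /*
         * "index" has already been validated by the core against the
         * table handed over via cpufreq_table_validate_and_show(), and
         * the transition notifiers are invoked around this callback.
         */
        return example_write_pstate(policy->cpu,
                                    example_freq_table[index].frequency);
}

static struct cpufreq_driver example_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = example_target_index,
};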
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 3549f0784af1..5519933813ea 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c | |||
@@ -24,110 +24,323 @@ | |||
24 | #include <linux/cpufreq.h> | 24 | #include <linux/cpufreq.h> |
25 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | #include <linux/mutex.h> | ||
27 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
28 | #include <linux/opp.h> | 29 | #include <linux/pm_opp.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | #include <linux/topology.h> | 31 | #include <linux/topology.h> |
31 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <asm/bL_switcher.h> | ||
32 | 34 | ||
33 | #include "arm_big_little.h" | 35 | #include "arm_big_little.h" |
34 | 36 | ||
35 | /* Currently we support only two clusters */ | 37 | /* Currently we support only two clusters */ |
38 | #define A15_CLUSTER 0 | ||
39 | #define A7_CLUSTER 1 | ||
36 | #define MAX_CLUSTERS 2 | 40 | #define MAX_CLUSTERS 2 |
37 | 41 | ||
42 | #ifdef CONFIG_BL_SWITCHER | ||
43 | static bool bL_switching_enabled; | ||
44 | #define is_bL_switching_enabled() bL_switching_enabled | ||
45 | #define set_switching_enabled(x) (bL_switching_enabled = (x)) | ||
46 | #else | ||
47 | #define is_bL_switching_enabled() false | ||
48 | #define set_switching_enabled(x) do { } while (0) | ||
49 | #endif | ||
50 | |||
51 | #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) | ||
52 | #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) | ||
53 | |||
38 | static struct cpufreq_arm_bL_ops *arm_bL_ops; | 54 | static struct cpufreq_arm_bL_ops *arm_bL_ops; |
39 | static struct clk *clk[MAX_CLUSTERS]; | 55 | static struct clk *clk[MAX_CLUSTERS]; |
40 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; | 56 | static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; |
41 | static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; | 57 | static atomic_t cluster_usage[MAX_CLUSTERS + 1]; |
58 | |||
59 | static unsigned int clk_big_min; /* (Big) clock frequencies */ | ||
60 | static unsigned int clk_little_max; /* Maximum clock frequency (Little) */ | ||
61 | |||
62 | static DEFINE_PER_CPU(unsigned int, physical_cluster); | ||
63 | static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq); | ||
42 | 64 | ||
43 | static unsigned int bL_cpufreq_get(unsigned int cpu) | 65 | static struct mutex cluster_lock[MAX_CLUSTERS]; |
66 | |||
67 | static inline int raw_cpu_to_cluster(int cpu) | ||
44 | { | 68 | { |
45 | u32 cur_cluster = cpu_to_cluster(cpu); | 69 | return topology_physical_package_id(cpu); |
70 | } | ||
46 | 71 | ||
47 | return clk_get_rate(clk[cur_cluster]) / 1000; | 72 | static inline int cpu_to_cluster(int cpu) |
73 | { | ||
74 | return is_bL_switching_enabled() ? | ||
75 | MAX_CLUSTERS : raw_cpu_to_cluster(cpu); | ||
48 | } | 76 | } |
49 | 77 | ||
50 | /* Validate policy frequency range */ | 78 | static unsigned int find_cluster_maxfreq(int cluster) |
51 | static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy) | ||
52 | { | 79 | { |
53 | u32 cur_cluster = cpu_to_cluster(policy->cpu); | 80 | int j; |
81 | u32 max_freq = 0, cpu_freq; | ||
82 | |||
83 | for_each_online_cpu(j) { | ||
84 | cpu_freq = per_cpu(cpu_last_req_freq, j); | ||
85 | |||
86 | if ((cluster == per_cpu(physical_cluster, j)) && | ||
87 | (max_freq < cpu_freq)) | ||
88 | max_freq = cpu_freq; | ||
89 | } | ||
90 | |||
91 | pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster, | ||
92 | max_freq); | ||
93 | |||
94 | return max_freq; | ||
95 | } | ||
96 | |||
97 | static unsigned int clk_get_cpu_rate(unsigned int cpu) | ||
98 | { | ||
99 | u32 cur_cluster = per_cpu(physical_cluster, cpu); | ||
100 | u32 rate = clk_get_rate(clk[cur_cluster]) / 1000; | ||
101 | |||
102 | /* For switcher we use virtual A7 clock rates */ | ||
103 | if (is_bL_switching_enabled()) | ||
104 | rate = VIRT_FREQ(cur_cluster, rate); | ||
105 | |||
106 | pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu, | ||
107 | cur_cluster, rate); | ||
108 | |||
109 | return rate; | ||
110 | } | ||
111 | |||
112 | static unsigned int bL_cpufreq_get_rate(unsigned int cpu) | ||
113 | { | ||
114 | if (is_bL_switching_enabled()) { | ||
115 | pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, | ||
116 | cpu)); | ||
117 | |||
118 | return per_cpu(cpu_last_req_freq, cpu); | ||
119 | } else { | ||
120 | return clk_get_cpu_rate(cpu); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | static unsigned int | ||
125 | bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) | ||
126 | { | ||
127 | u32 new_rate, prev_rate; | ||
128 | int ret; | ||
129 | bool bLs = is_bL_switching_enabled(); | ||
130 | |||
131 | mutex_lock(&cluster_lock[new_cluster]); | ||
54 | 132 | ||
55 | return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); | 133 | if (bLs) { |
134 | prev_rate = per_cpu(cpu_last_req_freq, cpu); | ||
135 | per_cpu(cpu_last_req_freq, cpu) = rate; | ||
136 | per_cpu(physical_cluster, cpu) = new_cluster; | ||
137 | |||
138 | new_rate = find_cluster_maxfreq(new_cluster); | ||
139 | new_rate = ACTUAL_FREQ(new_cluster, new_rate); | ||
140 | } else { | ||
141 | new_rate = rate; | ||
142 | } | ||
143 | |||
144 | pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n", | ||
145 | __func__, cpu, old_cluster, new_cluster, new_rate); | ||
146 | |||
147 | ret = clk_set_rate(clk[new_cluster], new_rate * 1000); | ||
148 | if (WARN_ON(ret)) { | ||
149 | pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, | ||
150 | new_cluster); | ||
151 | if (bLs) { | ||
152 | per_cpu(cpu_last_req_freq, cpu) = prev_rate; | ||
153 | per_cpu(physical_cluster, cpu) = old_cluster; | ||
154 | } | ||
155 | |||
156 | mutex_unlock(&cluster_lock[new_cluster]); | ||
157 | |||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | mutex_unlock(&cluster_lock[new_cluster]); | ||
162 | |||
163 | /* Recalc freq for old cluster when switching clusters */ | ||
164 | if (old_cluster != new_cluster) { | ||
165 | pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n", | ||
166 | __func__, cpu, old_cluster, new_cluster); | ||
167 | |||
168 | /* Switch cluster */ | ||
169 | bL_switch_request(cpu, new_cluster); | ||
170 | |||
171 | mutex_lock(&cluster_lock[old_cluster]); | ||
172 | |||
173 | /* Set freq of old cluster if there are cpus left on it */ | ||
174 | new_rate = find_cluster_maxfreq(old_cluster); | ||
175 | new_rate = ACTUAL_FREQ(old_cluster, new_rate); | ||
176 | |||
177 | if (new_rate) { | ||
178 | pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n", | ||
179 | __func__, old_cluster, new_rate); | ||
180 | |||
181 | if (clk_set_rate(clk[old_cluster], new_rate * 1000)) | ||
182 | pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", | ||
183 | __func__, ret, old_cluster); | ||
184 | } | ||
185 | mutex_unlock(&cluster_lock[old_cluster]); | ||
186 | } | ||
187 | |||
188 | return 0; | ||
56 | } | 189 | } |
57 | 190 | ||
58 | /* Set clock frequency */ | 191 | /* Set clock frequency */ |
59 | static int bL_cpufreq_set_target(struct cpufreq_policy *policy, | 192 | static int bL_cpufreq_set_target(struct cpufreq_policy *policy, |
60 | unsigned int target_freq, unsigned int relation) | 193 | unsigned int index) |
61 | { | 194 | { |
62 | struct cpufreq_freqs freqs; | 195 | u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; |
63 | u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; | 196 | unsigned int freqs_new; |
64 | int ret = 0; | 197 | |
198 | cur_cluster = cpu_to_cluster(cpu); | ||
199 | new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); | ||
200 | |||
201 | freqs_new = freq_table[cur_cluster][index].frequency; | ||
202 | |||
203 | if (is_bL_switching_enabled()) { | ||
204 | if ((actual_cluster == A15_CLUSTER) && | ||
205 | (freqs_new < clk_big_min)) { | ||
206 | new_cluster = A7_CLUSTER; | ||
207 | } else if ((actual_cluster == A7_CLUSTER) && | ||
208 | (freqs_new > clk_little_max)) { | ||
209 | new_cluster = A15_CLUSTER; | ||
210 | } | ||
211 | } | ||
65 | 212 | ||
66 | cur_cluster = cpu_to_cluster(policy->cpu); | 213 | return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); |
214 | } | ||
67 | 215 | ||
68 | freqs.old = bL_cpufreq_get(policy->cpu); | 216 | static inline u32 get_table_count(struct cpufreq_frequency_table *table) |
217 | { | ||
218 | int count; | ||
69 | 219 | ||
70 | /* Determine valid target frequency using freq_table */ | 220 | for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) |
71 | cpufreq_frequency_table_target(policy, freq_table[cur_cluster], | 221 | ; |
72 | target_freq, relation, &freq_tab_idx); | ||
73 | freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; | ||
74 | 222 | ||
75 | pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", | 223 | return count; |
76 | __func__, cpu, cur_cluster, freqs.old, target_freq, | 224 | } |
77 | freqs.new); | ||
78 | 225 | ||
79 | if (freqs.old == freqs.new) | 226 | /* get the minimum frequency in the cpufreq_frequency_table */ |
80 | return 0; | 227 | static inline u32 get_table_min(struct cpufreq_frequency_table *table) |
228 | { | ||
229 | int i; | ||
230 | uint32_t min_freq = ~0; | ||
231 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) | ||
232 | if (table[i].frequency < min_freq) | ||
233 | min_freq = table[i].frequency; | ||
234 | return min_freq; | ||
235 | } | ||
81 | 236 | ||
82 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 237 | /* get the maximum frequency in the cpufreq_frequency_table */ |
238 | static inline u32 get_table_max(struct cpufreq_frequency_table *table) | ||
239 | { | ||
240 | int i; | ||
241 | uint32_t max_freq = 0; | ||
242 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) | ||
243 | if (table[i].frequency > max_freq) | ||
244 | max_freq = table[i].frequency; | ||
245 | return max_freq; | ||
246 | } | ||
83 | 247 | ||
84 | ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); | 248 | static int merge_cluster_tables(void) |
85 | if (ret) { | 249 | { |
86 | pr_err("clk_set_rate failed: %d\n", ret); | 250 | int i, j, k = 0, count = 1; |
87 | freqs.new = freqs.old; | 251 | struct cpufreq_frequency_table *table; |
252 | |||
253 | for (i = 0; i < MAX_CLUSTERS; i++) | ||
254 | count += get_table_count(freq_table[i]); | ||
255 | |||
256 | table = kzalloc(sizeof(*table) * count, GFP_KERNEL); | ||
257 | if (!table) | ||
258 | return -ENOMEM; | ||
259 | |||
260 | freq_table[MAX_CLUSTERS] = table; | ||
261 | |||
262 | /* Add in reverse order to get freqs in increasing order */ | ||
263 | for (i = MAX_CLUSTERS - 1; i >= 0; i--) { | ||
264 | for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; | ||
265 | j++) { | ||
266 | table[k].frequency = VIRT_FREQ(i, | ||
267 | freq_table[i][j].frequency); | ||
268 | pr_debug("%s: index: %d, freq: %d\n", __func__, k, | ||
269 | table[k].frequency); | ||
270 | k++; | ||
271 | } | ||
88 | } | 272 | } |
89 | 273 | ||
90 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 274 | table[k].driver_data = k; |
275 | table[k].frequency = CPUFREQ_TABLE_END; | ||
91 | 276 | ||
92 | return ret; | 277 | pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k); |
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static void _put_cluster_clk_and_freq_table(struct device *cpu_dev) | ||
283 | { | ||
284 | u32 cluster = raw_cpu_to_cluster(cpu_dev->id); | ||
285 | |||
286 | if (!freq_table[cluster]) | ||
287 | return; | ||
288 | |||
289 | clk_put(clk[cluster]); | ||
290 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | ||
291 | dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); | ||
93 | } | 292 | } |
94 | 293 | ||
95 | static void put_cluster_clk_and_freq_table(struct device *cpu_dev) | 294 | static void put_cluster_clk_and_freq_table(struct device *cpu_dev) |
96 | { | 295 | { |
97 | u32 cluster = cpu_to_cluster(cpu_dev->id); | 296 | u32 cluster = cpu_to_cluster(cpu_dev->id); |
297 | int i; | ||
298 | |||
299 | if (atomic_dec_return(&cluster_usage[cluster])) | ||
300 | return; | ||
301 | |||
302 | if (cluster < MAX_CLUSTERS) | ||
303 | return _put_cluster_clk_and_freq_table(cpu_dev); | ||
98 | 304 | ||
99 | if (!atomic_dec_return(&cluster_usage[cluster])) { | 305 | for_each_present_cpu(i) { |
100 | clk_put(clk[cluster]); | 306 | struct device *cdev = get_cpu_device(i); |
101 | opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | 307 | if (!cdev) { |
102 | dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); | 308 | pr_err("%s: failed to get cpu%d device\n", __func__, i); |
309 | return; | ||
310 | } | ||
311 | |||
312 | _put_cluster_clk_and_freq_table(cdev); | ||
103 | } | 313 | } |
314 | |||
315 | /* free virtual table */ | ||
316 | kfree(freq_table[cluster]); | ||
104 | } | 317 | } |
105 | 318 | ||
106 | static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | 319 | static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) |
107 | { | 320 | { |
108 | u32 cluster = cpu_to_cluster(cpu_dev->id); | 321 | u32 cluster = raw_cpu_to_cluster(cpu_dev->id); |
109 | char name[14] = "cpu-cluster."; | 322 | char name[14] = "cpu-cluster."; |
110 | int ret; | 323 | int ret; |
111 | 324 | ||
112 | if (atomic_inc_return(&cluster_usage[cluster]) != 1) | 325 | if (freq_table[cluster]) |
113 | return 0; | 326 | return 0; |
114 | 327 | ||
115 | ret = arm_bL_ops->init_opp_table(cpu_dev); | 328 | ret = arm_bL_ops->init_opp_table(cpu_dev); |
116 | if (ret) { | 329 | if (ret) { |
117 | dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", | 330 | dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n", |
118 | __func__, cpu_dev->id, ret); | 331 | __func__, cpu_dev->id, ret); |
119 | goto atomic_dec; | 332 | goto out; |
120 | } | 333 | } |
121 | 334 | ||
122 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); | 335 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); |
123 | if (ret) { | 336 | if (ret) { |
124 | dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", | 337 | dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", |
125 | __func__, cpu_dev->id, ret); | 338 | __func__, cpu_dev->id, ret); |
126 | goto atomic_dec; | 339 | goto out; |
127 | } | 340 | } |
128 | 341 | ||
129 | name[12] = cluster + '0'; | 342 | name[12] = cluster + '0'; |
130 | clk[cluster] = clk_get_sys(name, NULL); | 343 | clk[cluster] = clk_get(cpu_dev, name); |
131 | if (!IS_ERR(clk[cluster])) { | 344 | if (!IS_ERR(clk[cluster])) { |
132 | dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", | 345 | dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", |
133 | __func__, clk[cluster], freq_table[cluster], | 346 | __func__, clk[cluster], freq_table[cluster], |
@@ -138,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | |||
138 | dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", | 351 | dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", |
139 | __func__, cpu_dev->id, cluster); | 352 | __func__, cpu_dev->id, cluster); |
140 | ret = PTR_ERR(clk[cluster]); | 353 | ret = PTR_ERR(clk[cluster]); |
141 | opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | 354 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); |
142 | 355 | ||
143 | atomic_dec: | 356 | out: |
144 | atomic_dec(&cluster_usage[cluster]); | ||
145 | dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, | 357 | dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, |
146 | cluster); | 358 | cluster); |
147 | return ret; | 359 | return ret; |
148 | } | 360 | } |
149 | 361 | ||
362 | static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | ||
363 | { | ||
364 | u32 cluster = cpu_to_cluster(cpu_dev->id); | ||
365 | int i, ret; | ||
366 | |||
367 | if (atomic_inc_return(&cluster_usage[cluster]) != 1) | ||
368 | return 0; | ||
369 | |||
370 | if (cluster < MAX_CLUSTERS) { | ||
371 | ret = _get_cluster_clk_and_freq_table(cpu_dev); | ||
372 | if (ret) | ||
373 | atomic_dec(&cluster_usage[cluster]); | ||
374 | return ret; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * Get data for all clusters and fill virtual cluster with a merge of | ||
379 | * both | ||
380 | */ | ||
381 | for_each_present_cpu(i) { | ||
382 | struct device *cdev = get_cpu_device(i); | ||
383 | if (!cdev) { | ||
384 | pr_err("%s: failed to get cpu%d device\n", __func__, i); | ||
385 | return -ENODEV; | ||
386 | } | ||
387 | |||
388 | ret = _get_cluster_clk_and_freq_table(cdev); | ||
389 | if (ret) | ||
390 | goto put_clusters; | ||
391 | } | ||
392 | |||
393 | ret = merge_cluster_tables(); | ||
394 | if (ret) | ||
395 | goto put_clusters; | ||
396 | |||
397 | /* Assuming 2 clusters, set clk_big_min and clk_little_max */ | ||
398 | clk_big_min = get_table_min(freq_table[0]); | ||
399 | clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1])); | ||
400 | |||
401 | pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n", | ||
402 | __func__, cluster, clk_big_min, clk_little_max); | ||
403 | |||
404 | return 0; | ||
405 | |||
406 | put_clusters: | ||
407 | for_each_present_cpu(i) { | ||
408 | struct device *cdev = get_cpu_device(i); | ||
409 | if (!cdev) { | ||
410 | pr_err("%s: failed to get cpu%d device\n", __func__, i); | ||
411 | return -ENODEV; | ||
412 | } | ||
413 | |||
414 | _put_cluster_clk_and_freq_table(cdev); | ||
415 | } | ||
416 | |||
417 | atomic_dec(&cluster_usage[cluster]); | ||
418 | |||
419 | return ret; | ||
420 | } | ||
421 | |||
150 | /* Per-CPU initialization */ | 422 | /* Per-CPU initialization */ |
151 | static int bL_cpufreq_init(struct cpufreq_policy *policy) | 423 | static int bL_cpufreq_init(struct cpufreq_policy *policy) |
152 | { | 424 | { |
@@ -165,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
165 | if (ret) | 437 | if (ret) |
166 | return ret; | 438 | return ret; |
167 | 439 | ||
168 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]); | 440 | ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); |
169 | if (ret) { | 441 | if (ret) { |
170 | dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", | 442 | dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", |
171 | policy->cpu, cur_cluster); | 443 | policy->cpu, cur_cluster); |
@@ -173,7 +445,14 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
173 | return ret; | 445 | return ret; |
174 | } | 446 | } |
175 | 447 | ||
176 | cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu); | 448 | if (cur_cluster < MAX_CLUSTERS) { |
449 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | ||
450 | |||
451 | per_cpu(physical_cluster, policy->cpu) = cur_cluster; | ||
452 | } else { | ||
453 | /* Assumption: during init, we are always running on A15 */ | ||
454 | per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; | ||
455 | } | ||
177 | 456 | ||
178 | if (arm_bL_ops->get_transition_latency) | 457 | if (arm_bL_ops->get_transition_latency) |
179 | policy->cpuinfo.transition_latency = | 458 | policy->cpuinfo.transition_latency = |
@@ -181,9 +460,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
181 | else | 460 | else |
182 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 461 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
183 | 462 | ||
184 | policy->cur = bL_cpufreq_get(policy->cpu); | 463 | if (is_bL_switching_enabled()) |
185 | 464 | per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); | |
186 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | ||
187 | 465 | ||
188 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); | 466 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); |
189 | return 0; | 467 | return 0; |
@@ -200,33 +478,60 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) | |||
200 | return -ENODEV; | 478 | return -ENODEV; |
201 | } | 479 | } |
202 | 480 | ||
481 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
203 | put_cluster_clk_and_freq_table(cpu_dev); | 482 | put_cluster_clk_and_freq_table(cpu_dev); |
204 | dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); | 483 | dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); |
205 | 484 | ||
206 | return 0; | 485 | return 0; |
207 | } | 486 | } |
208 | 487 | ||
209 | /* Export freq_table to sysfs */ | ||
210 | static struct freq_attr *bL_cpufreq_attr[] = { | ||
211 | &cpufreq_freq_attr_scaling_available_freqs, | ||
212 | NULL, | ||
213 | }; | ||
214 | |||
215 | static struct cpufreq_driver bL_cpufreq_driver = { | 488 | static struct cpufreq_driver bL_cpufreq_driver = { |
216 | .name = "arm-big-little", | 489 | .name = "arm-big-little", |
217 | .flags = CPUFREQ_STICKY, | 490 | .flags = CPUFREQ_STICKY | |
218 | .verify = bL_cpufreq_verify_policy, | 491 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, |
219 | .target = bL_cpufreq_set_target, | 492 | .verify = cpufreq_generic_frequency_table_verify, |
220 | .get = bL_cpufreq_get, | 493 | .target_index = bL_cpufreq_set_target, |
494 | .get = bL_cpufreq_get_rate, | ||
221 | .init = bL_cpufreq_init, | 495 | .init = bL_cpufreq_init, |
222 | .exit = bL_cpufreq_exit, | 496 | .exit = bL_cpufreq_exit, |
223 | .have_governor_per_policy = true, | 497 | .attr = cpufreq_generic_attr, |
224 | .attr = bL_cpufreq_attr, | 498 | }; |
499 | |||
500 | static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb, | ||
501 | unsigned long action, void *_arg) | ||
502 | { | ||
503 | pr_debug("%s: action: %ld\n", __func__, action); | ||
504 | |||
505 | switch (action) { | ||
506 | case BL_NOTIFY_PRE_ENABLE: | ||
507 | case BL_NOTIFY_PRE_DISABLE: | ||
508 | cpufreq_unregister_driver(&bL_cpufreq_driver); | ||
509 | break; | ||
510 | |||
511 | case BL_NOTIFY_POST_ENABLE: | ||
512 | set_switching_enabled(true); | ||
513 | cpufreq_register_driver(&bL_cpufreq_driver); | ||
514 | break; | ||
515 | |||
516 | case BL_NOTIFY_POST_DISABLE: | ||
517 | set_switching_enabled(false); | ||
518 | cpufreq_register_driver(&bL_cpufreq_driver); | ||
519 | break; | ||
520 | |||
521 | default: | ||
522 | return NOTIFY_DONE; | ||
523 | } | ||
524 | |||
525 | return NOTIFY_OK; | ||
526 | } | ||
527 | |||
528 | static struct notifier_block bL_switcher_notifier = { | ||
529 | .notifier_call = bL_cpufreq_switcher_notifier, | ||
225 | }; | 530 | }; |
226 | 531 | ||
227 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) | 532 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) |
228 | { | 533 | { |
229 | int ret; | 534 | int ret, i; |
230 | 535 | ||
231 | if (arm_bL_ops) { | 536 | if (arm_bL_ops) { |
232 | pr_debug("%s: Already registered: %s, exiting\n", __func__, | 537 | pr_debug("%s: Already registered: %s, exiting\n", __func__, |
@@ -241,16 +546,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) | |||
241 | 546 | ||
242 | arm_bL_ops = ops; | 547 | arm_bL_ops = ops; |
243 | 548 | ||
549 | ret = bL_switcher_get_enabled(); | ||
550 | set_switching_enabled(ret); | ||
551 | |||
552 | for (i = 0; i < MAX_CLUSTERS; i++) | ||
553 | mutex_init(&cluster_lock[i]); | ||
554 | |||
244 | ret = cpufreq_register_driver(&bL_cpufreq_driver); | 555 | ret = cpufreq_register_driver(&bL_cpufreq_driver); |
245 | if (ret) { | 556 | if (ret) { |
246 | pr_info("%s: Failed registering platform driver: %s, err: %d\n", | 557 | pr_info("%s: Failed registering platform driver: %s, err: %d\n", |
247 | __func__, ops->name, ret); | 558 | __func__, ops->name, ret); |
248 | arm_bL_ops = NULL; | 559 | arm_bL_ops = NULL; |
249 | } else { | 560 | } else { |
250 | pr_info("%s: Registered platform driver: %s\n", __func__, | 561 | ret = bL_switcher_register_notifier(&bL_switcher_notifier); |
251 | ops->name); | 562 | if (ret) { |
563 | cpufreq_unregister_driver(&bL_cpufreq_driver); | ||
564 | arm_bL_ops = NULL; | ||
565 | } else { | ||
566 | pr_info("%s: Registered platform driver: %s\n", | ||
567 | __func__, ops->name); | ||
568 | } | ||
252 | } | 569 | } |
253 | 570 | ||
571 | bL_switcher_put_enabled(); | ||
254 | return ret; | 572 | return ret; |
255 | } | 573 | } |
256 | EXPORT_SYMBOL_GPL(bL_cpufreq_register); | 574 | EXPORT_SYMBOL_GPL(bL_cpufreq_register); |
@@ -263,7 +581,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) | |||
263 | return; | 581 | return; |
264 | } | 582 | } |
265 | 583 | ||
584 | bL_switcher_get_enabled(); | ||
585 | bL_switcher_unregister_notifier(&bL_switcher_notifier); | ||
266 | cpufreq_unregister_driver(&bL_cpufreq_driver); | 586 | cpufreq_unregister_driver(&bL_cpufreq_driver); |
587 | bL_switcher_put_enabled(); | ||
267 | pr_info("%s: Un-registered platform driver: %s\n", __func__, | 588 | pr_info("%s: Un-registered platform driver: %s\n", __func__, |
268 | arm_bL_ops->name); | 589 | arm_bL_ops->name); |
269 | arm_bL_ops = NULL; | 590 | arm_bL_ops = NULL; |
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h index 79b2ce17884d..70f18fc12d4a 100644 --- a/drivers/cpufreq/arm_big_little.h +++ b/drivers/cpufreq/arm_big_little.h | |||
@@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops { | |||
34 | int (*init_opp_table)(struct device *cpu_dev); | 34 | int (*init_opp_table)(struct device *cpu_dev); |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static inline int cpu_to_cluster(int cpu) | ||
38 | { | ||
39 | return topology_physical_package_id(cpu); | ||
40 | } | ||
41 | |||
42 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); | 37 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); |
43 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); | 38 | void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); |
44 | 39 | ||
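
The arm_big_little.h hunk above drops the cpu_to_cluster() helper (the physical cluster is now tracked per-CPU inside arm_big_little.c) but keeps the bL_cpufreq_register()/bL_cpufreq_unregister() entry points. As a rough illustration of how a platform glue module plugs into this framework, here is a minimal sketch; the ops fields shown (name, get_transition_latency, init_opp_table) are the ones visible in these hunks, and every my_glue_* name and value is a placeholder, not part of the patch.

#include <linux/module.h>
#include "arm_big_little.h"

/* Placeholder OPP setup; a real glue driver would parse DT or firmware data. */
static int my_glue_init_opp_table(struct device *cpu_dev)
{
	return 0;
}

static int my_glue_get_transition_latency(struct device *cpu_dev)
{
	return 500000;		/* ns, assumed value */
}

static struct cpufreq_arm_bL_ops my_glue_ops = {
	.name			= "my-bL-glue",
	.get_transition_latency	= my_glue_get_transition_latency,
	.init_opp_table		= my_glue_init_opp_table,
};

static int __init my_glue_init(void)
{
	return bL_cpufreq_register(&my_glue_ops);
}
module_init(my_glue_init);

static void __exit my_glue_exit(void)
{
	bL_cpufreq_unregister(&my_glue_ops);
}
module_exit(my_glue_exit);

MODULE_LICENSE("GPL");
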
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 480c0bd0468d..8d9d59108906 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
27 | #include <linux/opp.h> | 27 | #include <linux/pm_opp.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index e0c38d938997..856ad80418ae 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c | |||
@@ -19,18 +19,10 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | static struct clk *cpuclk; | 24 | static struct clk *cpuclk; |
24 | 25 | static struct cpufreq_frequency_table *freq_table; | |
25 | static int at32_verify_speed(struct cpufreq_policy *policy) | ||
26 | { | ||
27 | if (policy->cpu != 0) | ||
28 | return -EINVAL; | ||
29 | |||
30 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | ||
31 | policy->cpuinfo.max_freq); | ||
32 | return 0; | ||
33 | } | ||
34 | 26 | ||
35 | static unsigned int at32_get_speed(unsigned int cpu) | 27 | static unsigned int at32_get_speed(unsigned int cpu) |
36 | { | 28 | { |
@@ -43,74 +35,94 @@ static unsigned int at32_get_speed(unsigned int cpu) | |||
43 | static unsigned int ref_freq; | 35 | static unsigned int ref_freq; |
44 | static unsigned long loops_per_jiffy_ref; | 36 | static unsigned long loops_per_jiffy_ref; |
45 | 37 | ||
46 | static int at32_set_target(struct cpufreq_policy *policy, | 38 | static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) |
47 | unsigned int target_freq, | ||
48 | unsigned int relation) | ||
49 | { | 39 | { |
50 | struct cpufreq_freqs freqs; | 40 | unsigned int old_freq, new_freq; |
51 | long freq; | ||
52 | |||
53 | /* Convert target_freq from kHz to Hz */ | ||
54 | freq = clk_round_rate(cpuclk, target_freq * 1000); | ||
55 | |||
56 | /* Check if policy->min <= new_freq <= policy->max */ | ||
57 | if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) | ||
58 | return -EINVAL; | ||
59 | |||
60 | pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); | ||
61 | 41 | ||
62 | freqs.old = at32_get_speed(0); | 42 | old_freq = at32_get_speed(0); |
63 | freqs.new = (freq + 500) / 1000; | 43 | new_freq = freq_table[index].frequency; |
64 | freqs.flags = 0; | ||
65 | 44 | ||
66 | if (!ref_freq) { | 45 | if (!ref_freq) { |
67 | ref_freq = freqs.old; | 46 | ref_freq = old_freq; |
68 | loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; | 47 | loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; |
69 | } | 48 | } |
70 | 49 | ||
71 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 50 | if (old_freq < new_freq) |
72 | if (freqs.old < freqs.new) | ||
73 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( | 51 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( |
74 | loops_per_jiffy_ref, ref_freq, freqs.new); | 52 | loops_per_jiffy_ref, ref_freq, new_freq); |
75 | clk_set_rate(cpuclk, freq); | 53 | clk_set_rate(cpuclk, new_freq * 1000); |
76 | if (freqs.new < freqs.old) | 54 | if (new_freq < old_freq) |
77 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( | 55 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( |
78 | loops_per_jiffy_ref, ref_freq, freqs.new); | 56 | loops_per_jiffy_ref, ref_freq, new_freq); |
79 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
80 | |||
81 | pr_debug("cpufreq: set frequency %lu Hz\n", freq); | ||
82 | 57 | ||
83 | return 0; | 58 | return 0; |
84 | } | 59 | } |
85 | 60 | ||
86 | static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) | 61 | static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) |
87 | { | 62 | { |
63 | unsigned int frequency, rate, min_freq; | ||
64 | int retval, steps, i; | ||
65 | |||
88 | if (policy->cpu != 0) | 66 | if (policy->cpu != 0) |
89 | return -EINVAL; | 67 | return -EINVAL; |
90 | 68 | ||
91 | cpuclk = clk_get(NULL, "cpu"); | 69 | cpuclk = clk_get(NULL, "cpu"); |
92 | if (IS_ERR(cpuclk)) { | 70 | if (IS_ERR(cpuclk)) { |
93 | pr_debug("cpufreq: could not get CPU clk\n"); | 71 | pr_debug("cpufreq: could not get CPU clk\n"); |
94 | return PTR_ERR(cpuclk); | 72 | retval = PTR_ERR(cpuclk); |
73 | goto out_err; | ||
95 | } | 74 | } |
96 | 75 | ||
97 | policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; | 76 | min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; |
98 | policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 77 | frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
99 | policy->cpuinfo.transition_latency = 0; | 78 | policy->cpuinfo.transition_latency = 0; |
100 | policy->cur = at32_get_speed(0); | ||
101 | policy->min = policy->cpuinfo.min_freq; | ||
102 | policy->max = policy->cpuinfo.max_freq; | ||
103 | 79 | ||
104 | printk("cpufreq: AT32AP CPU frequency driver\n"); | 80 | /* |
81 | * The AVR32 CPU frequency scales in powers of two between the maximum | ||
82 | * and minimum rates; also add space for the table end marker. | ||
83 | * | ||
84 | * Further validate that the frequency is usable, and append it to the | ||
85 | * frequency table. | ||
86 | */ | ||
87 | steps = fls(frequency / min_freq) + 1; | ||
88 | freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table), | ||
89 | GFP_KERNEL); | ||
90 | if (!freq_table) { | ||
91 | retval = -ENOMEM; | ||
92 | goto out_err_put_clk; | ||
93 | } | ||
105 | 94 | ||
106 | return 0; | 95 | for (i = 0; i < (steps - 1); i++) { |
96 | rate = clk_round_rate(cpuclk, frequency * 1000) / 1000; | ||
97 | |||
98 | if (rate != frequency) | ||
99 | freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
100 | else | ||
101 | freq_table[i].frequency = frequency; | ||
102 | |||
103 | frequency /= 2; | ||
104 | } | ||
105 | |||
106 | freq_table[steps - 1].frequency = CPUFREQ_TABLE_END; | ||
107 | |||
108 | retval = cpufreq_table_validate_and_show(policy, freq_table); | ||
109 | if (!retval) { | ||
110 | printk("cpufreq: AT32AP CPU frequency driver\n"); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | kfree(freq_table); | ||
115 | out_err_put_clk: | ||
116 | clk_put(cpuclk); | ||
117 | out_err: | ||
118 | return retval; | ||
107 | } | 119 | } |
108 | 120 | ||
109 | static struct cpufreq_driver at32_driver = { | 121 | static struct cpufreq_driver at32_driver = { |
110 | .name = "at32ap", | 122 | .name = "at32ap", |
111 | .init = at32_cpufreq_driver_init, | 123 | .init = at32_cpufreq_driver_init, |
112 | .verify = at32_verify_speed, | 124 | .verify = cpufreq_generic_frequency_table_verify, |
113 | .target = at32_set_target, | 125 | .target_index = at32_set_target, |
114 | .get = at32_get_speed, | 126 | .get = at32_get_speed, |
115 | .flags = CPUFREQ_STICKY, | 127 | .flags = CPUFREQ_STICKY, |
116 | }; | 128 | }; |
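
The at32ap conversion above shows the pattern this series applies to each driver: build or declare a cpufreq_frequency_table, hand it to cpufreq_table_validate_and_show() at init time, and let the core drive the hardware through ->target_index. A stripped-down sketch of that shape follows; the mydrv_* names, frequencies, and latency are illustrative only, a static table stands in for the clk_round_rate() loop, and the ->get callback and module boilerplate are omitted.

static struct cpufreq_frequency_table mydrv_freq_table[] = {
	{ .frequency = 200000 },			/* kHz */
	{ .frequency = 400000 },
	{ .frequency = CPUFREQ_ENTRY_INVALID },		/* skipped by the core */
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int mydrv_target(struct cpufreq_policy *policy, unsigned int index)
{
	/* Program the hardware to mydrv_freq_table[index].frequency here. */
	return 0;
}

static int mydrv_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 100000;	/* ns, assumed */
	return cpufreq_table_validate_and_show(policy, mydrv_freq_table);
}

static struct cpufreq_driver mydrv_driver = {
	.name		= "mydrv",
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= mydrv_target,
	.init		= mydrv_init,
	.attr		= cpufreq_generic_attr,
};
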
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c index ef05978a7237..e9e63fc9c2c9 100644 --- a/drivers/cpufreq/blackfin-cpufreq.c +++ b/drivers/cpufreq/blackfin-cpufreq.c | |||
@@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new) | |||
127 | } | 127 | } |
128 | #endif | 128 | #endif |
129 | 129 | ||
130 | static int bfin_target(struct cpufreq_policy *policy, | 130 | static int bfin_target(struct cpufreq_policy *policy, unsigned int index) |
131 | unsigned int target_freq, unsigned int relation) | ||
132 | { | 131 | { |
133 | #ifndef CONFIG_BF60x | 132 | #ifndef CONFIG_BF60x |
134 | unsigned int plldiv; | 133 | unsigned int plldiv; |
135 | #endif | 134 | #endif |
136 | unsigned int index; | ||
137 | unsigned long cclk_hz; | ||
138 | struct cpufreq_freqs freqs; | ||
139 | static unsigned long lpj_ref; | 135 | static unsigned long lpj_ref; |
140 | static unsigned int lpj_ref_freq; | 136 | static unsigned int lpj_ref_freq; |
137 | unsigned int old_freq, new_freq; | ||
141 | int ret = 0; | 138 | int ret = 0; |
142 | 139 | ||
143 | #if defined(CONFIG_CYCLES_CLOCKSOURCE) | 140 | #if defined(CONFIG_CYCLES_CLOCKSOURCE) |
144 | cycles_t cycles; | 141 | cycles_t cycles; |
145 | #endif | 142 | #endif |
146 | 143 | ||
147 | if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, | 144 | old_freq = bfin_getfreq_khz(0); |
148 | relation, &index)) | 145 | new_freq = bfin_freq_table[index].frequency; |
149 | return -EINVAL; | ||
150 | 146 | ||
151 | cclk_hz = bfin_freq_table[index].frequency; | ||
152 | |||
153 | freqs.old = bfin_getfreq_khz(0); | ||
154 | freqs.new = cclk_hz; | ||
155 | |||
156 | pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", | ||
157 | cclk_hz, target_freq, freqs.old); | ||
158 | |||
159 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
160 | #ifndef CONFIG_BF60x | 147 | #ifndef CONFIG_BF60x |
161 | plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; | 148 | plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; |
162 | bfin_write_PLL_DIV(plldiv); | 149 | bfin_write_PLL_DIV(plldiv); |
163 | #else | 150 | #else |
164 | ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); | 151 | ret = cpu_set_cclk(policy->cpu, new_freq * 1000); |
165 | if (ret != 0) { | 152 | if (ret != 0) { |
166 | WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); | 153 | WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); |
167 | return ret; | 154 | return ret; |
@@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy, | |||
177 | #endif | 164 | #endif |
178 | if (!lpj_ref_freq) { | 165 | if (!lpj_ref_freq) { |
179 | lpj_ref = loops_per_jiffy; | 166 | lpj_ref = loops_per_jiffy; |
180 | lpj_ref_freq = freqs.old; | 167 | lpj_ref_freq = old_freq; |
181 | } | 168 | } |
182 | if (freqs.new != freqs.old) { | 169 | if (new_freq != old_freq) { |
183 | loops_per_jiffy = cpufreq_scale(lpj_ref, | 170 | loops_per_jiffy = cpufreq_scale(lpj_ref, |
184 | lpj_ref_freq, freqs.new); | 171 | lpj_ref_freq, new_freq); |
185 | } | 172 | } |
186 | 173 | ||
187 | /* TODO: just test case for cycles clock source, remove later */ | ||
188 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
189 | |||
190 | pr_debug("cpufreq: done\n"); | ||
191 | return ret; | 174 | return ret; |
192 | } | 175 | } |
193 | 176 | ||
194 | static int bfin_verify_speed(struct cpufreq_policy *policy) | ||
195 | { | ||
196 | return cpufreq_frequency_table_verify(policy, bfin_freq_table); | ||
197 | } | ||
198 | |||
199 | static int __bfin_cpu_init(struct cpufreq_policy *policy) | 177 | static int __bfin_cpu_init(struct cpufreq_policy *policy) |
200 | { | 178 | { |
201 | 179 | ||
@@ -209,23 +187,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy) | |||
209 | 187 | ||
210 | policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ | 188 | policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ |
211 | 189 | ||
212 | policy->cur = cclk; | 190 | return cpufreq_table_validate_and_show(policy, bfin_freq_table); |
213 | cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); | ||
214 | return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); | ||
215 | } | 191 | } |
216 | 192 | ||
217 | static struct freq_attr *bfin_freq_attr[] = { | ||
218 | &cpufreq_freq_attr_scaling_available_freqs, | ||
219 | NULL, | ||
220 | }; | ||
221 | |||
222 | static struct cpufreq_driver bfin_driver = { | 193 | static struct cpufreq_driver bfin_driver = { |
223 | .verify = bfin_verify_speed, | 194 | .verify = cpufreq_generic_frequency_table_verify, |
224 | .target = bfin_target, | 195 | .target_index = bfin_target, |
225 | .get = bfin_getfreq_khz, | 196 | .get = bfin_getfreq_khz, |
226 | .init = __bfin_cpu_init, | 197 | .init = __bfin_cpu_init, |
198 | .exit = cpufreq_generic_exit, | ||
227 | .name = "bfin cpufreq", | 199 | .name = "bfin cpufreq", |
228 | .attr = bfin_freq_attr, | 200 | .attr = cpufreq_generic_attr, |
229 | }; | 201 | }; |
230 | 202 | ||
231 | static int __init bfin_cpu_init(void) | 203 | static int __init bfin_cpu_init(void) |
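
Both the at32ap and blackfin conversions keep their loops_per_jiffy rescaling while dropping the PRE/POSTCHANGE notifications that the core now issues. The shared pattern, pulled out as a standalone sketch (mydrv_rescale_lpj and the static reference variables are illustrative; at32ap scales boot_cpu_data.loops_per_jiffy instead of the global):

static unsigned int lpj_ref_freq;	/* kHz at which lpj_ref was sampled */
static unsigned long lpj_ref;

static void mydrv_rescale_lpj(unsigned int old_freq, unsigned int new_freq)
{
	/* Capture the reference point on the first transition. */
	if (!lpj_ref_freq) {
		lpj_ref = loops_per_jiffy;
		lpj_ref_freq = old_freq;
	}

	/* Scale the delay-loop calibration to the new frequency. */
	if (new_freq != old_freq)
		loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq,
						new_freq);
}
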
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index c522a95c0e16..d4585ce2346c 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/opp.h> | 20 | #include <linux/pm_opp.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
@@ -30,73 +30,51 @@ static struct clk *cpu_clk; | |||
30 | static struct regulator *cpu_reg; | 30 | static struct regulator *cpu_reg; |
31 | static struct cpufreq_frequency_table *freq_table; | 31 | static struct cpufreq_frequency_table *freq_table; |
32 | 32 | ||
33 | static int cpu0_verify_speed(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
36 | } | ||
37 | |||
38 | static unsigned int cpu0_get_speed(unsigned int cpu) | 33 | static unsigned int cpu0_get_speed(unsigned int cpu) |
39 | { | 34 | { |
40 | return clk_get_rate(cpu_clk) / 1000; | 35 | return clk_get_rate(cpu_clk) / 1000; |
41 | } | 36 | } |
42 | 37 | ||
43 | static int cpu0_set_target(struct cpufreq_policy *policy, | 38 | static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) |
44 | unsigned int target_freq, unsigned int relation) | ||
45 | { | 39 | { |
46 | struct cpufreq_freqs freqs; | 40 | struct dev_pm_opp *opp; |
47 | struct opp *opp; | ||
48 | unsigned long volt = 0, volt_old = 0, tol = 0; | 41 | unsigned long volt = 0, volt_old = 0, tol = 0; |
42 | unsigned int old_freq, new_freq; | ||
49 | long freq_Hz, freq_exact; | 43 | long freq_Hz, freq_exact; |
50 | unsigned int index; | ||
51 | int ret; | 44 | int ret; |
52 | 45 | ||
53 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
54 | relation, &index); | ||
55 | if (ret) { | ||
56 | pr_err("failed to match target freqency %d: %d\n", | ||
57 | target_freq, ret); | ||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); | 46 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); |
62 | if (freq_Hz < 0) | 47 | if (freq_Hz < 0) |
63 | freq_Hz = freq_table[index].frequency * 1000; | 48 | freq_Hz = freq_table[index].frequency * 1000; |
64 | freq_exact = freq_Hz; | ||
65 | freqs.new = freq_Hz / 1000; | ||
66 | freqs.old = clk_get_rate(cpu_clk) / 1000; | ||
67 | 49 | ||
68 | if (freqs.old == freqs.new) | 50 | freq_exact = freq_Hz; |
69 | return 0; | 51 | new_freq = freq_Hz / 1000; |
70 | 52 | old_freq = clk_get_rate(cpu_clk) / 1000; | |
71 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
72 | 53 | ||
73 | if (!IS_ERR(cpu_reg)) { | 54 | if (!IS_ERR(cpu_reg)) { |
74 | rcu_read_lock(); | 55 | rcu_read_lock(); |
75 | opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); | 56 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); |
76 | if (IS_ERR(opp)) { | 57 | if (IS_ERR(opp)) { |
77 | rcu_read_unlock(); | 58 | rcu_read_unlock(); |
78 | pr_err("failed to find OPP for %ld\n", freq_Hz); | 59 | pr_err("failed to find OPP for %ld\n", freq_Hz); |
79 | freqs.new = freqs.old; | 60 | return PTR_ERR(opp); |
80 | ret = PTR_ERR(opp); | ||
81 | goto post_notify; | ||
82 | } | 61 | } |
83 | volt = opp_get_voltage(opp); | 62 | volt = dev_pm_opp_get_voltage(opp); |
84 | rcu_read_unlock(); | 63 | rcu_read_unlock(); |
85 | tol = volt * voltage_tolerance / 100; | 64 | tol = volt * voltage_tolerance / 100; |
86 | volt_old = regulator_get_voltage(cpu_reg); | 65 | volt_old = regulator_get_voltage(cpu_reg); |
87 | } | 66 | } |
88 | 67 | ||
89 | pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", | 68 | pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", |
90 | freqs.old / 1000, volt_old ? volt_old / 1000 : -1, | 69 | old_freq / 1000, volt_old ? volt_old / 1000 : -1, |
91 | freqs.new / 1000, volt ? volt / 1000 : -1); | 70 | new_freq / 1000, volt ? volt / 1000 : -1); |
92 | 71 | ||
93 | /* scaling up? scale voltage before frequency */ | 72 | /* scaling up? scale voltage before frequency */ |
94 | if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) { | 73 | if (!IS_ERR(cpu_reg) && new_freq > old_freq) { |
95 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | 74 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); |
96 | if (ret) { | 75 | if (ret) { |
97 | pr_err("failed to scale voltage up: %d\n", ret); | 76 | pr_err("failed to scale voltage up: %d\n", ret); |
98 | freqs.new = freqs.old; | 77 | return ret; |
99 | goto post_notify; | ||
100 | } | 78 | } |
101 | } | 79 | } |
102 | 80 | ||
@@ -105,72 +83,35 @@ static int cpu0_set_target(struct cpufreq_policy *policy, | |||
105 | pr_err("failed to set clock rate: %d\n", ret); | 83 | pr_err("failed to set clock rate: %d\n", ret); |
106 | if (!IS_ERR(cpu_reg)) | 84 | if (!IS_ERR(cpu_reg)) |
107 | regulator_set_voltage_tol(cpu_reg, volt_old, tol); | 85 | regulator_set_voltage_tol(cpu_reg, volt_old, tol); |
108 | freqs.new = freqs.old; | 86 | return ret; |
109 | goto post_notify; | ||
110 | } | 87 | } |
111 | 88 | ||
112 | /* scaling down? scale voltage after frequency */ | 89 | /* scaling down? scale voltage after frequency */ |
113 | if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) { | 90 | if (!IS_ERR(cpu_reg) && new_freq < old_freq) { |
114 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); | 91 | ret = regulator_set_voltage_tol(cpu_reg, volt, tol); |
115 | if (ret) { | 92 | if (ret) { |
116 | pr_err("failed to scale voltage down: %d\n", ret); | 93 | pr_err("failed to scale voltage down: %d\n", ret); |
117 | clk_set_rate(cpu_clk, freqs.old * 1000); | 94 | clk_set_rate(cpu_clk, old_freq * 1000); |
118 | freqs.new = freqs.old; | ||
119 | } | 95 | } |
120 | } | 96 | } |
121 | 97 | ||
122 | post_notify: | ||
123 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
124 | |||
125 | return ret; | 98 | return ret; |
126 | } | 99 | } |
127 | 100 | ||
128 | static int cpu0_cpufreq_init(struct cpufreq_policy *policy) | 101 | static int cpu0_cpufreq_init(struct cpufreq_policy *policy) |
129 | { | 102 | { |
130 | int ret; | 103 | return cpufreq_generic_init(policy, freq_table, transition_latency); |
131 | |||
132 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
133 | if (ret) { | ||
134 | pr_err("invalid frequency table: %d\n", ret); | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | policy->cpuinfo.transition_latency = transition_latency; | ||
139 | policy->cur = clk_get_rate(cpu_clk) / 1000; | ||
140 | |||
141 | /* | ||
142 | * The driver only supports the SMP configuartion where all processors | ||
143 | * share the clock and voltage and clock. Use cpufreq affected_cpus | ||
144 | * interface to have all CPUs scaled together. | ||
145 | */ | ||
146 | cpumask_setall(policy->cpus); | ||
147 | |||
148 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
149 | |||
150 | return 0; | ||
151 | } | 104 | } |
152 | 105 | ||
153 | static int cpu0_cpufreq_exit(struct cpufreq_policy *policy) | ||
154 | { | ||
155 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static struct freq_attr *cpu0_cpufreq_attr[] = { | ||
161 | &cpufreq_freq_attr_scaling_available_freqs, | ||
162 | NULL, | ||
163 | }; | ||
164 | |||
165 | static struct cpufreq_driver cpu0_cpufreq_driver = { | 106 | static struct cpufreq_driver cpu0_cpufreq_driver = { |
166 | .flags = CPUFREQ_STICKY, | 107 | .flags = CPUFREQ_STICKY, |
167 | .verify = cpu0_verify_speed, | 108 | .verify = cpufreq_generic_frequency_table_verify, |
168 | .target = cpu0_set_target, | 109 | .target_index = cpu0_set_target, |
169 | .get = cpu0_get_speed, | 110 | .get = cpu0_get_speed, |
170 | .init = cpu0_cpufreq_init, | 111 | .init = cpu0_cpufreq_init, |
171 | .exit = cpu0_cpufreq_exit, | 112 | .exit = cpufreq_generic_exit, |
172 | .name = "generic_cpu0", | 113 | .name = "generic_cpu0", |
173 | .attr = cpu0_cpufreq_attr, | 114 | .attr = cpufreq_generic_attr, |
174 | }; | 115 | }; |
175 | 116 | ||
176 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 117 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
@@ -218,7 +159,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
218 | goto out_put_node; | 159 | goto out_put_node; |
219 | } | 160 | } |
220 | 161 | ||
221 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table); | 162 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
222 | if (ret) { | 163 | if (ret) { |
223 | pr_err("failed to init cpufreq table: %d\n", ret); | 164 | pr_err("failed to init cpufreq table: %d\n", ret); |
224 | goto out_put_node; | 165 | goto out_put_node; |
@@ -230,7 +171,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
230 | transition_latency = CPUFREQ_ETERNAL; | 171 | transition_latency = CPUFREQ_ETERNAL; |
231 | 172 | ||
232 | if (!IS_ERR(cpu_reg)) { | 173 | if (!IS_ERR(cpu_reg)) { |
233 | struct opp *opp; | 174 | struct dev_pm_opp *opp; |
234 | unsigned long min_uV, max_uV; | 175 | unsigned long min_uV, max_uV; |
235 | int i; | 176 | int i; |
236 | 177 | ||
@@ -242,12 +183,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
242 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) | 183 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) |
243 | ; | 184 | ; |
244 | rcu_read_lock(); | 185 | rcu_read_lock(); |
245 | opp = opp_find_freq_exact(cpu_dev, | 186 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
246 | freq_table[0].frequency * 1000, true); | 187 | freq_table[0].frequency * 1000, true); |
247 | min_uV = opp_get_voltage(opp); | 188 | min_uV = dev_pm_opp_get_voltage(opp); |
248 | opp = opp_find_freq_exact(cpu_dev, | 189 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
249 | freq_table[i-1].frequency * 1000, true); | 190 | freq_table[i-1].frequency * 1000, true); |
250 | max_uV = opp_get_voltage(opp); | 191 | max_uV = dev_pm_opp_get_voltage(opp); |
251 | rcu_read_unlock(); | 192 | rcu_read_unlock(); |
252 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); | 193 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); |
253 | if (ret > 0) | 194 | if (ret > 0) |
@@ -264,7 +205,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
264 | return 0; | 205 | return 0; |
265 | 206 | ||
266 | out_free_table: | 207 | out_free_table: |
267 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 208 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
268 | out_put_node: | 209 | out_put_node: |
269 | of_node_put(np); | 210 | of_node_put(np); |
270 | return ret; | 211 | return ret; |
@@ -273,7 +214,7 @@ out_put_node: | |||
273 | static int cpu0_cpufreq_remove(struct platform_device *pdev) | 214 | static int cpu0_cpufreq_remove(struct platform_device *pdev) |
274 | { | 215 | { |
275 | cpufreq_unregister_driver(&cpu0_cpufreq_driver); | 216 | cpufreq_unregister_driver(&cpu0_cpufreq_driver); |
276 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 217 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
277 | 218 | ||
278 | return 0; | 219 | return 0; |
279 | } | 220 | } |
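
cpufreq-cpu0, like the arm_big_little_dt include change earlier, moves from the old opp.h interface to <linux/pm_opp.h> and the dev_pm_opp_* names. Its lookup idiom, extracted as a standalone sketch (the device pointer, function name, and error handling are placeholders):

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Find the voltage needed for (at least) freq_hz on this CPU device. */
static int mydrv_freq_to_volt(struct device *cpu_dev, unsigned long freq_hz,
			      unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();	/* OPP lookups of this era require RCU */
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt_uv = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}
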
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index b83d45f68574..a05b876f375e 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c | |||
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy) | |||
303 | if (policy->min < (fsb_pol_max * fid * 100)) | 303 | if (policy->min < (fsb_pol_max * fid * 100)) |
304 | policy->max = (fsb_pol_max + 1) * fid * 100; | 304 | policy->max = (fsb_pol_max + 1) * fid * 100; |
305 | 305 | ||
306 | cpufreq_verify_within_limits(policy, | 306 | cpufreq_verify_within_cpu_limits(policy); |
307 | policy->cpuinfo.min_freq, | ||
308 | policy->cpuinfo.max_freq); | ||
309 | return 0; | 307 | return 0; |
310 | } | 308 | } |
311 | 309 | ||
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) | |||
362 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; | 360 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; |
363 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; | 361 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; |
364 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 362 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
365 | policy->cur = nforce2_get(policy->cpu); | ||
366 | 363 | ||
367 | return 0; | 364 | return 0; |
368 | } | 365 | } |
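
nforce2 has no frequency table, so it cannot use the generic table-based verify; instead its hand-rolled ->verify callback now ends with the cpufreq_verify_within_cpu_limits() helper, which clamps policy->min/max to the limits in policy->cpuinfo. A driver-agnostic sketch of that shape (mydrv_verify is a placeholder name):

static int mydrv_verify(struct cpufreq_policy *policy)
{
	/* Apply any hardware-specific constraints to policy->min/max first,
	 * then clamp to the limits advertised in policy->cpuinfo. */
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}
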
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 04548f7023af..02d534da22dd 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list); | |||
47 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); | 47 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* | 50 | static inline bool has_target(void) |
51 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | 51 | { |
52 | * all cpufreq/hotplug/workqueue/etc related lock issues. | 52 | return cpufreq_driver->target_index || cpufreq_driver->target; |
53 | * | ||
54 | * The rules for this semaphore: | ||
55 | * - Any routine that wants to read from the policy structure will | ||
56 | * do a down_read on this semaphore. | ||
57 | * - Any routine that will write to the policy structure and/or may take away | ||
58 | * the policy altogether (eg. CPU hotplug), will hold this lock in write | ||
59 | * mode before doing so. | ||
60 | * | ||
61 | * Additional rules: | ||
62 | * - Governor routines that can be called in cpufreq hotplug path should not | ||
63 | * take this sem as top level hotplug notifier handler takes this. | ||
64 | * - Lock should not be held across | ||
65 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); | ||
66 | */ | ||
67 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | ||
68 | |||
69 | #define lock_policy_rwsem(mode, cpu) \ | ||
70 | static int lock_policy_rwsem_##mode(int cpu) \ | ||
71 | { \ | ||
72 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ | ||
73 | BUG_ON(!policy); \ | ||
74 | down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ | ||
75 | \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | lock_policy_rwsem(read, cpu); | ||
80 | lock_policy_rwsem(write, cpu); | ||
81 | |||
82 | #define unlock_policy_rwsem(mode, cpu) \ | ||
83 | static void unlock_policy_rwsem_##mode(int cpu) \ | ||
84 | { \ | ||
85 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ | ||
86 | BUG_ON(!policy); \ | ||
87 | up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ | ||
88 | } | 53 | } |
89 | 54 | ||
90 | unlock_policy_rwsem(read, cpu); | ||
91 | unlock_policy_rwsem(write, cpu); | ||
92 | |||
93 | /* | 55 | /* |
94 | * rwsem to guarantee that cpufreq driver module doesn't unload during critical | 56 | * rwsem to guarantee that cpufreq driver module doesn't unload during critical |
95 | * sections | 57 | * sections |
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex); | |||
135 | 97 | ||
136 | bool have_governor_per_policy(void) | 98 | bool have_governor_per_policy(void) |
137 | { | 99 | { |
138 | return cpufreq_driver->have_governor_per_policy; | 100 | return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); |
139 | } | 101 | } |
140 | EXPORT_SYMBOL_GPL(have_governor_per_policy); | 102 | EXPORT_SYMBOL_GPL(have_governor_per_policy); |
141 | 103 | ||
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) | |||
183 | } | 145 | } |
184 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); | 146 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); |
185 | 147 | ||
148 | /* | ||
149 | * This is a generic cpufreq init() routine which can be used by cpufreq | ||
150 | * drivers of SMP systems. It will do the following: | ||
151 | * - validate & show the frequency table passed in | ||
152 | * - set the policy's transition latency | ||
153 | * - fill policy->cpus with all possible CPUs | ||
154 | */ | ||
155 | int cpufreq_generic_init(struct cpufreq_policy *policy, | ||
156 | struct cpufreq_frequency_table *table, | ||
157 | unsigned int transition_latency) | ||
158 | { | ||
159 | int ret; | ||
160 | |||
161 | ret = cpufreq_table_validate_and_show(policy, table); | ||
162 | if (ret) { | ||
163 | pr_err("%s: invalid frequency table: %d\n", __func__, ret); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | policy->cpuinfo.transition_latency = transition_latency; | ||
168 | |||
169 | /* | ||
170 | * The driver only supports the SMP configuration where all processors | ||
171 | * share the same clock and voltage. | ||
172 | */ | ||
173 | cpumask_setall(policy->cpus); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | EXPORT_SYMBOL_GPL(cpufreq_generic_init); | ||
178 | |||
186 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 179 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
187 | { | 180 | { |
188 | struct cpufreq_policy *policy = NULL; | 181 | struct cpufreq_policy *policy = NULL; |
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
363 | *policy = CPUFREQ_POLICY_POWERSAVE; | 356 | *policy = CPUFREQ_POLICY_POWERSAVE; |
364 | err = 0; | 357 | err = 0; |
365 | } | 358 | } |
366 | } else if (cpufreq_driver->target) { | 359 | } else if (has_target()) { |
367 | struct cpufreq_governor *t; | 360 | struct cpufreq_governor *t; |
368 | 361 | ||
369 | mutex_lock(&cpufreq_governor_mutex); | 362 | mutex_lock(&cpufreq_governor_mutex); |
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min); | |||
414 | show_one(scaling_max_freq, max); | 407 | show_one(scaling_max_freq, max); |
415 | show_one(scaling_cur_freq, cur); | 408 | show_one(scaling_cur_freq, cur); |
416 | 409 | ||
417 | static int __cpufreq_set_policy(struct cpufreq_policy *policy, | 410 | static int cpufreq_set_policy(struct cpufreq_policy *policy, |
418 | struct cpufreq_policy *new_policy); | 411 | struct cpufreq_policy *new_policy); |
419 | 412 | ||
420 | /** | 413 | /** |
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \ | |||
435 | if (ret != 1) \ | 428 | if (ret != 1) \ |
436 | return -EINVAL; \ | 429 | return -EINVAL; \ |
437 | \ | 430 | \ |
438 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 431 | ret = cpufreq_set_policy(policy, &new_policy); \ |
439 | policy->user_policy.object = policy->object; \ | 432 | policy->user_policy.object = policy->object; \ |
440 | \ | 433 | \ |
441 | return ret ? ret : count; \ | 434 | return ret ? ret : count; \ |
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | |||
493 | &new_policy.governor)) | 486 | &new_policy.governor)) |
494 | return -EINVAL; | 487 | return -EINVAL; |
495 | 488 | ||
496 | /* | 489 | ret = cpufreq_set_policy(policy, &new_policy); |
497 | * Do not use cpufreq_set_policy here or the user_policy.max | ||
498 | * will be wrongly overridden | ||
499 | */ | ||
500 | ret = __cpufreq_set_policy(policy, &new_policy); | ||
501 | 490 | ||
502 | policy->user_policy.policy = policy->policy; | 491 | policy->user_policy.policy = policy->policy; |
503 | policy->user_policy.governor = policy->governor; | 492 | policy->user_policy.governor = policy->governor; |
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | |||
525 | ssize_t i = 0; | 514 | ssize_t i = 0; |
526 | struct cpufreq_governor *t; | 515 | struct cpufreq_governor *t; |
527 | 516 | ||
528 | if (!cpufreq_driver->target) { | 517 | if (!has_target()) { |
529 | i += sprintf(buf, "performance powersave"); | 518 | i += sprintf(buf, "performance powersave"); |
530 | goto out; | 519 | goto out; |
531 | } | 520 | } |
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | |||
653 | { | 642 | { |
654 | struct cpufreq_policy *policy = to_policy(kobj); | 643 | struct cpufreq_policy *policy = to_policy(kobj); |
655 | struct freq_attr *fattr = to_attr(attr); | 644 | struct freq_attr *fattr = to_attr(attr); |
656 | ssize_t ret = -EINVAL; | 645 | ssize_t ret; |
657 | 646 | ||
658 | if (!down_read_trylock(&cpufreq_rwsem)) | 647 | if (!down_read_trylock(&cpufreq_rwsem)) |
659 | goto exit; | 648 | return -EINVAL; |
660 | 649 | ||
661 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 650 | down_read(&policy->rwsem); |
662 | goto up_read; | ||
663 | 651 | ||
664 | if (fattr->show) | 652 | if (fattr->show) |
665 | ret = fattr->show(policy, buf); | 653 | ret = fattr->show(policy, buf); |
666 | else | 654 | else |
667 | ret = -EIO; | 655 | ret = -EIO; |
668 | 656 | ||
669 | unlock_policy_rwsem_read(policy->cpu); | 657 | up_read(&policy->rwsem); |
670 | |||
671 | up_read: | ||
672 | up_read(&cpufreq_rwsem); | 658 | up_read(&cpufreq_rwsem); |
673 | exit: | 659 | |
674 | return ret; | 660 | return ret; |
675 | } | 661 | } |
676 | 662 | ||
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, | |||
689 | if (!down_read_trylock(&cpufreq_rwsem)) | 675 | if (!down_read_trylock(&cpufreq_rwsem)) |
690 | goto unlock; | 676 | goto unlock; |
691 | 677 | ||
692 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 678 | down_write(&policy->rwsem); |
693 | goto up_read; | ||
694 | 679 | ||
695 | if (fattr->store) | 680 | if (fattr->store) |
696 | ret = fattr->store(policy, buf, count); | 681 | ret = fattr->store(policy, buf, count); |
697 | else | 682 | else |
698 | ret = -EIO; | 683 | ret = -EIO; |
699 | 684 | ||
700 | unlock_policy_rwsem_write(policy->cpu); | 685 | up_write(&policy->rwsem); |
701 | 686 | ||
702 | up_read: | ||
703 | up_read(&cpufreq_rwsem); | 687 | up_read(&cpufreq_rwsem); |
704 | unlock: | 688 | unlock: |
705 | put_online_cpus(); | 689 | put_online_cpus(); |
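
The per-CPU cpu_policy_rwsem and its lock_policy_rwsem_* wrappers are gone; readers and writers now take the rwsem embedded in the policy itself, as the sysfs show()/store() hunks above do. Code that used to call lock_policy_rwsem_read(cpu) would now look roughly like this sketch (read_cur_freq is a hypothetical helper):

static unsigned int read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)
		return 0;

	down_read(&policy->rwsem);	/* was lock_policy_rwsem_read(cpu) */
	cur = policy->cur;
	up_read(&policy->rwsem);	/* was unlock_policy_rwsem_read(cpu) */

	cpufreq_cpu_put(policy);
	return cur;
}
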
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, | |||
815 | if (ret) | 799 | if (ret) |
816 | goto err_out_kobj_put; | 800 | goto err_out_kobj_put; |
817 | } | 801 | } |
818 | if (cpufreq_driver->target) { | 802 | if (has_target()) { |
819 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 803 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
820 | if (ret) | 804 | if (ret) |
821 | goto err_out_kobj_put; | 805 | goto err_out_kobj_put; |
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
844 | int ret = 0; | 828 | int ret = 0; |
845 | 829 | ||
846 | memcpy(&new_policy, policy, sizeof(*policy)); | 830 | memcpy(&new_policy, policy, sizeof(*policy)); |
847 | /* assure that the starting sequence is run in __cpufreq_set_policy */ | 831 | /* assure that the starting sequence is run in cpufreq_set_policy */ |
848 | policy->governor = NULL; | 832 | policy->governor = NULL; |
849 | 833 | ||
850 | /* set default policy */ | 834 | /* set default policy */ |
851 | ret = __cpufreq_set_policy(policy, &new_policy); | 835 | ret = cpufreq_set_policy(policy, &new_policy); |
852 | policy->user_policy.policy = policy->policy; | 836 | policy->user_policy.policy = policy->policy; |
853 | policy->user_policy.governor = policy->governor; | 837 | policy->user_policy.governor = policy->governor; |
854 | 838 | ||
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
864 | unsigned int cpu, struct device *dev, | 848 | unsigned int cpu, struct device *dev, |
865 | bool frozen) | 849 | bool frozen) |
866 | { | 850 | { |
867 | int ret = 0, has_target = !!cpufreq_driver->target; | 851 | int ret = 0; |
868 | unsigned long flags; | 852 | unsigned long flags; |
869 | 853 | ||
870 | if (has_target) { | 854 | if (has_target()) { |
871 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 855 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
872 | if (ret) { | 856 | if (ret) { |
873 | pr_err("%s: Failed to stop governor\n", __func__); | 857 | pr_err("%s: Failed to stop governor\n", __func__); |
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
875 | } | 859 | } |
876 | } | 860 | } |
877 | 861 | ||
878 | lock_policy_rwsem_write(policy->cpu); | 862 | down_write(&policy->rwsem); |
879 | 863 | ||
880 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 864 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
881 | 865 | ||
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
883 | per_cpu(cpufreq_cpu_data, cpu) = policy; | 867 | per_cpu(cpufreq_cpu_data, cpu) = policy; |
884 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 868 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
885 | 869 | ||
886 | unlock_policy_rwsem_write(policy->cpu); | 870 | up_write(&policy->rwsem); |
887 | 871 | ||
888 | if (has_target) { | 872 | if (has_target()) { |
889 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || | 873 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || |
890 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { | 874 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { |
891 | pr_err("%s: Failed to start governor\n", __func__); | 875 | pr_err("%s: Failed to start governor\n", __func__); |
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) | |||
930 | goto err_free_cpumask; | 914 | goto err_free_cpumask; |
931 | 915 | ||
932 | INIT_LIST_HEAD(&policy->policy_list); | 916 | INIT_LIST_HEAD(&policy->policy_list); |
917 | init_rwsem(&policy->rwsem); | ||
918 | |||
933 | return policy; | 919 | return policy; |
934 | 920 | ||
935 | err_free_cpumask: | 921 | err_free_cpumask: |
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) | |||
949 | 935 | ||
950 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) | 936 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) |
951 | { | 937 | { |
952 | if (cpu == policy->cpu) | 938 | if (WARN_ON(cpu == policy->cpu)) |
953 | return; | 939 | return; |
954 | 940 | ||
955 | /* | 941 | down_write(&policy->rwsem); |
956 | * Take direct locks as lock_policy_rwsem_write wouldn't work here. | ||
957 | * Also lock for last cpu is enough here as contention will happen only | ||
958 | * after policy->cpu is changed and after it is changed, other threads | ||
959 | * will try to acquire lock for new cpu. And policy is already updated | ||
960 | * by then. | ||
961 | */ | ||
962 | down_write(&per_cpu(cpu_policy_rwsem, policy->cpu)); | ||
963 | 942 | ||
964 | policy->last_cpu = policy->cpu; | 943 | policy->last_cpu = policy->cpu; |
965 | policy->cpu = cpu; | 944 | policy->cpu = cpu; |
966 | 945 | ||
967 | up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu)); | 946 | up_write(&policy->rwsem); |
968 | 947 | ||
969 | #ifdef CONFIG_CPU_FREQ_TABLE | ||
970 | cpufreq_frequency_table_update_policy_cpu(policy); | 948 | cpufreq_frequency_table_update_policy_cpu(policy); |
971 | #endif | ||
972 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 949 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
973 | CPUFREQ_UPDATE_POLICY_CPU, policy); | 950 | CPUFREQ_UPDATE_POLICY_CPU, policy); |
974 | } | 951 | } |
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1053 | goto err_set_policy_cpu; | 1030 | goto err_set_policy_cpu; |
1054 | } | 1031 | } |
1055 | 1032 | ||
1033 | if (cpufreq_driver->get) { | ||
1034 | policy->cur = cpufreq_driver->get(policy->cpu); | ||
1035 | if (!policy->cur) { | ||
1036 | pr_err("%s: ->get() failed\n", __func__); | ||
1037 | goto err_get_freq; | ||
1038 | } | ||
1039 | } | ||
1040 | |||
1056 | /* related cpus should at least have policy->cpus */ | 1041 | /* related cpus should at least have policy->cpus */ |
1057 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | 1042 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); |
1058 | 1043 | ||
@@ -1107,6 +1092,9 @@ err_out_unregister: | |||
1107 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1092 | per_cpu(cpufreq_cpu_data, j) = NULL; |
1108 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1093 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1109 | 1094 | ||
1095 | err_get_freq: | ||
1096 | if (cpufreq_driver->exit) | ||
1097 | cpufreq_driver->exit(policy); | ||
1110 | err_set_policy_cpu: | 1098 | err_set_policy_cpu: |
1111 | cpufreq_policy_free(policy); | 1099 | cpufreq_policy_free(policy); |
1112 | nomem_out: | 1100 | nomem_out: |
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | |||
1147 | if (ret) { | 1135 | if (ret) { |
1148 | pr_err("%s: Failed to move kobj: %d", __func__, ret); | 1136 | pr_err("%s: Failed to move kobj: %d", __func__, ret); |
1149 | 1137 | ||
1150 | WARN_ON(lock_policy_rwsem_write(old_cpu)); | 1138 | down_write(&policy->rwsem); |
1151 | cpumask_set_cpu(old_cpu, policy->cpus); | 1139 | cpumask_set_cpu(old_cpu, policy->cpus); |
1152 | unlock_policy_rwsem_write(old_cpu); | 1140 | up_write(&policy->rwsem); |
1153 | 1141 | ||
1154 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, | 1142 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, |
1155 | "cpufreq"); | 1143 | "cpufreq"); |
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1186 | return -EINVAL; | 1174 | return -EINVAL; |
1187 | } | 1175 | } |
1188 | 1176 | ||
1189 | if (cpufreq_driver->target) { | 1177 | if (has_target()) { |
1190 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 1178 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
1191 | if (ret) { | 1179 | if (ret) { |
1192 | pr_err("%s: Failed to stop governor\n", __func__); | 1180 | pr_err("%s: Failed to stop governor\n", __func__); |
@@ -1200,22 +1188,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1200 | policy->governor->name, CPUFREQ_NAME_LEN); | 1188 | policy->governor->name, CPUFREQ_NAME_LEN); |
1201 | #endif | 1189 | #endif |
1202 | 1190 | ||
1203 | lock_policy_rwsem_read(cpu); | 1191 | down_read(&policy->rwsem); |
1204 | cpus = cpumask_weight(policy->cpus); | 1192 | cpus = cpumask_weight(policy->cpus); |
1205 | unlock_policy_rwsem_read(cpu); | 1193 | up_read(&policy->rwsem); |
1206 | 1194 | ||
1207 | if (cpu != policy->cpu) { | 1195 | if (cpu != policy->cpu) { |
1208 | if (!frozen) | 1196 | if (!frozen) |
1209 | sysfs_remove_link(&dev->kobj, "cpufreq"); | 1197 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
1210 | } else if (cpus > 1) { | 1198 | } else if (cpus > 1) { |
1211 | |||
1212 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); | 1199 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); |
1213 | if (new_cpu >= 0) { | 1200 | if (new_cpu >= 0) { |
1214 | update_policy_cpu(policy, new_cpu); | 1201 | update_policy_cpu(policy, new_cpu); |
1215 | 1202 | ||
1216 | if (!frozen) { | 1203 | if (!frozen) { |
1217 | pr_debug("%s: policy Kobject moved to cpu: %d " | 1204 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", |
1218 | "from: %d\n",__func__, new_cpu, cpu); | 1205 | __func__, new_cpu, cpu); |
1219 | } | 1206 | } |
1220 | } | 1207 | } |
1221 | } | 1208 | } |
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1243 | return -EINVAL; | 1230 | return -EINVAL; |
1244 | } | 1231 | } |
1245 | 1232 | ||
1246 | WARN_ON(lock_policy_rwsem_write(cpu)); | 1233 | down_write(&policy->rwsem); |
1247 | cpus = cpumask_weight(policy->cpus); | 1234 | cpus = cpumask_weight(policy->cpus); |
1248 | 1235 | ||
1249 | if (cpus > 1) | 1236 | if (cpus > 1) |
1250 | cpumask_clear_cpu(cpu, policy->cpus); | 1237 | cpumask_clear_cpu(cpu, policy->cpus); |
1251 | unlock_policy_rwsem_write(cpu); | 1238 | up_write(&policy->rwsem); |
1252 | 1239 | ||
1253 | /* If cpu is last user of policy, free policy */ | 1240 | /* If cpu is last user of policy, free policy */ |
1254 | if (cpus == 1) { | 1241 | if (cpus == 1) { |
1255 | if (cpufreq_driver->target) { | 1242 | if (has_target()) { |
1256 | ret = __cpufreq_governor(policy, | 1243 | ret = __cpufreq_governor(policy, |
1257 | CPUFREQ_GOV_POLICY_EXIT); | 1244 | CPUFREQ_GOV_POLICY_EXIT); |
1258 | if (ret) { | 1245 | if (ret) { |
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1263 | } | 1250 | } |
1264 | 1251 | ||
1265 | if (!frozen) { | 1252 | if (!frozen) { |
1266 | lock_policy_rwsem_read(cpu); | 1253 | down_read(&policy->rwsem); |
1267 | kobj = &policy->kobj; | 1254 | kobj = &policy->kobj; |
1268 | cmp = &policy->kobj_unregister; | 1255 | cmp = &policy->kobj_unregister; |
1269 | unlock_policy_rwsem_read(cpu); | 1256 | up_read(&policy->rwsem); |
1270 | kobject_put(kobj); | 1257 | kobject_put(kobj); |
1271 | 1258 | ||
1272 | /* | 1259 | /* |
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1295 | if (!frozen) | 1282 | if (!frozen) |
1296 | cpufreq_policy_free(policy); | 1283 | cpufreq_policy_free(policy); |
1297 | } else { | 1284 | } else { |
1298 | if (cpufreq_driver->target) { | 1285 | if (has_target()) { |
1299 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || | 1286 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || |
1300 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { | 1287 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { |
1301 | pr_err("%s: Failed to start governor\n", | 1288 | pr_err("%s: Failed to start governor\n", |
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1310 | } | 1297 | } |
1311 | 1298 | ||
1312 | /** | 1299 | /** |
1313 | * __cpufreq_remove_dev - remove a CPU device | 1300 | * cpufreq_remove_dev - remove a CPU device |
1314 | * | 1301 | * |
1315 | * Removes the cpufreq interface for a CPU device. | 1302 | * Removes the cpufreq interface for a CPU device. |
1316 | * Caller should already have policy_rwsem in write mode for this CPU. | ||
1317 | * This routine frees the rwsem before returning. | ||
1318 | */ | 1303 | */ |
1319 | static inline int __cpufreq_remove_dev(struct device *dev, | ||
1320 | struct subsys_interface *sif, | ||
1321 | bool frozen) | ||
1322 | { | ||
1323 | int ret; | ||
1324 | |||
1325 | ret = __cpufreq_remove_dev_prepare(dev, sif, frozen); | ||
1326 | |||
1327 | if (!ret) | ||
1328 | ret = __cpufreq_remove_dev_finish(dev, sif, frozen); | ||
1329 | |||
1330 | return ret; | ||
1331 | } | ||
1332 | |||
1333 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | 1304 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) |
1334 | { | 1305 | { |
1335 | unsigned int cpu = dev->id; | 1306 | unsigned int cpu = dev->id; |
1336 | int retval; | 1307 | int ret; |
1337 | 1308 | ||
1338 | if (cpu_is_offline(cpu)) | 1309 | if (cpu_is_offline(cpu)) |
1339 | return 0; | 1310 | return 0; |
1340 | 1311 | ||
1341 | retval = __cpufreq_remove_dev(dev, sif, false); | 1312 | ret = __cpufreq_remove_dev_prepare(dev, sif, false); |
1342 | return retval; | 1313 | |
1314 | if (!ret) | ||
1315 | ret = __cpufreq_remove_dev_finish(dev, sif, false); | ||
1316 | |||
1317 | return ret; | ||
1343 | } | 1318 | } |
1344 | 1319 | ||
1345 | static void handle_update(struct work_struct *work) | 1320 | static void handle_update(struct work_struct *work) |
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu) | |||
1458 | */ | 1433 | */ |
1459 | unsigned int cpufreq_get(unsigned int cpu) | 1434 | unsigned int cpufreq_get(unsigned int cpu) |
1460 | { | 1435 | { |
1436 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1461 | unsigned int ret_freq = 0; | 1437 | unsigned int ret_freq = 0; |
1462 | 1438 | ||
1463 | if (cpufreq_disabled() || !cpufreq_driver) | 1439 | if (cpufreq_disabled() || !cpufreq_driver) |
1464 | return -ENOENT; | 1440 | return -ENOENT; |
1465 | 1441 | ||
1442 | BUG_ON(!policy); | ||
1443 | |||
1466 | if (!down_read_trylock(&cpufreq_rwsem)) | 1444 | if (!down_read_trylock(&cpufreq_rwsem)) |
1467 | return 0; | 1445 | return 0; |
1468 | 1446 | ||
1469 | if (unlikely(lock_policy_rwsem_read(cpu))) | 1447 | down_read(&policy->rwsem); |
1470 | goto out_policy; | ||
1471 | 1448 | ||
1472 | ret_freq = __cpufreq_get(cpu); | 1449 | ret_freq = __cpufreq_get(cpu); |
1473 | 1450 | ||
1474 | unlock_policy_rwsem_read(cpu); | 1451 | up_read(&policy->rwsem); |
1475 | |||
1476 | out_policy: | ||
1477 | up_read(&cpufreq_rwsem); | 1452 | up_read(&cpufreq_rwsem); |
1478 | 1453 | ||
1479 | return ret_freq; | 1454 | return ret_freq; |
@@ -1681,12 +1656,75 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1681 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", | 1656 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", |
1682 | policy->cpu, target_freq, relation, old_target_freq); | 1657 | policy->cpu, target_freq, relation, old_target_freq); |
1683 | 1658 | ||
1659 | /* | ||
1660 | * This might look like a redundant call as we are checking it again | ||
1661 | * after finding the index. But it is left intentionally for cases where | ||
1662 | * exactly the same frequency is requested again, saving a few function | ||
1663 | * calls. | ||
1664 | */ | ||
1684 | if (target_freq == policy->cur) | 1665 | if (target_freq == policy->cur) |
1685 | return 0; | 1666 | return 0; |
1686 | 1667 | ||
1687 | if (cpufreq_driver->target) | 1668 | if (cpufreq_driver->target) |
1688 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1669 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1670 | else if (cpufreq_driver->target_index) { | ||
1671 | struct cpufreq_frequency_table *freq_table; | ||
1672 | struct cpufreq_freqs freqs; | ||
1673 | bool notify; | ||
1674 | int index; | ||
1675 | |||
1676 | freq_table = cpufreq_frequency_get_table(policy->cpu); | ||
1677 | if (unlikely(!freq_table)) { | ||
1678 | pr_err("%s: Unable to find freq_table\n", __func__); | ||
1679 | goto out; | ||
1680 | } | ||
1681 | |||
1682 | retval = cpufreq_frequency_table_target(policy, freq_table, | ||
1683 | target_freq, relation, &index); | ||
1684 | if (unlikely(retval)) { | ||
1685 | pr_err("%s: Unable to find matching freq\n", __func__); | ||
1686 | goto out; | ||
1687 | } | ||
1688 | |||
1689 | if (freq_table[index].frequency == policy->cur) { | ||
1690 | retval = 0; | ||
1691 | goto out; | ||
1692 | } | ||
1693 | |||
1694 | notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); | ||
1695 | |||
1696 | if (notify) { | ||
1697 | freqs.old = policy->cur; | ||
1698 | freqs.new = freq_table[index].frequency; | ||
1699 | freqs.flags = 0; | ||
1700 | |||
1701 | pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", | ||
1702 | __func__, policy->cpu, freqs.old, | ||
1703 | freqs.new); | ||
1704 | |||
1705 | cpufreq_notify_transition(policy, &freqs, | ||
1706 | CPUFREQ_PRECHANGE); | ||
1707 | } | ||
1708 | |||
1709 | retval = cpufreq_driver->target_index(policy, index); | ||
1710 | if (retval) | ||
1711 | pr_err("%s: Failed to change cpu frequency: %d\n", | ||
1712 | __func__, retval); | ||
1713 | |||
1714 | if (notify) { | ||
1715 | /* | ||
1716 | * Notify with old freq in case we failed to change | ||
1717 | * frequency | ||
1718 | */ | ||
1719 | if (retval) | ||
1720 | freqs.new = freqs.old; | ||
1721 | |||
1722 | cpufreq_notify_transition(policy, &freqs, | ||
1723 | CPUFREQ_POSTCHANGE); | ||
1724 | } | ||
1725 | } | ||
1689 | 1726 | ||
1727 | out: | ||
1690 | return retval; | 1728 | return retval; |
1691 | } | 1729 | } |
1692 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); | 1730 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); |
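With the hunk above, the core resolves target_freq to a table index, sends the CPUFREQ_PRECHANGE/POSTCHANGE notifications itself (unless the driver sets CPUFREQ_ASYNC_NOTIFICATION), and then calls the driver's ->target_index(policy, index). What is left on the driver side is roughly the sketch below; my_freq_table and my_write_divider() are hypothetical stand-ins for a driver's own table and hardware programming:

#include <linux/cpufreq.h>

/* Hypothetical driver state: a two-entry table and a register-write
 * helper; the core has already validated 'index' against the table. */
static struct cpufreq_frequency_table my_freq_table[] = {
	{ .driver_data = 0, .frequency = 200000 },
	{ .driver_data = 1, .frequency = 400000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static void my_write_divider(unsigned int div)
{
	/* platform-specific register write would go here */
}

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* no notifications here: __cpufreq_driver_target() sends them */
	my_write_divider(my_freq_table[index].driver_data);
	return 0;
}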
@@ -1697,14 +1735,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1697 | { | 1735 | { |
1698 | int ret = -EINVAL; | 1736 | int ret = -EINVAL; |
1699 | 1737 | ||
1700 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) | 1738 | down_write(&policy->rwsem); |
1701 | goto fail; | ||
1702 | 1739 | ||
1703 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1740 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1704 | 1741 | ||
1705 | unlock_policy_rwsem_write(policy->cpu); | 1742 | up_write(&policy->rwsem); |
1706 | 1743 | ||
1707 | fail: | ||
1708 | return ret; | 1744 | return ret; |
1709 | } | 1745 | } |
1710 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1746 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
@@ -1871,10 +1907,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
1871 | EXPORT_SYMBOL(cpufreq_get_policy); | 1907 | EXPORT_SYMBOL(cpufreq_get_policy); |
1872 | 1908 | ||
1873 | /* | 1909 | /* |
1874 | * data : current policy. | 1910 | * policy : current policy. |
1875 | * policy : policy to be set. | 1911 | * new_policy: policy to be set. |
1876 | */ | 1912 | */ |
1877 | static int __cpufreq_set_policy(struct cpufreq_policy *policy, | 1913 | static int cpufreq_set_policy(struct cpufreq_policy *policy, |
1878 | struct cpufreq_policy *new_policy) | 1914 | struct cpufreq_policy *new_policy) |
1879 | { | 1915 | { |
1880 | int ret = 0, failed = 1; | 1916 | int ret = 0, failed = 1; |
@@ -1934,10 +1970,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, | |||
1934 | /* end old governor */ | 1970 | /* end old governor */ |
1935 | if (policy->governor) { | 1971 | if (policy->governor) { |
1936 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 1972 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
1937 | unlock_policy_rwsem_write(new_policy->cpu); | 1973 | up_write(&policy->rwsem); |
1938 | __cpufreq_governor(policy, | 1974 | __cpufreq_governor(policy, |
1939 | CPUFREQ_GOV_POLICY_EXIT); | 1975 | CPUFREQ_GOV_POLICY_EXIT); |
1940 | lock_policy_rwsem_write(new_policy->cpu); | 1976 | down_write(&policy->rwsem); |
1941 | } | 1977 | } |
1942 | 1978 | ||
1943 | /* start new governor */ | 1979 | /* start new governor */ |
@@ -1946,10 +1982,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, | |||
1946 | if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { | 1982 | if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { |
1947 | failed = 0; | 1983 | failed = 0; |
1948 | } else { | 1984 | } else { |
1949 | unlock_policy_rwsem_write(new_policy->cpu); | 1985 | up_write(&policy->rwsem); |
1950 | __cpufreq_governor(policy, | 1986 | __cpufreq_governor(policy, |
1951 | CPUFREQ_GOV_POLICY_EXIT); | 1987 | CPUFREQ_GOV_POLICY_EXIT); |
1952 | lock_policy_rwsem_write(new_policy->cpu); | 1988 | down_write(&policy->rwsem); |
1953 | } | 1989 | } |
1954 | } | 1990 | } |
1955 | 1991 | ||
@@ -1995,10 +2031,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1995 | goto no_policy; | 2031 | goto no_policy; |
1996 | } | 2032 | } |
1997 | 2033 | ||
1998 | if (unlikely(lock_policy_rwsem_write(cpu))) { | 2034 | down_write(&policy->rwsem); |
1999 | ret = -EINVAL; | ||
2000 | goto fail; | ||
2001 | } | ||
2002 | 2035 | ||
2003 | pr_debug("updating policy for CPU %u\n", cpu); | 2036 | pr_debug("updating policy for CPU %u\n", cpu); |
2004 | memcpy(&new_policy, policy, sizeof(*policy)); | 2037 | memcpy(&new_policy, policy, sizeof(*policy)); |
@@ -2017,17 +2050,16 @@ int cpufreq_update_policy(unsigned int cpu) | |||
2017 | pr_debug("Driver did not initialize current freq"); | 2050 | pr_debug("Driver did not initialize current freq"); |
2018 | policy->cur = new_policy.cur; | 2051 | policy->cur = new_policy.cur; |
2019 | } else { | 2052 | } else { |
2020 | if (policy->cur != new_policy.cur && cpufreq_driver->target) | 2053 | if (policy->cur != new_policy.cur && has_target()) |
2021 | cpufreq_out_of_sync(cpu, policy->cur, | 2054 | cpufreq_out_of_sync(cpu, policy->cur, |
2022 | new_policy.cur); | 2055 | new_policy.cur); |
2023 | } | 2056 | } |
2024 | } | 2057 | } |
2025 | 2058 | ||
2026 | ret = __cpufreq_set_policy(policy, &new_policy); | 2059 | ret = cpufreq_set_policy(policy, &new_policy); |
2027 | 2060 | ||
2028 | unlock_policy_rwsem_write(cpu); | 2061 | up_write(&policy->rwsem); |
2029 | 2062 | ||
2030 | fail: | ||
2031 | cpufreq_cpu_put(policy); | 2063 | cpufreq_cpu_put(policy); |
2032 | no_policy: | 2064 | no_policy: |
2033 | return ret; | 2065 | return ret; |
@@ -2096,7 +2128,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2096 | return -ENODEV; | 2128 | return -ENODEV; |
2097 | 2129 | ||
2098 | if (!driver_data || !driver_data->verify || !driver_data->init || | 2130 | if (!driver_data || !driver_data->verify || !driver_data->init || |
2099 | ((!driver_data->setpolicy) && (!driver_data->target))) | 2131 | !(driver_data->setpolicy || driver_data->target_index || |
2132 | driver_data->target)) | ||
2100 | return -EINVAL; | 2133 | return -EINVAL; |
2101 | 2134 | ||
2102 | pr_debug("trying to register driver %s\n", driver_data->name); | 2135 | pr_debug("trying to register driver %s\n", driver_data->name); |
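The registration check above is relaxed so that drivers converted to ->target_index can register: ->verify and ->init stay mandatory, plus at least one scaling method. Expressed as a standalone predicate (illustration only, not a helper the patch adds):

#include <linux/cpufreq.h>

static bool cpufreq_driver_usable(struct cpufreq_driver *drv)
{
	return drv && drv->verify && drv->init &&
	       (drv->setpolicy || drv->target || drv->target_index);
}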
@@ -2183,14 +2216,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | |||
2183 | 2216 | ||
2184 | static int __init cpufreq_core_init(void) | 2217 | static int __init cpufreq_core_init(void) |
2185 | { | 2218 | { |
2186 | int cpu; | ||
2187 | |||
2188 | if (cpufreq_disabled()) | 2219 | if (cpufreq_disabled()) |
2189 | return -ENODEV; | 2220 | return -ENODEV; |
2190 | 2221 | ||
2191 | for_each_possible_cpu(cpu) | ||
2192 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | ||
2193 | |||
2194 | cpufreq_global_kobject = kobject_create(); | 2222 | cpufreq_global_kobject = kobject_create(); |
2195 | BUG_ON(!cpufreq_global_kobject); | 2223 | BUG_ON(!cpufreq_global_kobject); |
2196 | register_syscore_ops(&cpufreq_syscore_ops); | 2224 | register_syscore_ops(&cpufreq_syscore_ops); |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index f62d822048e6..218460fcd2e4 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -80,13 +80,18 @@ static void cs_check_cpu(int cpu, unsigned int load) | |||
80 | 80 | ||
81 | /* Check for frequency decrease */ | 81 | /* Check for frequency decrease */ |
82 | if (load < cs_tuners->down_threshold) { | 82 | if (load < cs_tuners->down_threshold) { |
83 | unsigned int freq_target; | ||
83 | /* | 84 | /* |
84 | * if we cannot reduce the frequency anymore, break out early | 85 | * if we cannot reduce the frequency anymore, break out early |
85 | */ | 86 | */ |
86 | if (policy->cur == policy->min) | 87 | if (policy->cur == policy->min) |
87 | return; | 88 | return; |
88 | 89 | ||
89 | dbs_info->requested_freq -= get_freq_target(cs_tuners, policy); | 90 | freq_target = get_freq_target(cs_tuners, policy); |
91 | if (dbs_info->requested_freq > freq_target) | ||
92 | dbs_info->requested_freq -= freq_target; | ||
93 | else | ||
94 | dbs_info->requested_freq = policy->min; | ||
90 | 95 | ||
91 | __cpufreq_driver_target(policy, dbs_info->requested_freq, | 96 | __cpufreq_driver_target(policy, dbs_info->requested_freq, |
92 | CPUFREQ_RELATION_L); | 97 | CPUFREQ_RELATION_L); |
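The conservative governor fix above guards the frequency decrease: dbs_info->requested_freq is an unsigned int, so subtracting a freq_target larger than it would wrap around rather than drop below policy->min. A self-contained illustration with hypothetical values, in kHz:

#include <linux/kernel.h>

static unsigned int clamped_decrease(void)
{
	unsigned int requested_freq = 300000;
	unsigned int freq_target = 500000;
	unsigned int policy_min = 200000;

	if (requested_freq > freq_target)
		requested_freq -= freq_target;
	else
		requested_freq = policy_min;	/* 300000 - 500000 would wrap
						 * to 4294767296 */
	return requested_freq;
}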
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 88cd39f7b0e9..b5f2b8618949 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -191,7 +191,10 @@ struct common_dbs_data { | |||
191 | struct attribute_group *attr_group_gov_sys; /* one governor - system */ | 191 | struct attribute_group *attr_group_gov_sys; /* one governor - system */ |
192 | struct attribute_group *attr_group_gov_pol; /* one governor - policy */ | 192 | struct attribute_group *attr_group_gov_pol; /* one governor - policy */ |
193 | 193 | ||
194 | /* Common data for platforms that don't set have_governor_per_policy */ | 194 | /* |
195 | * Common data for platforms that don't set | ||
196 | * CPUFREQ_HAVE_GOVERNOR_PER_POLICY | ||
197 | */ | ||
195 | struct dbs_data *gdbs_data; | 198 | struct dbs_data *gdbs_data; |
196 | 199 | ||
197 | struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); | 200 | struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 32f26f6e17c5..18d409189092 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load) | |||
168 | dbs_info->rate_mult = | 168 | dbs_info->rate_mult = |
169 | od_tuners->sampling_down_factor; | 169 | od_tuners->sampling_down_factor; |
170 | dbs_freq_increase(policy, policy->max); | 170 | dbs_freq_increase(policy, policy->max); |
171 | return; | ||
172 | } else { | 171 | } else { |
173 | /* Calculate the next frequency proportional to load */ | 172 | /* Calculate the next frequency proportional to load */ |
174 | unsigned int freq_next; | 173 | unsigned int freq_next; |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 03078090b5f7..4dbf1db16aca 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
38 | if (!per_cpu(cpu_is_managed, policy->cpu)) | 38 | if (!per_cpu(cpu_is_managed, policy->cpu)) |
39 | goto err; | 39 | goto err; |
40 | 40 | ||
41 | /* | ||
42 | * We're safe from concurrent calls to ->target() here | ||
43 | * as we hold the userspace_mutex lock. If we were calling | ||
44 | * cpufreq_driver_target, a deadlock situation might occur: | ||
45 | * A: cpufreq_set (lock userspace_mutex) -> | ||
46 | * cpufreq_driver_target(lock policy->lock) | ||
47 | * B: cpufreq_set_policy(lock policy->lock) -> | ||
48 | * __cpufreq_governor -> | ||
49 | * cpufreq_governor_userspace (lock userspace_mutex) | ||
50 | */ | ||
51 | ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); | 41 | ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); |
52 | |||
53 | err: | 42 | err: |
54 | mutex_unlock(&userspace_mutex); | 43 | mutex_unlock(&userspace_mutex); |
55 | return ret; | 44 | return ret; |
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c index cb8276dd19ca..86559040c54c 100644 --- a/drivers/cpufreq/cris-artpec3-cpufreq.c +++ b/drivers/cpufreq/cris-artpec3-cpufreq.c | |||
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) | |||
27 | return clk_ctrl.pll ? 200000 : 6000; | 27 | return clk_ctrl.pll ? 200000 : 6000; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | 30 | static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) |
31 | unsigned int state) | ||
32 | { | 31 | { |
33 | struct cpufreq_freqs freqs; | ||
34 | reg_clkgen_rw_clk_ctrl clk_ctrl; | 32 | reg_clkgen_rw_clk_ctrl clk_ctrl; |
35 | clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); | 33 | clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); |
36 | 34 | ||
37 | freqs.old = cris_freq_get_cpu_frequency(policy->cpu); | ||
38 | freqs.new = cris_freq_table[state].frequency; | ||
39 | |||
40 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
41 | |||
42 | local_irq_disable(); | 35 | local_irq_disable(); |
43 | 36 | ||
44 | /* Even though we may be SMP they will share the same clock | 37 | /* Even though we may be SMP they will share the same clock |
@@ -51,67 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
51 | 44 | ||
52 | local_irq_enable(); | 45 | local_irq_enable(); |
53 | 46 | ||
54 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
55 | }; | ||
56 | |||
57 | static int cris_freq_verify(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); | ||
60 | } | ||
61 | |||
62 | static int cris_freq_target(struct cpufreq_policy *policy, | ||
63 | unsigned int target_freq, | ||
64 | unsigned int relation) | ||
65 | { | ||
66 | unsigned int newstate = 0; | ||
67 | |||
68 | if (cpufreq_frequency_table_target(policy, cris_freq_table, | ||
69 | target_freq, relation, &newstate)) | ||
70 | return -EINVAL; | ||
71 | |||
72 | cris_freq_set_cpu_state(policy, newstate); | ||
73 | |||
74 | return 0; | 47 | return 0; |
75 | } | 48 | } |
76 | 49 | ||
77 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) | 50 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) |
78 | { | 51 | { |
79 | int result; | 52 | return cpufreq_generic_init(policy, cris_freq_table, 1000000); |
80 | |||
81 | /* cpuinfo and default policy values */ | ||
82 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | ||
83 | policy->cur = cris_freq_get_cpu_frequency(0); | ||
84 | |||
85 | result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); | ||
86 | if (result) | ||
87 | return (result); | ||
88 | |||
89 | cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | |||
95 | static int cris_freq_cpu_exit(struct cpufreq_policy *policy) | ||
96 | { | ||
97 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
98 | return 0; | ||
99 | } | 53 | } |
100 | 54 | ||
101 | |||
102 | static struct freq_attr *cris_freq_attr[] = { | ||
103 | &cpufreq_freq_attr_scaling_available_freqs, | ||
104 | NULL, | ||
105 | }; | ||
106 | |||
107 | static struct cpufreq_driver cris_freq_driver = { | 55 | static struct cpufreq_driver cris_freq_driver = { |
108 | .get = cris_freq_get_cpu_frequency, | 56 | .get = cris_freq_get_cpu_frequency, |
109 | .verify = cris_freq_verify, | 57 | .verify = cpufreq_generic_frequency_table_verify, |
110 | .target = cris_freq_target, | 58 | .target_index = cris_freq_target, |
111 | .init = cris_freq_cpu_init, | 59 | .init = cris_freq_cpu_init, |
112 | .exit = cris_freq_cpu_exit, | 60 | .exit = cpufreq_generic_exit, |
113 | .name = "cris_freq", | 61 | .name = "cris_freq", |
114 | .attr = cris_freq_attr, | 62 | .attr = cpufreq_generic_attr, |
115 | }; | 63 | }; |
116 | 64 | ||
117 | static int __init cris_freq_init(void) | 65 | static int __init cris_freq_init(void) |
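The cris-artpec3 conversion above is the template repeated in the drivers that follow (cris-etraxfs, davinci, dbx500, elanfreq, exynos, ...): ->init() collapses to cpufreq_generic_init(policy, table, transition_latency), the table-based verify/exit/attr boilerplate moves to the generic helpers, and ->target becomes ->target_index. A compressed sketch with placeholder names and a made-up table:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table my_freq_table[] = {
	{ .frequency = 6000 },			/* kHz, made-up entries */
	{ .frequency = 200000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static unsigned int my_get_speed(unsigned int cpu)
{
	return 200000;				/* stub hardware readback, kHz */
}

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return 0;				/* hardware programming goes here */
}

static int my_cpu_init(struct cpufreq_policy *policy)
{
	/* 1000000 ns (1 ms) transition latency, as in the cris drivers */
	return cpufreq_generic_init(policy, my_freq_table, 1000000);
}

static struct cpufreq_driver my_driver = {
	.get		= my_get_speed,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_target_index,
	.init		= my_cpu_init,
	.exit		= cpufreq_generic_exit,
	.name		= "my_freq",
	.attr		= cpufreq_generic_attr,
};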
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c index 72328f77dc53..26d940d40b1d 100644 --- a/drivers/cpufreq/cris-etraxfs-cpufreq.c +++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c | |||
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) | |||
27 | return clk_ctrl.pll ? 200000 : 6000; | 27 | return clk_ctrl.pll ? 200000 : 6000; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | 30 | static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) |
31 | unsigned int state) | ||
32 | { | 31 | { |
33 | struct cpufreq_freqs freqs; | ||
34 | reg_config_rw_clk_ctrl clk_ctrl; | 32 | reg_config_rw_clk_ctrl clk_ctrl; |
35 | clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); | 33 | clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); |
36 | 34 | ||
37 | freqs.old = cris_freq_get_cpu_frequency(policy->cpu); | ||
38 | freqs.new = cris_freq_table[state].frequency; | ||
39 | |||
40 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
41 | |||
42 | local_irq_disable(); | 35 | local_irq_disable(); |
43 | 36 | ||
44 | /* Even though we may be SMP they will share the same clock | 37 | /* Even though we may be SMP they will share the same clock |
@@ -51,64 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
51 | 44 | ||
52 | local_irq_enable(); | 45 | local_irq_enable(); |
53 | 46 | ||
54 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
55 | }; | ||
56 | |||
57 | static int cris_freq_verify(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); | ||
60 | } | ||
61 | |||
62 | static int cris_freq_target(struct cpufreq_policy *policy, | ||
63 | unsigned int target_freq, unsigned int relation) | ||
64 | { | ||
65 | unsigned int newstate = 0; | ||
66 | |||
67 | if (cpufreq_frequency_table_target | ||
68 | (policy, cris_freq_table, target_freq, relation, &newstate)) | ||
69 | return -EINVAL; | ||
70 | |||
71 | cris_freq_set_cpu_state(policy, newstate); | ||
72 | |||
73 | return 0; | 47 | return 0; |
74 | } | 48 | } |
75 | 49 | ||
76 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) | 50 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) |
77 | { | 51 | { |
78 | int result; | 52 | return cpufreq_generic_init(policy, cris_freq_table, 1000000); |
79 | |||
80 | /* cpuinfo and default policy values */ | ||
81 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | ||
82 | policy->cur = cris_freq_get_cpu_frequency(0); | ||
83 | |||
84 | result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); | ||
85 | if (result) | ||
86 | return (result); | ||
87 | |||
88 | cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); | ||
89 | |||
90 | return 0; | ||
91 | } | 53 | } |
92 | 54 | ||
93 | static int cris_freq_cpu_exit(struct cpufreq_policy *policy) | ||
94 | { | ||
95 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static struct freq_attr *cris_freq_attr[] = { | ||
100 | &cpufreq_freq_attr_scaling_available_freqs, | ||
101 | NULL, | ||
102 | }; | ||
103 | |||
104 | static struct cpufreq_driver cris_freq_driver = { | 55 | static struct cpufreq_driver cris_freq_driver = { |
105 | .get = cris_freq_get_cpu_frequency, | 56 | .get = cris_freq_get_cpu_frequency, |
106 | .verify = cris_freq_verify, | 57 | .verify = cpufreq_generic_frequency_table_verify, |
107 | .target = cris_freq_target, | 58 | .target_index = cris_freq_target, |
108 | .init = cris_freq_cpu_init, | 59 | .init = cris_freq_cpu_init, |
109 | .exit = cris_freq_cpu_exit, | 60 | .exit = cpufreq_generic_exit, |
110 | .name = "cris_freq", | 61 | .name = "cris_freq", |
111 | .attr = cris_freq_attr, | 62 | .attr = cpufreq_generic_attr, |
112 | }; | 63 | }; |
113 | 64 | ||
114 | static int __init cris_freq_init(void) | 65 | static int __init cris_freq_init(void) |
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index 551dd655c6f2..5e8a854381b7 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c | |||
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy) | |||
50 | if (policy->cpu) | 50 | if (policy->cpu) |
51 | return -EINVAL; | 51 | return -EINVAL; |
52 | 52 | ||
53 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 53 | cpufreq_verify_within_cpu_limits(policy); |
54 | policy->cpuinfo.max_freq); | ||
55 | |||
56 | policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; | 54 | policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; |
57 | policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; | 55 | policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; |
58 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 56 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
@@ -68,58 +66,38 @@ static unsigned int davinci_getspeed(unsigned int cpu) | |||
68 | return clk_get_rate(cpufreq.armclk) / 1000; | 66 | return clk_get_rate(cpufreq.armclk) / 1000; |
69 | } | 67 | } |
70 | 68 | ||
71 | static int davinci_target(struct cpufreq_policy *policy, | 69 | static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) |
72 | unsigned int target_freq, unsigned int relation) | ||
73 | { | 70 | { |
74 | int ret = 0; | ||
75 | unsigned int idx; | ||
76 | struct cpufreq_freqs freqs; | ||
77 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; | 71 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; |
78 | struct clk *armclk = cpufreq.armclk; | 72 | struct clk *armclk = cpufreq.armclk; |
73 | unsigned int old_freq, new_freq; | ||
74 | int ret = 0; | ||
79 | 75 | ||
80 | freqs.old = davinci_getspeed(0); | 76 | old_freq = davinci_getspeed(0); |
81 | freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; | 77 | new_freq = pdata->freq_table[idx].frequency; |
82 | |||
83 | if (freqs.old == freqs.new) | ||
84 | return ret; | ||
85 | |||
86 | dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); | ||
87 | |||
88 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, | ||
89 | freqs.new, relation, &idx); | ||
90 | if (ret) | ||
91 | return -EINVAL; | ||
92 | |||
93 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
94 | 78 | ||
95 | /* if moving to higher frequency, up the voltage beforehand */ | 79 | /* if moving to higher frequency, up the voltage beforehand */ |
96 | if (pdata->set_voltage && freqs.new > freqs.old) { | 80 | if (pdata->set_voltage && new_freq > old_freq) { |
97 | ret = pdata->set_voltage(idx); | 81 | ret = pdata->set_voltage(idx); |
98 | if (ret) | 82 | if (ret) |
99 | goto out; | 83 | return ret; |
100 | } | 84 | } |
101 | 85 | ||
102 | ret = clk_set_rate(armclk, idx); | 86 | ret = clk_set_rate(armclk, idx); |
103 | if (ret) | 87 | if (ret) |
104 | goto out; | 88 | return ret; |
105 | 89 | ||
106 | if (cpufreq.asyncclk) { | 90 | if (cpufreq.asyncclk) { |
107 | ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); | 91 | ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate); |
108 | if (ret) | 92 | if (ret) |
109 | goto out; | 93 | return ret; |
110 | } | 94 | } |
111 | 95 | ||
112 | /* if moving to lower freq, lower the voltage after lowering freq */ | 96 | /* if moving to lower freq, lower the voltage after lowering freq */ |
113 | if (pdata->set_voltage && freqs.new < freqs.old) | 97 | if (pdata->set_voltage && new_freq < old_freq) |
114 | pdata->set_voltage(idx); | 98 | pdata->set_voltage(idx); |
115 | 99 | ||
116 | out: | 100 | return 0; |
117 | if (ret) | ||
118 | freqs.new = freqs.old; | ||
119 | |||
120 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
121 | |||
122 | return ret; | ||
123 | } | 101 | } |
124 | 102 | ||
125 | static int davinci_cpu_init(struct cpufreq_policy *policy) | 103 | static int davinci_cpu_init(struct cpufreq_policy *policy) |
@@ -138,47 +116,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) | |||
138 | return result; | 116 | return result; |
139 | } | 117 | } |
140 | 118 | ||
141 | policy->cur = davinci_getspeed(0); | ||
142 | |||
143 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
144 | if (result) { | ||
145 | pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", | ||
146 | __func__); | ||
147 | return result; | ||
148 | } | ||
149 | |||
150 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
151 | |||
152 | /* | 119 | /* |
153 | * Time measurement across the target() function yields ~1500-1800 us | 120 | * Time measurement across the target() function yields ~1500-1800 us |
154 | * with no drivers on the notification list. | 121 | * with no drivers on the notification list. |
155 | * Set the latency to 2000 us to accommodate the addition of drivers | 122 | * Set the latency to 2000 us to accommodate the addition of drivers |
156 | * to the pre/post change notification list. | 123 | * to the pre/post change notification list. |
157 | */ | 124 | */ |
158 | policy->cpuinfo.transition_latency = 2000 * 1000; | 125 | return cpufreq_generic_init(policy, freq_table, 2000 * 1000); |
159 | return 0; | ||
160 | } | 126 | } |
161 | 127 | ||
162 | static int davinci_cpu_exit(struct cpufreq_policy *policy) | ||
163 | { | ||
164 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static struct freq_attr *davinci_cpufreq_attr[] = { | ||
169 | &cpufreq_freq_attr_scaling_available_freqs, | ||
170 | NULL, | ||
171 | }; | ||
172 | |||
173 | static struct cpufreq_driver davinci_driver = { | 128 | static struct cpufreq_driver davinci_driver = { |
174 | .flags = CPUFREQ_STICKY, | 129 | .flags = CPUFREQ_STICKY, |
175 | .verify = davinci_verify_speed, | 130 | .verify = davinci_verify_speed, |
176 | .target = davinci_target, | 131 | .target_index = davinci_target, |
177 | .get = davinci_getspeed, | 132 | .get = davinci_getspeed, |
178 | .init = davinci_cpu_init, | 133 | .init = davinci_cpu_init, |
179 | .exit = davinci_cpu_exit, | 134 | .exit = cpufreq_generic_exit, |
180 | .name = "davinci", | 135 | .name = "davinci", |
181 | .attr = davinci_cpufreq_attr, | 136 | .attr = cpufreq_generic_attr, |
182 | }; | 137 | }; |
183 | 138 | ||
184 | static int __init davinci_cpufreq_probe(struct platform_device *pdev) | 139 | static int __init davinci_cpufreq_probe(struct platform_device *pdev) |
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 26321cdc1946..0e67ab96321a 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c | |||
@@ -19,51 +19,11 @@ | |||
19 | static struct cpufreq_frequency_table *freq_table; | 19 | static struct cpufreq_frequency_table *freq_table; |
20 | static struct clk *armss_clk; | 20 | static struct clk *armss_clk; |
21 | 21 | ||
22 | static struct freq_attr *dbx500_cpufreq_attr[] = { | ||
23 | &cpufreq_freq_attr_scaling_available_freqs, | ||
24 | NULL, | ||
25 | }; | ||
26 | |||
27 | static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
28 | { | ||
29 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
30 | } | ||
31 | |||
32 | static int dbx500_cpufreq_target(struct cpufreq_policy *policy, | 22 | static int dbx500_cpufreq_target(struct cpufreq_policy *policy, |
33 | unsigned int target_freq, | 23 | unsigned int index) |
34 | unsigned int relation) | ||
35 | { | 24 | { |
36 | struct cpufreq_freqs freqs; | ||
37 | unsigned int idx; | ||
38 | int ret; | ||
39 | |||
40 | /* Lookup the next frequency */ | ||
41 | if (cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
42 | relation, &idx)) | ||
43 | return -EINVAL; | ||
44 | |||
45 | freqs.old = policy->cur; | ||
46 | freqs.new = freq_table[idx].frequency; | ||
47 | |||
48 | if (freqs.old == freqs.new) | ||
49 | return 0; | ||
50 | |||
51 | /* pre-change notification */ | ||
52 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
53 | |||
54 | /* update armss clk frequency */ | 25 | /* update armss clk frequency */ |
55 | ret = clk_set_rate(armss_clk, freqs.new * 1000); | 26 | return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); |
56 | |||
57 | if (ret) { | ||
58 | pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n", | ||
59 | freqs.new * 1000, ret); | ||
60 | freqs.new = freqs.old; | ||
61 | } | ||
62 | |||
63 | /* post change notification */ | ||
64 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
65 | |||
66 | return ret; | ||
67 | } | 27 | } |
68 | 28 | ||
69 | static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) | 29 | static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) |
@@ -84,43 +44,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) | |||
84 | 44 | ||
85 | static int dbx500_cpufreq_init(struct cpufreq_policy *policy) | 45 | static int dbx500_cpufreq_init(struct cpufreq_policy *policy) |
86 | { | 46 | { |
87 | int res; | 47 | return cpufreq_generic_init(policy, freq_table, 20 * 1000); |
88 | |||
89 | /* get policy fields based on the table */ | ||
90 | res = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
91 | if (!res) | ||
92 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
93 | else { | ||
94 | pr_err("dbx500-cpufreq: Failed to read policy table\n"); | ||
95 | return res; | ||
96 | } | ||
97 | |||
98 | policy->min = policy->cpuinfo.min_freq; | ||
99 | policy->max = policy->cpuinfo.max_freq; | ||
100 | policy->cur = dbx500_cpufreq_getspeed(policy->cpu); | ||
101 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
102 | |||
103 | /* | ||
104 | * FIXME : Need to take time measurement across the target() | ||
105 | * function with no/some/all drivers in the notification | ||
106 | * list. | ||
107 | */ | ||
108 | policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ | ||
109 | |||
110 | /* policy sharing between dual CPUs */ | ||
111 | cpumask_setall(policy->cpus); | ||
112 | |||
113 | return 0; | ||
114 | } | 48 | } |
115 | 49 | ||
116 | static struct cpufreq_driver dbx500_cpufreq_driver = { | 50 | static struct cpufreq_driver dbx500_cpufreq_driver = { |
117 | .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, | 51 | .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, |
118 | .verify = dbx500_cpufreq_verify_speed, | 52 | .verify = cpufreq_generic_frequency_table_verify, |
119 | .target = dbx500_cpufreq_target, | 53 | .target_index = dbx500_cpufreq_target, |
120 | .get = dbx500_cpufreq_getspeed, | 54 | .get = dbx500_cpufreq_getspeed, |
121 | .init = dbx500_cpufreq_init, | 55 | .init = dbx500_cpufreq_init, |
122 | .name = "DBX500", | 56 | .name = "DBX500", |
123 | .attr = dbx500_cpufreq_attr, | 57 | .attr = cpufreq_generic_attr, |
124 | }; | 58 | }; |
125 | 59 | ||
126 | static int dbx500_cpufreq_probe(struct platform_device *pdev) | 60 | static int dbx500_cpufreq_probe(struct platform_device *pdev) |
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 09f64cc83019..9012b8bb6b64 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c | |||
@@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur, | |||
107 | struct cpufreq_policy *policy, | 107 | struct cpufreq_policy *policy, |
108 | u32 dest_state) | 108 | u32 dest_state) |
109 | { | 109 | { |
110 | struct cpufreq_freqs freqs; | ||
111 | u32 lo, hi; | 110 | u32 lo, hi; |
112 | int err = 0; | ||
113 | int i; | 111 | int i; |
114 | 112 | ||
115 | freqs.old = eps_get(policy->cpu); | ||
116 | freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); | ||
117 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
118 | |||
119 | /* Wait while CPU is busy */ | 113 | /* Wait while CPU is busy */ |
120 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 114 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
121 | i = 0; | 115 | i = 0; |
@@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur, | |||
124 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 118 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
125 | i++; | 119 | i++; |
126 | if (unlikely(i > 64)) { | 120 | if (unlikely(i > 64)) { |
127 | err = -ENODEV; | 121 | return -ENODEV; |
128 | goto postchange; | ||
129 | } | 122 | } |
130 | } | 123 | } |
131 | /* Set new multiplier and voltage */ | 124 | /* Set new multiplier and voltage */ |
@@ -137,16 +130,10 @@ static int eps_set_state(struct eps_cpu_data *centaur, | |||
137 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 130 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
138 | i++; | 131 | i++; |
139 | if (unlikely(i > 64)) { | 132 | if (unlikely(i > 64)) { |
140 | err = -ENODEV; | 133 | return -ENODEV; |
141 | goto postchange; | ||
142 | } | 134 | } |
143 | } while (lo & ((1 << 16) | (1 << 17))); | 135 | } while (lo & ((1 << 16) | (1 << 17))); |
144 | 136 | ||
145 | /* Return current frequency */ | ||
146 | postchange: | ||
147 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
148 | freqs.new = centaur->fsb * ((lo >> 8) & 0xff); | ||
149 | |||
150 | #ifdef DEBUG | 137 | #ifdef DEBUG |
151 | { | 138 | { |
152 | u8 current_multiplier, current_voltage; | 139 | u8 current_multiplier, current_voltage; |
@@ -161,19 +148,12 @@ postchange: | |||
161 | current_multiplier); | 148 | current_multiplier); |
162 | } | 149 | } |
163 | #endif | 150 | #endif |
164 | if (err) | 151 | return 0; |
165 | freqs.new = freqs.old; | ||
166 | |||
167 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
168 | return err; | ||
169 | } | 152 | } |
170 | 153 | ||
171 | static int eps_target(struct cpufreq_policy *policy, | 154 | static int eps_target(struct cpufreq_policy *policy, unsigned int index) |
172 | unsigned int target_freq, | ||
173 | unsigned int relation) | ||
174 | { | 155 | { |
175 | struct eps_cpu_data *centaur; | 156 | struct eps_cpu_data *centaur; |
176 | unsigned int newstate = 0; | ||
177 | unsigned int cpu = policy->cpu; | 157 | unsigned int cpu = policy->cpu; |
178 | unsigned int dest_state; | 158 | unsigned int dest_state; |
179 | int ret; | 159 | int ret; |
@@ -182,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy, | |||
182 | return -ENODEV; | 162 | return -ENODEV; |
183 | centaur = eps_cpu[cpu]; | 163 | centaur = eps_cpu[cpu]; |
184 | 164 | ||
185 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
186 | &eps_cpu[cpu]->freq_table[0], | ||
187 | target_freq, | ||
188 | relation, | ||
189 | &newstate))) { | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
193 | /* Make frequency transition */ | 165 | /* Make frequency transition */ |
194 | dest_state = centaur->freq_table[newstate].driver_data & 0xffff; | 166 | dest_state = centaur->freq_table[index].driver_data & 0xffff; |
195 | ret = eps_set_state(centaur, policy, dest_state); | 167 | ret = eps_set_state(centaur, policy, dest_state); |
196 | if (ret) | 168 | if (ret) |
197 | printk(KERN_ERR "eps: Timeout!\n"); | 169 | printk(KERN_ERR "eps: Timeout!\n"); |
198 | return ret; | 170 | return ret; |
199 | } | 171 | } |
200 | 172 | ||
201 | static int eps_verify(struct cpufreq_policy *policy) | ||
202 | { | ||
203 | return cpufreq_frequency_table_verify(policy, | ||
204 | &eps_cpu[policy->cpu]->freq_table[0]); | ||
205 | } | ||
206 | |||
207 | static int eps_cpu_init(struct cpufreq_policy *policy) | 173 | static int eps_cpu_init(struct cpufreq_policy *policy) |
208 | { | 174 | { |
209 | unsigned int i; | 175 | unsigned int i; |
@@ -401,15 +367,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
401 | } | 367 | } |
402 | 368 | ||
403 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ | 369 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ |
404 | policy->cur = fsb * current_multiplier; | ||
405 | 370 | ||
406 | ret = cpufreq_frequency_table_cpuinfo(policy, ¢aur->freq_table[0]); | 371 | ret = cpufreq_table_validate_and_show(policy, ¢aur->freq_table[0]); |
407 | if (ret) { | 372 | if (ret) { |
408 | kfree(centaur); | 373 | kfree(centaur); |
409 | return ret; | 374 | return ret; |
410 | } | 375 | } |
411 | 376 | ||
412 | cpufreq_frequency_table_get_attr(¢aur->freq_table[0], policy->cpu); | ||
413 | return 0; | 377 | return 0; |
414 | } | 378 | } |
415 | 379 | ||
@@ -424,19 +388,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) | |||
424 | return 0; | 388 | return 0; |
425 | } | 389 | } |
426 | 390 | ||
427 | static struct freq_attr *eps_attr[] = { | ||
428 | &cpufreq_freq_attr_scaling_available_freqs, | ||
429 | NULL, | ||
430 | }; | ||
431 | |||
432 | static struct cpufreq_driver eps_driver = { | 391 | static struct cpufreq_driver eps_driver = { |
433 | .verify = eps_verify, | 392 | .verify = cpufreq_generic_frequency_table_verify, |
434 | .target = eps_target, | 393 | .target_index = eps_target, |
435 | .init = eps_cpu_init, | 394 | .init = eps_cpu_init, |
436 | .exit = eps_cpu_exit, | 395 | .exit = eps_cpu_exit, |
437 | .get = eps_get, | 396 | .get = eps_get, |
438 | .name = "e_powersaver", | 397 | .name = "e_powersaver", |
439 | .attr = eps_attr, | 398 | .attr = cpufreq_generic_attr, |
440 | }; | 399 | }; |
441 | 400 | ||
442 | 401 | ||
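e_powersaver builds its frequency table at runtime and sets its own transition latency, so it cannot use cpufreq_generic_init(); instead the old cpufreq_frequency_table_cpuinfo() + cpufreq_frequency_table_get_attr() pair is folded into a single cpufreq_table_validate_and_show() call. A minimal sketch of that ->init() shape (names and table contents are placeholders):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table my_freq_table[] = {
	{ .frequency = 400000 },		/* kHz, made-up entries */
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 140000;	/* ns, as in eps_cpu_init() */

	/* validates the table, fills the cpuinfo limits and exposes
	 * scaling_available_frequencies in one call */
	return cpufreq_table_validate_and_show(policy, my_freq_table);
}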
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 823a400d98fd..de08acff5101 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c | |||
@@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | |||
105 | } | 105 | } |
106 | 106 | ||
107 | 107 | ||
108 | /** | 108 | static int elanfreq_target(struct cpufreq_policy *policy, |
109 | * elanfreq_set_cpu_frequency: Change the CPU core frequency | 109 | unsigned int state) |
110 | * @cpu: cpu number | ||
111 | * @freq: frequency in kHz | ||
112 | * | ||
113 | * This function takes a frequency value and changes the CPU frequency | ||
114 | * according to this. Note that the frequency has to be checked by | ||
115 | * elanfreq_validatespeed() for correctness! | ||
116 | * | ||
117 | * There is no return value. | ||
118 | */ | ||
119 | |||
120 | static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, | ||
121 | unsigned int state) | ||
122 | { | 110 | { |
123 | struct cpufreq_freqs freqs; | ||
124 | |||
125 | freqs.old = elanfreq_get_cpu_frequency(0); | ||
126 | freqs.new = elan_multiplier[state].clock; | ||
127 | |||
128 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
129 | |||
130 | printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", | ||
131 | elan_multiplier[state].clock); | ||
132 | |||
133 | |||
134 | /* | 111 | /* |
135 | * Access to the Elan's internal registers is indexed via | 112 | * Access to the Elan's internal registers is indexed via |
136 | * 0x22: Chip Setup & Control Register Index Register (CSCI) | 113 | * 0x22: Chip Setup & Control Register Index Register (CSCI) |
@@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, | |||
161 | udelay(10000); | 138 | udelay(10000); |
162 | local_irq_enable(); | 139 | local_irq_enable(); |
163 | 140 | ||
164 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
165 | }; | ||
166 | |||
167 | |||
168 | /** | ||
169 | * elanfreq_validatespeed: test if frequency range is valid | ||
170 | * @policy: the policy to validate | ||
171 | * | ||
172 | * This function checks if a given frequency range in kHz is valid | ||
173 | * for the hardware supported by the driver. | ||
174 | */ | ||
175 | |||
176 | static int elanfreq_verify(struct cpufreq_policy *policy) | ||
177 | { | ||
178 | return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); | ||
179 | } | ||
180 | |||
181 | static int elanfreq_target(struct cpufreq_policy *policy, | ||
182 | unsigned int target_freq, | ||
183 | unsigned int relation) | ||
184 | { | ||
185 | unsigned int newstate = 0; | ||
186 | |||
187 | if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], | ||
188 | target_freq, relation, &newstate)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | elanfreq_set_cpu_state(policy, newstate); | ||
192 | |||
193 | return 0; | 141 | return 0; |
194 | } | 142 | } |
195 | |||
196 | |||
197 | /* | 143 | /* |
198 | * Module init and exit code | 144 | * Module init and exit code |
199 | */ | 145 | */ |
@@ -202,7 +148,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
202 | { | 148 | { |
203 | struct cpuinfo_x86 *c = &cpu_data(0); | 149 | struct cpuinfo_x86 *c = &cpu_data(0); |
204 | unsigned int i; | 150 | unsigned int i; |
205 | int result; | ||
206 | 151 | ||
207 | /* capability check */ | 152 | /* capability check */ |
208 | if ((c->x86_vendor != X86_VENDOR_AMD) || | 153 | if ((c->x86_vendor != X86_VENDOR_AMD) || |
@@ -221,21 +166,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
221 | 166 | ||
222 | /* cpuinfo and default policy values */ | 167 | /* cpuinfo and default policy values */ |
223 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 168 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
224 | policy->cur = elanfreq_get_cpu_frequency(0); | ||
225 | |||
226 | result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); | ||
227 | if (result) | ||
228 | return result; | ||
229 | 169 | ||
230 | cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); | 170 | return cpufreq_table_validate_and_show(policy, elanfreq_table); |
231 | return 0; | ||
232 | } | ||
233 | |||
234 | |||
235 | static int elanfreq_cpu_exit(struct cpufreq_policy *policy) | ||
236 | { | ||
237 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
238 | return 0; | ||
239 | } | 171 | } |
240 | 172 | ||
241 | 173 | ||
@@ -261,20 +193,14 @@ __setup("elanfreq=", elanfreq_setup); | |||
261 | #endif | 193 | #endif |
262 | 194 | ||
263 | 195 | ||
264 | static struct freq_attr *elanfreq_attr[] = { | ||
265 | &cpufreq_freq_attr_scaling_available_freqs, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | |||
270 | static struct cpufreq_driver elanfreq_driver = { | 196 | static struct cpufreq_driver elanfreq_driver = { |
271 | .get = elanfreq_get_cpu_frequency, | 197 | .get = elanfreq_get_cpu_frequency, |
272 | .verify = elanfreq_verify, | 198 | .verify = cpufreq_generic_frequency_table_verify, |
273 | .target = elanfreq_target, | 199 | .target_index = elanfreq_target, |
274 | .init = elanfreq_cpu_init, | 200 | .init = elanfreq_cpu_init, |
275 | .exit = elanfreq_cpu_exit, | 201 | .exit = cpufreq_generic_exit, |
276 | .name = "elanfreq", | 202 | .name = "elanfreq", |
277 | .attr = elanfreq_attr, | 203 | .attr = cpufreq_generic_attr, |
278 | }; | 204 | }; |
279 | 205 | ||
280 | static const struct x86_cpu_id elan_id[] = { | 206 | static const struct x86_cpu_id elan_id[] = { |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 0fac34439e31..7b6dc06b1bd4 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -25,18 +25,11 @@ | |||
25 | static struct exynos_dvfs_info *exynos_info; | 25 | static struct exynos_dvfs_info *exynos_info; |
26 | 26 | ||
27 | static struct regulator *arm_regulator; | 27 | static struct regulator *arm_regulator; |
28 | static struct cpufreq_freqs freqs; | ||
29 | 28 | ||
30 | static unsigned int locking_frequency; | 29 | static unsigned int locking_frequency; |
31 | static bool frequency_locked; | 30 | static bool frequency_locked; |
32 | static DEFINE_MUTEX(cpufreq_lock); | 31 | static DEFINE_MUTEX(cpufreq_lock); |
33 | 32 | ||
34 | static int exynos_verify_speed(struct cpufreq_policy *policy) | ||
35 | { | ||
36 | return cpufreq_frequency_table_verify(policy, | ||
37 | exynos_info->freq_table); | ||
38 | } | ||
39 | |||
40 | static unsigned int exynos_getspeed(unsigned int cpu) | 33 | static unsigned int exynos_getspeed(unsigned int cpu) |
41 | { | 34 | { |
42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; | 35 | return clk_get_rate(exynos_info->cpu_clk) / 1000; |
@@ -65,21 +58,18 @@ static int exynos_cpufreq_scale(unsigned int target_freq) | |||
65 | struct cpufreq_policy *policy = cpufreq_cpu_get(0); | 58 | struct cpufreq_policy *policy = cpufreq_cpu_get(0); |
66 | unsigned int arm_volt, safe_arm_volt = 0; | 59 | unsigned int arm_volt, safe_arm_volt = 0; |
67 | unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; | 60 | unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; |
61 | unsigned int old_freq; | ||
68 | int index, old_index; | 62 | int index, old_index; |
69 | int ret = 0; | 63 | int ret = 0; |
70 | 64 | ||
71 | freqs.old = policy->cur; | 65 | old_freq = policy->cur; |
72 | freqs.new = target_freq; | ||
73 | |||
74 | if (freqs.new == freqs.old) | ||
75 | goto out; | ||
76 | 66 | ||
77 | /* | 67 | /* |
78 | * The policy max have been changed so that we cannot get proper | 68 | * The policy max have been changed so that we cannot get proper |
79 | * old_index with cpufreq_frequency_table_target(). Thus, ignore | 69 | * old_index with cpufreq_frequency_table_target(). Thus, ignore |
80 | * policy and get the index from the raw freqeuncy table. | 70 | * policy and get the index from the raw freqeuncy table. |
81 | */ | 71 | */ |
82 | old_index = exynos_cpufreq_get_index(freqs.old); | 72 | old_index = exynos_cpufreq_get_index(old_freq); |
83 | if (old_index < 0) { | 73 | if (old_index < 0) { |
84 | ret = old_index; | 74 | ret = old_index; |
85 | goto out; | 75 | goto out; |
@@ -104,17 +94,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq) | |||
104 | } | 94 | } |
105 | arm_volt = volt_table[index]; | 95 | arm_volt = volt_table[index]; |
106 | 96 | ||
107 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
108 | |||
109 | /* When the new frequency is higher than current frequency */ | 97 | /* When the new frequency is higher than current frequency */ |
110 | if ((freqs.new > freqs.old) && !safe_arm_volt) { | 98 | if ((target_freq > old_freq) && !safe_arm_volt) { |
111 | /* First, raise the voltage before increasing the frequency */ | 99 | /* First, raise the voltage before increasing the frequency */ |
112 | ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); | 100 | ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); |
113 | if (ret) { | 101 | if (ret) { |
114 | pr_err("%s: failed to set cpu voltage to %d\n", | 102 | pr_err("%s: failed to set cpu voltage to %d\n", |
115 | __func__, arm_volt); | 103 | __func__, arm_volt); |
116 | freqs.new = freqs.old; | 104 | return ret; |
117 | goto post_notify; | ||
118 | } | 105 | } |
119 | } | 106 | } |
120 | 107 | ||
@@ -124,24 +111,17 @@ static int exynos_cpufreq_scale(unsigned int target_freq) | |||
124 | if (ret) { | 111 | if (ret) { |
125 | pr_err("%s: failed to set cpu voltage to %d\n", | 112 | pr_err("%s: failed to set cpu voltage to %d\n", |
126 | __func__, safe_arm_volt); | 113 | __func__, safe_arm_volt); |
127 | freqs.new = freqs.old; | 114 | return ret; |
128 | goto post_notify; | ||
129 | } | 115 | } |
130 | } | 116 | } |
131 | 117 | ||
132 | exynos_info->set_freq(old_index, index); | 118 | exynos_info->set_freq(old_index, index); |
133 | 119 | ||
134 | post_notify: | ||
135 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
136 | |||
137 | if (ret) | ||
138 | goto out; | ||
139 | |||
140 | /* When the new frequency is lower than current frequency */ | 120 | /* When the new frequency is lower than current frequency */ |
141 | if ((freqs.new < freqs.old) || | 121 | if ((target_freq < old_freq) || |
142 | ((freqs.new > freqs.old) && safe_arm_volt)) { | 122 | ((target_freq > old_freq) && safe_arm_volt)) { |
143 | /* down the voltage after frequency change */ | 123 | /* down the voltage after frequency change */ |
144 | regulator_set_voltage(arm_regulator, arm_volt, | 124 | ret = regulator_set_voltage(arm_regulator, arm_volt, |
145 | arm_volt); | 125 | arm_volt); |
146 | if (ret) { | 126 | if (ret) { |
147 | pr_err("%s: failed to set cpu voltage to %d\n", | 127 | pr_err("%s: failed to set cpu voltage to %d\n", |
@@ -151,19 +131,14 @@ post_notify: | |||
151 | } | 131 | } |
152 | 132 | ||
153 | out: | 133 | out: |
154 | |||
155 | cpufreq_cpu_put(policy); | 134 | cpufreq_cpu_put(policy); |
156 | 135 | ||
157 | return ret; | 136 | return ret; |
158 | } | 137 | } |
159 | 138 | ||
160 | static int exynos_target(struct cpufreq_policy *policy, | 139 | static int exynos_target(struct cpufreq_policy *policy, unsigned int index) |
161 | unsigned int target_freq, | ||
162 | unsigned int relation) | ||
163 | { | 140 | { |
164 | struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; | 141 | struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; |
165 | unsigned int index; | ||
166 | unsigned int new_freq; | ||
167 | int ret = 0; | 142 | int ret = 0; |
168 | 143 | ||
169 | mutex_lock(&cpufreq_lock); | 144 | mutex_lock(&cpufreq_lock); |
@@ -171,15 +146,7 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
171 | if (frequency_locked) | 146 | if (frequency_locked) |
172 | goto out; | 147 | goto out; |
173 | 148 | ||
174 | if (cpufreq_frequency_table_target(policy, freq_table, | 149 | ret = exynos_cpufreq_scale(freq_table[index].frequency); |
175 | target_freq, relation, &index)) { | ||
176 | ret = -EINVAL; | ||
177 | goto out; | ||
178 | } | ||
179 | |||
180 | new_freq = freq_table[index].frequency; | ||
181 | |||
182 | ret = exynos_cpufreq_scale(new_freq); | ||
183 | 150 | ||
184 | out: | 151 | out: |
185 | mutex_unlock(&cpufreq_lock); | 152 | mutex_unlock(&cpufreq_lock); |
@@ -247,38 +214,18 @@ static struct notifier_block exynos_cpufreq_nb = { | |||
247 | 214 | ||
248 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | 215 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) |
249 | { | 216 | { |
250 | policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); | 217 | return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); |
251 | |||
252 | cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); | ||
253 | |||
254 | /* set the transition latency value */ | ||
255 | policy->cpuinfo.transition_latency = 100000; | ||
256 | |||
257 | cpumask_setall(policy->cpus); | ||
258 | |||
259 | return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); | ||
260 | } | ||
261 | |||
262 | static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
263 | { | ||
264 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
265 | return 0; | ||
266 | } | 218 | } |
267 | 219 | ||
268 | static struct freq_attr *exynos_cpufreq_attr[] = { | ||
269 | &cpufreq_freq_attr_scaling_available_freqs, | ||
270 | NULL, | ||
271 | }; | ||
272 | |||
273 | static struct cpufreq_driver exynos_driver = { | 220 | static struct cpufreq_driver exynos_driver = { |
274 | .flags = CPUFREQ_STICKY, | 221 | .flags = CPUFREQ_STICKY, |
275 | .verify = exynos_verify_speed, | 222 | .verify = cpufreq_generic_frequency_table_verify, |
276 | .target = exynos_target, | 223 | .target_index = exynos_target, |
277 | .get = exynos_getspeed, | 224 | .get = exynos_getspeed, |
278 | .init = exynos_cpufreq_cpu_init, | 225 | .init = exynos_cpufreq_cpu_init, |
279 | .exit = exynos_cpufreq_cpu_exit, | 226 | .exit = cpufreq_generic_exit, |
280 | .name = "exynos_cpufreq", | 227 | .name = "exynos_cpufreq", |
281 | .attr = exynos_cpufreq_attr, | 228 | .attr = cpufreq_generic_attr, |
282 | #ifdef CONFIG_PM | 229 | #ifdef CONFIG_PM |
283 | .suspend = exynos_cpufreq_suspend, | 230 | .suspend = exynos_cpufreq_suspend, |
284 | .resume = exynos_cpufreq_resume, | 231 | .resume = exynos_cpufreq_resume, |
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index add7fbec4fc9..f2c75065ce19 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c | |||
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index) | |||
81 | 81 | ||
82 | static void exynos4210_set_apll(unsigned int index) | 82 | static void exynos4210_set_apll(unsigned int index) |
83 | { | 83 | { |
84 | unsigned int tmp; | 84 | unsigned int tmp, freq = apll_freq_4210[index].freq; |
85 | 85 | ||
86 | /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ | 86 | /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ |
87 | clk_set_parent(moutcore, mout_mpll); | 87 | clk_set_parent(moutcore, mout_mpll); |
88 | 88 | ||
89 | do { | 89 | do { |
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index) | |||
92 | tmp &= 0x7; | 92 | tmp &= 0x7; |
93 | } while (tmp != 0x2); | 93 | } while (tmp != 0x2); |
94 | 94 | ||
95 | /* 2. Set APLL Lock time */ | 95 | clk_set_rate(mout_apll, freq * 1000); |
96 | __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK); | ||
97 | |||
98 | /* 3. Change PLL PMS values */ | ||
99 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
100 | tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); | ||
101 | tmp |= apll_freq_4210[index].mps; | ||
102 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
103 | 96 | ||
104 | /* 4. wait_lock_time */ | 97 | /* MUX_CORE_SEL = APLL */ |
105 | do { | ||
106 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
107 | } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); | ||
108 | |||
109 | /* 5. MUX_CORE_SEL = APLL */ | ||
110 | clk_set_parent(moutcore, mout_apll); | 98 | clk_set_parent(moutcore, mout_apll); |
111 | 99 | ||
112 | do { | 100 | do { |
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index) | |||
115 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 103 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
116 | } | 104 | } |
117 | 105 | ||
118 | static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) | ||
119 | { | ||
120 | unsigned int old_pm = apll_freq_4210[old_index].mps >> 8; | ||
121 | unsigned int new_pm = apll_freq_4210[new_index].mps >> 8; | ||
122 | |||
123 | return (old_pm == new_pm) ? 0 : 1; | ||
124 | } | ||
125 | |||
126 | static void exynos4210_set_frequency(unsigned int old_index, | 106 | static void exynos4210_set_frequency(unsigned int old_index, |
127 | unsigned int new_index) | 107 | unsigned int new_index) |
128 | { | 108 | { |
129 | unsigned int tmp; | ||
130 | |||
131 | if (old_index > new_index) { | 109 | if (old_index > new_index) { |
132 | if (!exynos4210_pms_change(old_index, new_index)) { | 110 | exynos4210_set_clkdiv(new_index); |
133 | /* 1. Change the system clock divider values */ | 111 | exynos4210_set_apll(new_index); |
134 | exynos4210_set_clkdiv(new_index); | ||
135 | |||
136 | /* 2. Change just s value in apll m,p,s value */ | ||
137 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
138 | tmp &= ~(0x7 << 0); | ||
139 | tmp |= apll_freq_4210[new_index].mps & 0x7; | ||
140 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
141 | } else { | ||
142 | /* Clock Configuration Procedure */ | ||
143 | /* 1. Change the system clock divider values */ | ||
144 | exynos4210_set_clkdiv(new_index); | ||
145 | /* 2. Change the apll m,p,s value */ | ||
146 | exynos4210_set_apll(new_index); | ||
147 | } | ||
148 | } else if (old_index < new_index) { | 112 | } else if (old_index < new_index) { |
149 | if (!exynos4210_pms_change(old_index, new_index)) { | 113 | exynos4210_set_apll(new_index); |
150 | /* 1. Change just s value in apll m,p,s value */ | 114 | exynos4210_set_clkdiv(new_index); |
151 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
152 | tmp &= ~(0x7 << 0); | ||
153 | tmp |= apll_freq_4210[new_index].mps & 0x7; | ||
154 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
155 | |||
156 | /* 2. Change the system clock divider values */ | ||
157 | exynos4210_set_clkdiv(new_index); | ||
158 | } else { | ||
159 | /* Clock Configuration Procedure */ | ||
160 | /* 1. Change the apll m,p,s value */ | ||
161 | exynos4210_set_apll(new_index); | ||
162 | /* 2. Change the system clock divider values */ | ||
163 | exynos4210_set_clkdiv(new_index); | ||
164 | } | ||
165 | } | 115 | } |
166 | } | 116 | } |
167 | 117 | ||
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) | |||
194 | info->volt_table = exynos4210_volt_table; | 144 | info->volt_table = exynos4210_volt_table; |
195 | info->freq_table = exynos4210_freq_table; | 145 | info->freq_table = exynos4210_freq_table; |
196 | info->set_freq = exynos4210_set_frequency; | 146 | info->set_freq = exynos4210_set_frequency; |
197 | info->need_apll_change = exynos4210_pms_change; | ||
198 | 147 | ||
199 | return 0; | 148 | return 0; |
200 | 149 | ||
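The exynos4210 hunks above drop the hand-rolled APLL sequence (programming the lock time, rewriting the M/P/S bits, polling the LOCKED bit) in favour of clk_set_rate() on mout_apll, with ARMCLK parked on MPLL while the PLL retunes. The resulting sequence, using the driver's clock names and omitting the mux status polling for brevity:

#include <linux/clk.h>

static void my_set_apll(struct clk *moutcore, struct clk *mout_mpll,
			struct clk *mout_apll, unsigned int freq_khz)
{
	clk_set_parent(moutcore, mout_mpll);		/* run ARMCLK from MPLL meanwhile */
	clk_set_rate(mout_apll, freq_khz * 1000UL);	/* retune APLL via the clk framework */
	clk_set_parent(moutcore, mout_apll);		/* switch ARMCLK back to APLL */
}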
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 08b7477b0aa2..8683304ce62c 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c | |||
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index) | |||
128 | 128 | ||
129 | static void exynos4x12_set_apll(unsigned int index) | 129 | static void exynos4x12_set_apll(unsigned int index) |
130 | { | 130 | { |
131 | unsigned int tmp, pdiv; | 131 | unsigned int tmp, freq = apll_freq_4x12[index].freq; |
132 | 132 | ||
133 | /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ | 133 | /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ |
134 | clk_set_parent(moutcore, mout_mpll); | 134 | clk_set_parent(moutcore, mout_mpll); |
135 | 135 | ||
136 | do { | 136 | do { |
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index) | |||
140 | tmp &= 0x7; | 140 | tmp &= 0x7; |
141 | } while (tmp != 0x2); | 141 | } while (tmp != 0x2); |
142 | 142 | ||
143 | /* 2. Set APLL Lock time */ | 143 | clk_set_rate(mout_apll, freq * 1000); |
144 | pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f); | ||
145 | 144 | ||
146 | __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK); | 145 | /* MUX_CORE_SEL = APLL */ |
147 | |||
148 | /* 3. Change PLL PMS values */ | ||
149 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
150 | tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); | ||
151 | tmp |= apll_freq_4x12[index].mps; | ||
152 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
153 | |||
154 | /* 4. wait_lock_time */ | ||
155 | do { | ||
156 | cpu_relax(); | ||
157 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
158 | } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); | ||
159 | |||
160 | /* 5. MUX_CORE_SEL = APLL */ | ||
161 | clk_set_parent(moutcore, mout_apll); | 146 | clk_set_parent(moutcore, mout_apll); |
162 | 147 | ||
163 | do { | 148 | do { |
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index) | |||
167 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 152 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
168 | } | 153 | } |
169 | 154 | ||
170 | static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index) | ||
171 | { | ||
172 | unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8; | ||
173 | unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8; | ||
174 | |||
175 | return (old_pm == new_pm) ? 0 : 1; | ||
176 | } | ||
177 | |||
178 | static void exynos4x12_set_frequency(unsigned int old_index, | 155 | static void exynos4x12_set_frequency(unsigned int old_index, |
179 | unsigned int new_index) | 156 | unsigned int new_index) |
180 | { | 157 | { |
181 | unsigned int tmp; | ||
182 | |||
183 | if (old_index > new_index) { | 158 | if (old_index > new_index) { |
184 | if (!exynos4x12_pms_change(old_index, new_index)) { | 159 | exynos4x12_set_clkdiv(new_index); |
185 | /* 1. Change the system clock divider values */ | 160 | exynos4x12_set_apll(new_index); |
186 | exynos4x12_set_clkdiv(new_index); | ||
187 | /* 2. Change just s value in apll m,p,s value */ | ||
188 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
189 | tmp &= ~(0x7 << 0); | ||
190 | tmp |= apll_freq_4x12[new_index].mps & 0x7; | ||
191 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
192 | |||
193 | } else { | ||
194 | /* Clock Configuration Procedure */ | ||
195 | /* 1. Change the system clock divider values */ | ||
196 | exynos4x12_set_clkdiv(new_index); | ||
197 | /* 2. Change the apll m,p,s value */ | ||
198 | exynos4x12_set_apll(new_index); | ||
199 | } | ||
200 | } else if (old_index < new_index) { | 161 | } else if (old_index < new_index) { |
201 | if (!exynos4x12_pms_change(old_index, new_index)) { | 162 | exynos4x12_set_apll(new_index); |
202 | /* 1. Change just s value in apll m,p,s value */ | 163 | exynos4x12_set_clkdiv(new_index); |
203 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
204 | tmp &= ~(0x7 << 0); | ||
205 | tmp |= apll_freq_4x12[new_index].mps & 0x7; | ||
206 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
207 | /* 2. Change the system clock divider values */ | ||
208 | exynos4x12_set_clkdiv(new_index); | ||
209 | } else { | ||
210 | /* Clock Configuration Procedure */ | ||
211 | /* 1. Change the apll m,p,s value */ | ||
212 | exynos4x12_set_apll(new_index); | ||
213 | /* 2. Change the system clock divider values */ | ||
214 | exynos4x12_set_clkdiv(new_index); | ||
215 | } | ||
216 | } | 164 | } |
217 | } | 165 | } |
218 | 166 | ||
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) | |||
250 | info->volt_table = exynos4x12_volt_table; | 198 | info->volt_table = exynos4x12_volt_table; |
251 | info->freq_table = exynos4x12_freq_table; | 199 | info->freq_table = exynos4x12_freq_table; |
252 | info->set_freq = exynos4x12_set_frequency; | 200 | info->set_freq = exynos4x12_set_frequency; |
253 | info->need_apll_change = exynos4x12_pms_change; | ||
254 | 201 | ||
255 | return 0; | 202 | return 0; |
256 | 203 | ||
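The exynos4x12 hunk also stops open-coding the APLL reprogramming (lock-time write, P/M/S update, lock polling) and lets the common clock framework do it through clk_set_rate(). Reconstructed from the + column above as a hedged sketch, with the status-register polling between steps elided and the clk handles passed in as parameters:

#include <linux/clk.h>

/* Sketch of the new APLL path; the CLKMUX_STATCPU polling loops from the
 * real function are omitted, and the clk handles are parameters here. */
static void example_set_apll(struct clk *core_mux, struct clk *mpll,
			     struct clk *apll, unsigned int freq_khz)
{
	/* run ARMCLK from MPLL while the APLL changes rate */
	clk_set_parent(core_mux, mpll);
	/* the APLL clk driver now owns lock time and P/M/S programming */
	clk_set_rate(apll, freq_khz * 1000UL);
	/* switch ARMCLK back to the APLL */
	clk_set_parent(core_mux, apll);
}
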
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index be5380ecdcd4..76bef8b078cb 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
23 | #include <linux/opp.h> | 23 | #include <linux/pm_opp.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | 26 | ||
@@ -118,12 +118,12 @@ static int init_div_table(void) | |||
118 | struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; | 118 | struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; |
119 | unsigned int tmp, clk_div, ema_div, freq, volt_id; | 119 | unsigned int tmp, clk_div, ema_div, freq, volt_id; |
120 | int i = 0; | 120 | int i = 0; |
121 | struct opp *opp; | 121 | struct dev_pm_opp *opp; |
122 | 122 | ||
123 | rcu_read_lock(); | 123 | rcu_read_lock(); |
124 | for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { | 124 | for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { |
125 | 125 | ||
126 | opp = opp_find_freq_exact(dvfs_info->dev, | 126 | opp = dev_pm_opp_find_freq_exact(dvfs_info->dev, |
127 | freq_tbl[i].frequency * 1000, true); | 127 | freq_tbl[i].frequency * 1000, true); |
128 | if (IS_ERR(opp)) { | 128 | if (IS_ERR(opp)) { |
129 | rcu_read_unlock(); | 129 | rcu_read_unlock(); |
@@ -142,7 +142,7 @@ static int init_div_table(void) | |||
142 | << P0_7_CSCLKDEV_SHIFT; | 142 | << P0_7_CSCLKDEV_SHIFT; |
143 | 143 | ||
144 | /* Calculate EMA */ | 144 | /* Calculate EMA */ |
145 | volt_id = opp_get_voltage(opp); | 145 | volt_id = dev_pm_opp_get_voltage(opp); |
146 | volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; | 146 | volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; |
147 | if (volt_id < PMIC_HIGH_VOLT) { | 147 | if (volt_id < PMIC_HIGH_VOLT) { |
148 | ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | | 148 | ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | |
@@ -209,38 +209,22 @@ static void exynos_enable_dvfs(void) | |||
209 | dvfs_info->base + XMU_DVFS_CTRL); | 209 | dvfs_info->base + XMU_DVFS_CTRL); |
210 | } | 210 | } |
211 | 211 | ||
212 | static int exynos_verify_speed(struct cpufreq_policy *policy) | ||
213 | { | ||
214 | return cpufreq_frequency_table_verify(policy, | ||
215 | dvfs_info->freq_table); | ||
216 | } | ||
217 | |||
218 | static unsigned int exynos_getspeed(unsigned int cpu) | 212 | static unsigned int exynos_getspeed(unsigned int cpu) |
219 | { | 213 | { |
220 | return dvfs_info->cur_frequency; | 214 | return dvfs_info->cur_frequency; |
221 | } | 215 | } |
222 | 216 | ||
223 | static int exynos_target(struct cpufreq_policy *policy, | 217 | static int exynos_target(struct cpufreq_policy *policy, unsigned int index) |
224 | unsigned int target_freq, | ||
225 | unsigned int relation) | ||
226 | { | 218 | { |
227 | unsigned int index, tmp; | 219 | unsigned int tmp; |
228 | int ret = 0, i; | 220 | int i; |
229 | struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; | 221 | struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; |
230 | 222 | ||
231 | mutex_lock(&cpufreq_lock); | 223 | mutex_lock(&cpufreq_lock); |
232 | 224 | ||
233 | ret = cpufreq_frequency_table_target(policy, freq_table, | ||
234 | target_freq, relation, &index); | ||
235 | if (ret) | ||
236 | goto out; | ||
237 | |||
238 | freqs.old = dvfs_info->cur_frequency; | 225 | freqs.old = dvfs_info->cur_frequency; |
239 | freqs.new = freq_table[index].frequency; | 226 | freqs.new = freq_table[index].frequency; |
240 | 227 | ||
241 | if (freqs.old == freqs.new) | ||
242 | goto out; | ||
243 | |||
244 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 228 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
245 | 229 | ||
246 | /* Set the target frequency in all C0_3_PSTATE register */ | 230 | /* Set the target frequency in all C0_3_PSTATE register */ |
@@ -251,9 +235,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
251 | 235 | ||
252 | __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); | 236 | __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); |
253 | } | 237 | } |
254 | out: | ||
255 | mutex_unlock(&cpufreq_lock); | 238 | mutex_unlock(&cpufreq_lock); |
256 | return ret; | 239 | return 0; |
257 | } | 240 | } |
258 | 241 | ||
259 | static void exynos_cpufreq_work(struct work_struct *work) | 242 | static void exynos_cpufreq_work(struct work_struct *work) |
@@ -324,30 +307,19 @@ static void exynos_sort_descend_freq_table(void) | |||
324 | 307 | ||
325 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | 308 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) |
326 | { | 309 | { |
327 | int ret; | 310 | return cpufreq_generic_init(policy, dvfs_info->freq_table, |
328 | 311 | dvfs_info->latency); | |
329 | ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table); | ||
330 | if (ret) { | ||
331 | dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret); | ||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | policy->cur = dvfs_info->cur_frequency; | ||
336 | policy->cpuinfo.transition_latency = dvfs_info->latency; | ||
337 | cpumask_setall(policy->cpus); | ||
338 | |||
339 | cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu); | ||
340 | |||
341 | return 0; | ||
342 | } | 312 | } |
343 | 313 | ||
344 | static struct cpufreq_driver exynos_driver = { | 314 | static struct cpufreq_driver exynos_driver = { |
345 | .flags = CPUFREQ_STICKY, | 315 | .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION, |
346 | .verify = exynos_verify_speed, | 316 | .verify = cpufreq_generic_frequency_table_verify, |
347 | .target = exynos_target, | 317 | .target_index = exynos_target, |
348 | .get = exynos_getspeed, | 318 | .get = exynos_getspeed, |
349 | .init = exynos_cpufreq_cpu_init, | 319 | .init = exynos_cpufreq_cpu_init, |
320 | .exit = cpufreq_generic_exit, | ||
350 | .name = CPUFREQ_NAME, | 321 | .name = CPUFREQ_NAME, |
322 | .attr = cpufreq_generic_attr, | ||
351 | }; | 323 | }; |
352 | 324 | ||
353 | static const struct of_device_id exynos_cpufreq_match[] = { | 325 | static const struct of_device_id exynos_cpufreq_match[] = { |
@@ -399,13 +371,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) | |||
399 | goto err_put_node; | 371 | goto err_put_node; |
400 | } | 372 | } |
401 | 373 | ||
402 | ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 374 | ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev, |
375 | &dvfs_info->freq_table); | ||
403 | if (ret) { | 376 | if (ret) { |
404 | dev_err(dvfs_info->dev, | 377 | dev_err(dvfs_info->dev, |
405 | "failed to init cpufreq table: %d\n", ret); | 378 | "failed to init cpufreq table: %d\n", ret); |
406 | goto err_put_node; | 379 | goto err_put_node; |
407 | } | 380 | } |
408 | dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev); | 381 | dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); |
409 | exynos_sort_descend_freq_table(); | 382 | exynos_sort_descend_freq_table(); |
410 | 383 | ||
411 | if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) | 384 | if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) |
@@ -454,7 +427,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) | |||
454 | return 0; | 427 | return 0; |
455 | 428 | ||
456 | err_free_table: | 429 | err_free_table: |
457 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 430 | dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); |
458 | err_put_node: | 431 | err_put_node: |
459 | of_node_put(np); | 432 | of_node_put(np); |
460 | dev_err(&pdev->dev, "%s: failed initialization\n", __func__); | 433 | dev_err(&pdev->dev, "%s: failed initialization\n", __func__); |
@@ -464,7 +437,7 @@ err_put_node: | |||
464 | static int exynos_cpufreq_remove(struct platform_device *pdev) | 437 | static int exynos_cpufreq_remove(struct platform_device *pdev) |
465 | { | 438 | { |
466 | cpufreq_unregister_driver(&exynos_driver); | 439 | cpufreq_unregister_driver(&exynos_driver); |
467 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 440 | dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); |
468 | return 0; | 441 | return 0; |
469 | } | 442 | } |
470 | 443 | ||
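The exynos5440 changes are largely the OPP API rename (opp_* to dev_pm_opp_*, struct opp to struct dev_pm_opp) plus the move to the generic cpufreq helpers. A minimal sketch of the renamed lookup pattern used in init_div_table(), under the same RCU rules; dev and freq_khz are placeholder parameters, not names from the driver:

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Sketch only; 'dev' and 'freq_khz' stand for the driver's device and a
 * frequency-table entry in kHz. */
static int example_opp_voltage(struct device *dev, unsigned int freq_khz,
			       unsigned long *volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, freq_khz * 1000UL, true);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}
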
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index f111454a7aea..3458d27f63b4 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); | |||
54 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | 54 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, |
55 | struct cpufreq_frequency_table *table) | 55 | struct cpufreq_frequency_table *table) |
56 | { | 56 | { |
57 | unsigned int next_larger = ~0; | 57 | unsigned int next_larger = ~0, freq, i = 0; |
58 | unsigned int i; | 58 | bool found = false; |
59 | unsigned int count = 0; | ||
60 | 59 | ||
61 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", | 60 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", |
62 | policy->min, policy->max, policy->cpu); | 61 | policy->min, policy->max, policy->cpu); |
63 | 62 | ||
64 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 63 | cpufreq_verify_within_cpu_limits(policy); |
65 | policy->cpuinfo.max_freq); | ||
66 | 64 | ||
67 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 65 | for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) { |
68 | unsigned int freq = table[i].frequency; | ||
69 | if (freq == CPUFREQ_ENTRY_INVALID) | 66 | if (freq == CPUFREQ_ENTRY_INVALID) |
70 | continue; | 67 | continue; |
71 | if ((freq >= policy->min) && (freq <= policy->max)) | 68 | if ((freq >= policy->min) && (freq <= policy->max)) { |
72 | count++; | 69 | found = true; |
73 | else if ((next_larger > freq) && (freq > policy->max)) | 70 | break; |
71 | } | ||
72 | |||
73 | if ((next_larger > freq) && (freq > policy->max)) | ||
74 | next_larger = freq; | 74 | next_larger = freq; |
75 | } | 75 | } |
76 | 76 | ||
77 | if (!count) | 77 | if (!found) { |
78 | policy->max = next_larger; | 78 | policy->max = next_larger; |
79 | 79 | cpufreq_verify_within_cpu_limits(policy); | |
80 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 80 | } |
81 | policy->cpuinfo.max_freq); | ||
82 | 81 | ||
83 | pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", | 82 | pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", |
84 | policy->min, policy->max, policy->cpu); | 83 | policy->min, policy->max, policy->cpu); |
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
87 | } | 86 | } |
88 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); | 87 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); |
89 | 88 | ||
89 | /* | ||
90 | * Generic routine to verify policy & frequency table, requires driver to call | ||
91 | * cpufreq_frequency_table_get_attr() prior to it. | ||
92 | */ | ||
93 | int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) | ||
94 | { | ||
95 | struct cpufreq_frequency_table *table = | ||
96 | cpufreq_frequency_get_table(policy->cpu); | ||
97 | if (!table) | ||
98 | return -ENODEV; | ||
99 | |||
100 | return cpufreq_frequency_table_verify(policy, table); | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); | ||
90 | 103 | ||
91 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | 104 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
92 | struct cpufreq_frequency_table *table, | 105 | struct cpufreq_frequency_table *table, |
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { | |||
200 | }; | 213 | }; |
201 | EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); | 214 | EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); |
202 | 215 | ||
216 | struct freq_attr *cpufreq_generic_attr[] = { | ||
217 | &cpufreq_freq_attr_scaling_available_freqs, | ||
218 | NULL, | ||
219 | }; | ||
220 | EXPORT_SYMBOL_GPL(cpufreq_generic_attr); | ||
221 | |||
203 | /* | 222 | /* |
204 | * if you use these, you must assure that the frequency table is valid | 223 | * if you use these, you must assure that the frequency table is valid |
205 | * all the time between get_attr and put_attr! | 224 | * all the time between get_attr and put_attr! |
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu) | |||
219 | } | 238 | } |
220 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 239 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
221 | 240 | ||
241 | int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, | ||
242 | struct cpufreq_frequency_table *table) | ||
243 | { | ||
244 | int ret = cpufreq_frequency_table_cpuinfo(policy, table); | ||
245 | |||
246 | if (!ret) | ||
247 | cpufreq_frequency_table_get_attr(table, policy->cpu); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); | ||
252 | |||
222 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) | 253 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) |
223 | { | 254 | { |
224 | pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", | 255 | pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", |
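The helpers added to freq_table.c above exist so table-based drivers can drop their hand-rolled verify/init/attr boilerplate, which is what the conversions elsewhere in this diff do. A hedged sketch of the resulting driver skeleton; my_freq_table, my_target and my_get are placeholders, not real symbols:

/* Sketch only: my_freq_table, my_target and my_get are placeholders. */
static struct cpufreq_frequency_table *my_freq_table;
static int my_target(struct cpufreq_policy *policy, unsigned int index);
static unsigned int my_get(unsigned int cpu);

static int my_cpu_init(struct cpufreq_policy *policy)
{
	/* validates the table, fills cpuinfo limits and registers the table
	 * for this CPU, so the generic verify and attr helpers can find it */
	return cpufreq_table_validate_and_show(policy, my_freq_table);
}

static struct cpufreq_driver my_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_target,
	.get		= my_get,
	.init		= my_cpu_init,
	.exit		= cpufreq_generic_exit,
	.name		= "example-cpufreq",
	.attr		= cpufreq_generic_attr,
};
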
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 70442c7b5e71..d83e8266a58e 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c | |||
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy, | |||
401 | 401 | ||
402 | static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | 402 | static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) |
403 | { | 403 | { |
404 | unsigned int maxfreq, curfreq; | 404 | unsigned int maxfreq; |
405 | 405 | ||
406 | if (!policy || policy->cpu != 0) | 406 | if (!policy || policy->cpu != 0) |
407 | return -ENODEV; | 407 | return -ENODEV; |
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
415 | maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; | 415 | maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; |
416 | 416 | ||
417 | stock_freq = maxfreq; | 417 | stock_freq = maxfreq; |
418 | curfreq = gx_get_cpuspeed(0); | ||
419 | 418 | ||
420 | pr_debug("cpu max frequency is %d.\n", maxfreq); | 419 | pr_debug("cpu max frequency is %d.\n", maxfreq); |
421 | pr_debug("cpu current frequency is %dkHz.\n", curfreq); | ||
422 | 420 | ||
423 | /* setup basic struct for cpufreq API */ | 421 | /* setup basic struct for cpufreq API */ |
424 | policy->cpu = 0; | 422 | policy->cpu = 0; |
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
428 | else | 426 | else |
429 | policy->min = maxfreq / POLICY_MIN_DIV; | 427 | policy->min = maxfreq / POLICY_MIN_DIV; |
430 | policy->max = maxfreq; | 428 | policy->max = maxfreq; |
431 | policy->cur = curfreq; | ||
432 | policy->cpuinfo.min_freq = maxfreq / max_duration; | 429 | policy->cpuinfo.min_freq = maxfreq / max_duration; |
433 | policy->cpuinfo.max_freq = maxfreq; | 430 | policy->cpuinfo.max_freq = maxfreq; |
434 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 431 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index 794123fcf3e3..bf8902a0866d 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c | |||
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void) | |||
66 | struct device_node *np; | 66 | struct device_node *np; |
67 | int ret; | 67 | int ret; |
68 | 68 | ||
69 | if (!of_machine_is_compatible("calxeda,highbank")) | 69 | if ((!of_machine_is_compatible("calxeda,highbank")) && |
70 | (!of_machine_is_compatible("calxeda,ecx-2000"))) | ||
70 | return -ENODEV; | 71 | return -ENODEV; |
71 | 72 | ||
72 | cpu_dev = get_cpu_device(0); | 73 | cpu_dev = get_cpu_device(0); |
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index 3e14f0317175..53c6ac637e10 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c | |||
@@ -141,7 +141,6 @@ processor_set_freq ( | |||
141 | { | 141 | { |
142 | int ret = 0; | 142 | int ret = 0; |
143 | u32 value = 0; | 143 | u32 value = 0; |
144 | struct cpufreq_freqs cpufreq_freqs; | ||
145 | cpumask_t saved_mask; | 144 | cpumask_t saved_mask; |
146 | int retval; | 145 | int retval; |
147 | 146 | ||
@@ -168,13 +167,6 @@ processor_set_freq ( | |||
168 | pr_debug("Transitioning from P%d to P%d\n", | 167 | pr_debug("Transitioning from P%d to P%d\n", |
169 | data->acpi_data.state, state); | 168 | data->acpi_data.state, state); |
170 | 169 | ||
171 | /* cpufreq frequency struct */ | ||
172 | cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; | ||
173 | cpufreq_freqs.new = data->freq_table[state].frequency; | ||
174 | |||
175 | /* notify cpufreq */ | ||
176 | cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); | ||
177 | |||
178 | /* | 170 | /* |
179 | * First we write the target state's 'control' value to the | 171 | * First we write the target state's 'control' value to the |
180 | * control_register. | 172 | * control_register. |
@@ -186,22 +178,11 @@ processor_set_freq ( | |||
186 | 178 | ||
187 | ret = processor_set_pstate(value); | 179 | ret = processor_set_pstate(value); |
188 | if (ret) { | 180 | if (ret) { |
189 | unsigned int tmp = cpufreq_freqs.new; | ||
190 | cpufreq_notify_transition(policy, &cpufreq_freqs, | ||
191 | CPUFREQ_POSTCHANGE); | ||
192 | cpufreq_freqs.new = cpufreq_freqs.old; | ||
193 | cpufreq_freqs.old = tmp; | ||
194 | cpufreq_notify_transition(policy, &cpufreq_freqs, | ||
195 | CPUFREQ_PRECHANGE); | ||
196 | cpufreq_notify_transition(policy, &cpufreq_freqs, | ||
197 | CPUFREQ_POSTCHANGE); | ||
198 | printk(KERN_WARNING "Transition failed with error %d\n", ret); | 181 | printk(KERN_WARNING "Transition failed with error %d\n", ret); |
199 | retval = -ENODEV; | 182 | retval = -ENODEV; |
200 | goto migrate_end; | 183 | goto migrate_end; |
201 | } | 184 | } |
202 | 185 | ||
203 | cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE); | ||
204 | |||
205 | data->acpi_data.state = state; | 186 | data->acpi_data.state = state; |
206 | 187 | ||
207 | retval = 0; | 188 | retval = 0; |
@@ -227,42 +208,11 @@ acpi_cpufreq_get ( | |||
227 | static int | 208 | static int |
228 | acpi_cpufreq_target ( | 209 | acpi_cpufreq_target ( |
229 | struct cpufreq_policy *policy, | 210 | struct cpufreq_policy *policy, |
230 | unsigned int target_freq, | 211 | unsigned int index) |
231 | unsigned int relation) | ||
232 | { | ||
233 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
234 | unsigned int next_state = 0; | ||
235 | unsigned int result = 0; | ||
236 | |||
237 | pr_debug("acpi_cpufreq_setpolicy\n"); | ||
238 | |||
239 | result = cpufreq_frequency_table_target(policy, | ||
240 | data->freq_table, target_freq, relation, &next_state); | ||
241 | if (result) | ||
242 | return (result); | ||
243 | |||
244 | result = processor_set_freq(data, policy, next_state); | ||
245 | |||
246 | return (result); | ||
247 | } | ||
248 | |||
249 | |||
250 | static int | ||
251 | acpi_cpufreq_verify ( | ||
252 | struct cpufreq_policy *policy) | ||
253 | { | 212 | { |
254 | unsigned int result = 0; | 213 | return processor_set_freq(acpi_io_data[policy->cpu], policy, index); |
255 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
256 | |||
257 | pr_debug("acpi_cpufreq_verify\n"); | ||
258 | |||
259 | result = cpufreq_frequency_table_verify(policy, | ||
260 | data->freq_table); | ||
261 | |||
262 | return (result); | ||
263 | } | 214 | } |
264 | 215 | ||
265 | |||
266 | static int | 216 | static int |
267 | acpi_cpufreq_cpu_init ( | 217 | acpi_cpufreq_cpu_init ( |
268 | struct cpufreq_policy *policy) | 218 | struct cpufreq_policy *policy) |
@@ -321,7 +271,6 @@ acpi_cpufreq_cpu_init ( | |||
321 | data->acpi_data.states[i].transition_latency * 1000; | 271 | data->acpi_data.states[i].transition_latency * 1000; |
322 | } | 272 | } |
323 | } | 273 | } |
324 | policy->cur = processor_get_freq(data, policy->cpu); | ||
325 | 274 | ||
326 | /* table init */ | 275 | /* table init */ |
327 | for (i = 0; i <= data->acpi_data.state_count; i++) | 276 | for (i = 0; i <= data->acpi_data.state_count; i++) |
@@ -335,7 +284,7 @@ acpi_cpufreq_cpu_init ( | |||
335 | } | 284 | } |
336 | } | 285 | } |
337 | 286 | ||
338 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 287 | result = cpufreq_table_validate_and_show(policy, data->freq_table); |
339 | if (result) { | 288 | if (result) { |
340 | goto err_freqfree; | 289 | goto err_freqfree; |
341 | } | 290 | } |
@@ -356,8 +305,6 @@ acpi_cpufreq_cpu_init ( | |||
356 | (u32) data->acpi_data.states[i].status, | 305 | (u32) data->acpi_data.states[i].status, |
357 | (u32) data->acpi_data.states[i].control); | 306 | (u32) data->acpi_data.states[i].control); |
358 | 307 | ||
359 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
360 | |||
361 | /* the first call to ->target() should result in us actually | 308 | /* the first call to ->target() should result in us actually |
362 | * writing something to the appropriate registers. */ | 309 | * writing something to the appropriate registers. */ |
363 | data->resume = 1; | 310 | data->resume = 1; |
@@ -396,20 +343,14 @@ acpi_cpufreq_cpu_exit ( | |||
396 | } | 343 | } |
397 | 344 | ||
398 | 345 | ||
399 | static struct freq_attr* acpi_cpufreq_attr[] = { | ||
400 | &cpufreq_freq_attr_scaling_available_freqs, | ||
401 | NULL, | ||
402 | }; | ||
403 | |||
404 | |||
405 | static struct cpufreq_driver acpi_cpufreq_driver = { | 346 | static struct cpufreq_driver acpi_cpufreq_driver = { |
406 | .verify = acpi_cpufreq_verify, | 347 | .verify = cpufreq_generic_frequency_table_verify, |
407 | .target = acpi_cpufreq_target, | 348 | .target_index = acpi_cpufreq_target, |
408 | .get = acpi_cpufreq_get, | 349 | .get = acpi_cpufreq_get, |
409 | .init = acpi_cpufreq_cpu_init, | 350 | .init = acpi_cpufreq_cpu_init, |
410 | .exit = acpi_cpufreq_cpu_exit, | 351 | .exit = acpi_cpufreq_cpu_exit, |
411 | .name = "acpi-cpufreq", | 352 | .name = "acpi-cpufreq", |
412 | .attr = acpi_cpufreq_attr, | 353 | .attr = cpufreq_generic_attr, |
413 | }; | 354 | }; |
414 | 355 | ||
415 | 356 | ||
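The ia64 conversion is typical of the ->target() to ->target_index() moves in this series: the core now resolves the requested frequency to a table index and issues the PRECHANGE/POSTCHANGE notifications around the call (unless the driver sets CPUFREQ_ASYNC_NOTIFICATION, as exynos5440 does above), so the callback shrinks to programming the hardware. Sketch of that contract, with program_pstate() as a placeholder:

/* Sketch only; program_pstate() is a placeholder for the hardware write. */
static int program_pstate(unsigned int cpu, unsigned int index);

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* 'index' has already been resolved and bounds-checked by the core
	 * against the table registered in ->init(); no lookup or transition
	 * notification is needed here. */
	return program_pstate(policy->cpu, index);
}
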
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index c3fd2a101ca0..4b3f18e5f36b 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/opp.h> | 16 | #include <linux/pm_opp.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/regulator/consumer.h> | 18 | #include <linux/regulator/consumer.h> |
19 | 19 | ||
@@ -35,73 +35,52 @@ static struct device *cpu_dev; | |||
35 | static struct cpufreq_frequency_table *freq_table; | 35 | static struct cpufreq_frequency_table *freq_table; |
36 | static unsigned int transition_latency; | 36 | static unsigned int transition_latency; |
37 | 37 | ||
38 | static int imx6q_verify_speed(struct cpufreq_policy *policy) | ||
39 | { | ||
40 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
41 | } | ||
42 | |||
43 | static unsigned int imx6q_get_speed(unsigned int cpu) | 38 | static unsigned int imx6q_get_speed(unsigned int cpu) |
44 | { | 39 | { |
45 | return clk_get_rate(arm_clk) / 1000; | 40 | return clk_get_rate(arm_clk) / 1000; |
46 | } | 41 | } |
47 | 42 | ||
48 | static int imx6q_set_target(struct cpufreq_policy *policy, | 43 | static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) |
49 | unsigned int target_freq, unsigned int relation) | ||
50 | { | 44 | { |
51 | struct cpufreq_freqs freqs; | 45 | struct dev_pm_opp *opp; |
52 | struct opp *opp; | ||
53 | unsigned long freq_hz, volt, volt_old; | 46 | unsigned long freq_hz, volt, volt_old; |
54 | unsigned int index; | 47 | unsigned int old_freq, new_freq; |
55 | int ret; | 48 | int ret; |
56 | 49 | ||
57 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | 50 | new_freq = freq_table[index].frequency; |
58 | relation, &index); | 51 | freq_hz = new_freq * 1000; |
59 | if (ret) { | 52 | old_freq = clk_get_rate(arm_clk) / 1000; |
60 | dev_err(cpu_dev, "failed to match target frequency %d: %d\n", | ||
61 | target_freq, ret); | ||
62 | return ret; | ||
63 | } | ||
64 | |||
65 | freqs.new = freq_table[index].frequency; | ||
66 | freq_hz = freqs.new * 1000; | ||
67 | freqs.old = clk_get_rate(arm_clk) / 1000; | ||
68 | |||
69 | if (freqs.old == freqs.new) | ||
70 | return 0; | ||
71 | 53 | ||
72 | rcu_read_lock(); | 54 | rcu_read_lock(); |
73 | opp = opp_find_freq_ceil(cpu_dev, &freq_hz); | 55 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); |
74 | if (IS_ERR(opp)) { | 56 | if (IS_ERR(opp)) { |
75 | rcu_read_unlock(); | 57 | rcu_read_unlock(); |
76 | dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); | 58 | dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); |
77 | return PTR_ERR(opp); | 59 | return PTR_ERR(opp); |
78 | } | 60 | } |
79 | 61 | ||
80 | volt = opp_get_voltage(opp); | 62 | volt = dev_pm_opp_get_voltage(opp); |
81 | rcu_read_unlock(); | 63 | rcu_read_unlock(); |
82 | volt_old = regulator_get_voltage(arm_reg); | 64 | volt_old = regulator_get_voltage(arm_reg); |
83 | 65 | ||
84 | dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", | 66 | dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", |
85 | freqs.old / 1000, volt_old / 1000, | 67 | old_freq / 1000, volt_old / 1000, |
86 | freqs.new / 1000, volt / 1000); | 68 | new_freq / 1000, volt / 1000); |
87 | |||
88 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
89 | 69 | ||
90 | /* scaling up? scale voltage before frequency */ | 70 | /* scaling up? scale voltage before frequency */ |
91 | if (freqs.new > freqs.old) { | 71 | if (new_freq > old_freq) { |
92 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); | 72 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); |
93 | if (ret) { | 73 | if (ret) { |
94 | dev_err(cpu_dev, | 74 | dev_err(cpu_dev, |
95 | "failed to scale vddarm up: %d\n", ret); | 75 | "failed to scale vddarm up: %d\n", ret); |
96 | freqs.new = freqs.old; | 76 | return ret; |
97 | goto post_notify; | ||
98 | } | 77 | } |
99 | 78 | ||
100 | /* | 79 | /* |
101 | * Need to increase vddpu and vddsoc for safety | 80 | * Need to increase vddpu and vddsoc for safety |
102 | * if we are about to run at 1.2 GHz. | 81 | * if we are about to run at 1.2 GHz. |
103 | */ | 82 | */ |
104 | if (freqs.new == FREQ_1P2_GHZ / 1000) { | 83 | if (new_freq == FREQ_1P2_GHZ / 1000) { |
105 | regulator_set_voltage_tol(pu_reg, | 84 | regulator_set_voltage_tol(pu_reg, |
106 | PU_SOC_VOLTAGE_HIGH, 0); | 85 | PU_SOC_VOLTAGE_HIGH, 0); |
107 | regulator_set_voltage_tol(soc_reg, | 86 | regulator_set_voltage_tol(soc_reg, |
@@ -121,21 +100,20 @@ static int imx6q_set_target(struct cpufreq_policy *policy, | |||
121 | clk_set_parent(step_clk, pll2_pfd2_396m_clk); | 100 | clk_set_parent(step_clk, pll2_pfd2_396m_clk); |
122 | clk_set_parent(pll1_sw_clk, step_clk); | 101 | clk_set_parent(pll1_sw_clk, step_clk); |
123 | if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { | 102 | if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { |
124 | clk_set_rate(pll1_sys_clk, freqs.new * 1000); | 103 | clk_set_rate(pll1_sys_clk, new_freq * 1000); |
125 | clk_set_parent(pll1_sw_clk, pll1_sys_clk); | 104 | clk_set_parent(pll1_sw_clk, pll1_sys_clk); |
126 | } | 105 | } |
127 | 106 | ||
128 | /* Ensure the arm clock divider is what we expect */ | 107 | /* Ensure the arm clock divider is what we expect */ |
129 | ret = clk_set_rate(arm_clk, freqs.new * 1000); | 108 | ret = clk_set_rate(arm_clk, new_freq * 1000); |
130 | if (ret) { | 109 | if (ret) { |
131 | dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); | 110 | dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); |
132 | regulator_set_voltage_tol(arm_reg, volt_old, 0); | 111 | regulator_set_voltage_tol(arm_reg, volt_old, 0); |
133 | freqs.new = freqs.old; | 112 | return ret; |
134 | goto post_notify; | ||
135 | } | 113 | } |
136 | 114 | ||
137 | /* scaling down? scale voltage after frequency */ | 115 | /* scaling down? scale voltage after frequency */ |
138 | if (freqs.new < freqs.old) { | 116 | if (new_freq < old_freq) { |
139 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); | 117 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); |
140 | if (ret) { | 118 | if (ret) { |
141 | dev_warn(cpu_dev, | 119 | dev_warn(cpu_dev, |
@@ -143,7 +121,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, | |||
143 | ret = 0; | 121 | ret = 0; |
144 | } | 122 | } |
145 | 123 | ||
146 | if (freqs.old == FREQ_1P2_GHZ / 1000) { | 124 | if (old_freq == FREQ_1P2_GHZ / 1000) { |
147 | regulator_set_voltage_tol(pu_reg, | 125 | regulator_set_voltage_tol(pu_reg, |
148 | PU_SOC_VOLTAGE_NORMAL, 0); | 126 | PU_SOC_VOLTAGE_NORMAL, 0); |
149 | regulator_set_voltage_tol(soc_reg, | 127 | regulator_set_voltage_tol(soc_reg, |
@@ -151,55 +129,28 @@ static int imx6q_set_target(struct cpufreq_policy *policy, | |||
151 | } | 129 | } |
152 | } | 130 | } |
153 | 131 | ||
154 | post_notify: | ||
155 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
156 | |||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) | ||
161 | { | ||
162 | int ret; | ||
163 | |||
164 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
165 | if (ret) { | ||
166 | dev_err(cpu_dev, "invalid frequency table: %d\n", ret); | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | policy->cpuinfo.transition_latency = transition_latency; | ||
171 | policy->cur = clk_get_rate(arm_clk) / 1000; | ||
172 | cpumask_setall(policy->cpus); | ||
173 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
174 | |||
175 | return 0; | 132 | return 0; |
176 | } | 133 | } |
177 | 134 | ||
178 | static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) | 135 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) |
179 | { | 136 | { |
180 | cpufreq_frequency_table_put_attr(policy->cpu); | 137 | return cpufreq_generic_init(policy, freq_table, transition_latency); |
181 | return 0; | ||
182 | } | 138 | } |
183 | 139 | ||
184 | static struct freq_attr *imx6q_cpufreq_attr[] = { | ||
185 | &cpufreq_freq_attr_scaling_available_freqs, | ||
186 | NULL, | ||
187 | }; | ||
188 | |||
189 | static struct cpufreq_driver imx6q_cpufreq_driver = { | 140 | static struct cpufreq_driver imx6q_cpufreq_driver = { |
190 | .verify = imx6q_verify_speed, | 141 | .verify = cpufreq_generic_frequency_table_verify, |
191 | .target = imx6q_set_target, | 142 | .target_index = imx6q_set_target, |
192 | .get = imx6q_get_speed, | 143 | .get = imx6q_get_speed, |
193 | .init = imx6q_cpufreq_init, | 144 | .init = imx6q_cpufreq_init, |
194 | .exit = imx6q_cpufreq_exit, | 145 | .exit = cpufreq_generic_exit, |
195 | .name = "imx6q-cpufreq", | 146 | .name = "imx6q-cpufreq", |
196 | .attr = imx6q_cpufreq_attr, | 147 | .attr = cpufreq_generic_attr, |
197 | }; | 148 | }; |
198 | 149 | ||
199 | static int imx6q_cpufreq_probe(struct platform_device *pdev) | 150 | static int imx6q_cpufreq_probe(struct platform_device *pdev) |
200 | { | 151 | { |
201 | struct device_node *np; | 152 | struct device_node *np; |
202 | struct opp *opp; | 153 | struct dev_pm_opp *opp; |
203 | unsigned long min_volt, max_volt; | 154 | unsigned long min_volt, max_volt; |
204 | int num, ret; | 155 | int num, ret; |
205 | 156 | ||
@@ -237,14 +188,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
237 | } | 188 | } |
238 | 189 | ||
239 | /* We expect an OPP table supplied by platform */ | 190 | /* We expect an OPP table supplied by platform */ |
240 | num = opp_get_opp_count(cpu_dev); | 191 | num = dev_pm_opp_get_opp_count(cpu_dev); |
241 | if (num < 0) { | 192 | if (num < 0) { |
242 | ret = num; | 193 | ret = num; |
243 | dev_err(cpu_dev, "no OPP table is found: %d\n", ret); | 194 | dev_err(cpu_dev, "no OPP table is found: %d\n", ret); |
244 | goto put_node; | 195 | goto put_node; |
245 | } | 196 | } |
246 | 197 | ||
247 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table); | 198 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
248 | if (ret) { | 199 | if (ret) { |
249 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); | 200 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); |
250 | goto put_node; | 201 | goto put_node; |
@@ -259,12 +210,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
259 | * same order. | 210 | * same order. |
260 | */ | 211 | */ |
261 | rcu_read_lock(); | 212 | rcu_read_lock(); |
262 | opp = opp_find_freq_exact(cpu_dev, | 213 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
263 | freq_table[0].frequency * 1000, true); | 214 | freq_table[0].frequency * 1000, true); |
264 | min_volt = opp_get_voltage(opp); | 215 | min_volt = dev_pm_opp_get_voltage(opp); |
265 | opp = opp_find_freq_exact(cpu_dev, | 216 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
266 | freq_table[--num].frequency * 1000, true); | 217 | freq_table[--num].frequency * 1000, true); |
267 | max_volt = opp_get_voltage(opp); | 218 | max_volt = dev_pm_opp_get_voltage(opp); |
268 | rcu_read_unlock(); | 219 | rcu_read_unlock(); |
269 | ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); | 220 | ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); |
270 | if (ret > 0) | 221 | if (ret > 0) |
@@ -292,7 +243,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
292 | return 0; | 243 | return 0; |
293 | 244 | ||
294 | free_freq_table: | 245 | free_freq_table: |
295 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 246 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
296 | put_node: | 247 | put_node: |
297 | of_node_put(np); | 248 | of_node_put(np); |
298 | return ret; | 249 | return ret; |
@@ -301,7 +252,7 @@ put_node: | |||
301 | static int imx6q_cpufreq_remove(struct platform_device *pdev) | 252 | static int imx6q_cpufreq_remove(struct platform_device *pdev) |
302 | { | 253 | { |
303 | cpufreq_unregister_driver(&imx6q_cpufreq_driver); | 254 | cpufreq_unregister_driver(&imx6q_cpufreq_driver); |
304 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 255 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
305 | 256 | ||
306 | return 0; | 257 | return 0; |
307 | } | 258 | } |
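With the notifier calls removed, what remains of imx6q_set_target() is the usual DVFS ordering for a regulator-supplied CPU: raise the supply before the clock when speeding up, lower it afterwards when slowing down, and unwind the voltage if the clock change fails. A condensed sketch of that ordering, with vdd and cpu_clk as placeholder handles rather than the driver's actual regulators and clock tree:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

/* Condensed sketch; 'vdd' and 'cpu_clk' are placeholder handles,
 * frequencies are in kHz and voltages in uV. */
static struct regulator *vdd;
static struct clk *cpu_clk;

static int example_scale(unsigned int old_freq, unsigned int new_freq,
			 unsigned long volt, unsigned long volt_old)
{
	int ret;

	/* scaling up? raise the supply before the clock */
	if (new_freq > old_freq) {
		ret = regulator_set_voltage_tol(vdd, volt, 0);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(cpu_clk, new_freq * 1000UL);
	if (ret) {
		/* the clock did not change, so roll the supply back */
		regulator_set_voltage_tol(vdd, volt_old, 0);
		return ret;
	}

	/* scaling down? lower the supply after the clock */
	if (new_freq < old_freq)
		regulator_set_voltage_tol(vdd, volt, 0);

	return 0;
}
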
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index 3d79bca47433..7d8ab000d317 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c | |||
@@ -60,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) | |||
60 | { | 60 | { |
61 | struct icst_vco vco; | 61 | struct icst_vco vco; |
62 | 62 | ||
63 | cpufreq_verify_within_limits(policy, | 63 | cpufreq_verify_within_cpu_limits(policy); |
64 | policy->cpuinfo.min_freq, | ||
65 | policy->cpuinfo.max_freq); | ||
66 | 64 | ||
67 | vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); | 65 | vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); |
68 | policy->max = icst_hz(&cclk_params, vco) / 1000; | 66 | policy->max = icst_hz(&cclk_params, vco) / 1000; |
@@ -70,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) | |||
70 | vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); | 68 | vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); |
71 | policy->min = icst_hz(&cclk_params, vco) / 1000; | 69 | policy->min = icst_hz(&cclk_params, vco) / 1000; |
72 | 70 | ||
73 | cpufreq_verify_within_limits(policy, | 71 | cpufreq_verify_within_cpu_limits(policy); |
74 | policy->cpuinfo.min_freq, | ||
75 | policy->cpuinfo.max_freq); | ||
76 | |||
77 | return 0; | 72 | return 0; |
78 | } | 73 | } |
79 | 74 | ||
@@ -187,10 +182,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy) | |||
187 | { | 182 | { |
188 | 183 | ||
189 | /* set default policy and cpuinfo */ | 184 | /* set default policy and cpuinfo */ |
190 | policy->cpuinfo.max_freq = 160000; | 185 | policy->max = policy->cpuinfo.max_freq = 160000; |
191 | policy->cpuinfo.min_freq = 12000; | 186 | policy->min = policy->cpuinfo.min_freq = 12000; |
192 | policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ | 187 | policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ |
193 | policy->cur = policy->min = policy->max = integrator_get(policy->cpu); | ||
194 | 188 | ||
195 | return 0; | 189 | return 0; |
196 | } | 190 | } |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb3fdc755000..5f1cbae36961 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/debugfs.h> | 27 | #include <linux/debugfs.h> |
28 | #include <linux/acpi.h> | ||
28 | #include <trace/events/power.h> | 29 | #include <trace/events/power.h> |
29 | 30 | ||
30 | #include <asm/div64.h> | 31 | #include <asm/div64.h> |
@@ -33,6 +34,8 @@ | |||
33 | 34 | ||
34 | #define SAMPLE_COUNT 3 | 35 | #define SAMPLE_COUNT 3 |
35 | 36 | ||
37 | #define BYT_RATIOS 0x66a | ||
38 | |||
36 | #define FRAC_BITS 8 | 39 | #define FRAC_BITS 8 |
37 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 40 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
38 | #define fp_toint(X) ((X) >> FRAC_BITS) | 41 | #define fp_toint(X) ((X) >> FRAC_BITS) |
@@ -78,7 +81,6 @@ struct cpudata { | |||
78 | 81 | ||
79 | struct timer_list timer; | 82 | struct timer_list timer; |
80 | 83 | ||
81 | struct pstate_adjust_policy *pstate_policy; | ||
82 | struct pstate_data pstate; | 84 | struct pstate_data pstate; |
83 | struct _pid pid; | 85 | struct _pid pid; |
84 | 86 | ||
@@ -100,15 +102,21 @@ struct pstate_adjust_policy { | |||
100 | int i_gain_pct; | 102 | int i_gain_pct; |
101 | }; | 103 | }; |
102 | 104 | ||
103 | static struct pstate_adjust_policy default_policy = { | 105 | struct pstate_funcs { |
104 | .sample_rate_ms = 10, | 106 | int (*get_max)(void); |
105 | .deadband = 0, | 107 | int (*get_min)(void); |
106 | .setpoint = 97, | 108 | int (*get_turbo)(void); |
107 | .p_gain_pct = 20, | 109 | void (*set)(int pstate); |
108 | .d_gain_pct = 0, | 110 | }; |
109 | .i_gain_pct = 0, | 111 | |
112 | struct cpu_defaults { | ||
113 | struct pstate_adjust_policy pid_policy; | ||
114 | struct pstate_funcs funcs; | ||
110 | }; | 115 | }; |
111 | 116 | ||
117 | static struct pstate_adjust_policy pid_params; | ||
118 | static struct pstate_funcs pstate_funcs; | ||
119 | |||
112 | struct perf_limits { | 120 | struct perf_limits { |
113 | int no_turbo; | 121 | int no_turbo; |
114 | int max_perf_pct; | 122 | int max_perf_pct; |
@@ -185,14 +193,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy) | |||
185 | 193 | ||
186 | static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) | 194 | static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) |
187 | { | 195 | { |
188 | pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); | 196 | pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); |
189 | pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); | 197 | pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); |
190 | pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); | 198 | pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); |
191 | 199 | ||
192 | pid_reset(&cpu->pid, | 200 | pid_reset(&cpu->pid, |
193 | cpu->pstate_policy->setpoint, | 201 | pid_params.setpoint, |
194 | 100, | 202 | 100, |
195 | cpu->pstate_policy->deadband, | 203 | pid_params.deadband, |
196 | 0); | 204 | 0); |
197 | } | 205 | } |
198 | 206 | ||
@@ -226,12 +234,12 @@ struct pid_param { | |||
226 | }; | 234 | }; |
227 | 235 | ||
228 | static struct pid_param pid_files[] = { | 236 | static struct pid_param pid_files[] = { |
229 | {"sample_rate_ms", &default_policy.sample_rate_ms}, | 237 | {"sample_rate_ms", &pid_params.sample_rate_ms}, |
230 | {"d_gain_pct", &default_policy.d_gain_pct}, | 238 | {"d_gain_pct", &pid_params.d_gain_pct}, |
231 | {"i_gain_pct", &default_policy.i_gain_pct}, | 239 | {"i_gain_pct", &pid_params.i_gain_pct}, |
232 | {"deadband", &default_policy.deadband}, | 240 | {"deadband", &pid_params.deadband}, |
233 | {"setpoint", &default_policy.setpoint}, | 241 | {"setpoint", &pid_params.setpoint}, |
234 | {"p_gain_pct", &default_policy.p_gain_pct}, | 242 | {"p_gain_pct", &pid_params.p_gain_pct}, |
235 | {NULL, NULL} | 243 | {NULL, NULL} |
236 | }; | 244 | }; |
237 | 245 | ||
@@ -336,33 +344,92 @@ static void intel_pstate_sysfs_expose_params(void) | |||
336 | } | 344 | } |
337 | 345 | ||
338 | /************************** sysfs end ************************/ | 346 | /************************** sysfs end ************************/ |
347 | static int byt_get_min_pstate(void) | ||
348 | { | ||
349 | u64 value; | ||
350 | rdmsrl(BYT_RATIOS, value); | ||
351 | return value & 0xFF; | ||
352 | } | ||
339 | 353 | ||
340 | static int intel_pstate_min_pstate(void) | 354 | static int byt_get_max_pstate(void) |
355 | { | ||
356 | u64 value; | ||
357 | rdmsrl(BYT_RATIOS, value); | ||
358 | return (value >> 16) & 0xFF; | ||
359 | } | ||
360 | |||
361 | static int core_get_min_pstate(void) | ||
341 | { | 362 | { |
342 | u64 value; | 363 | u64 value; |
343 | rdmsrl(MSR_PLATFORM_INFO, value); | 364 | rdmsrl(MSR_PLATFORM_INFO, value); |
344 | return (value >> 40) & 0xFF; | 365 | return (value >> 40) & 0xFF; |
345 | } | 366 | } |
346 | 367 | ||
347 | static int intel_pstate_max_pstate(void) | 368 | static int core_get_max_pstate(void) |
348 | { | 369 | { |
349 | u64 value; | 370 | u64 value; |
350 | rdmsrl(MSR_PLATFORM_INFO, value); | 371 | rdmsrl(MSR_PLATFORM_INFO, value); |
351 | return (value >> 8) & 0xFF; | 372 | return (value >> 8) & 0xFF; |
352 | } | 373 | } |
353 | 374 | ||
354 | static int intel_pstate_turbo_pstate(void) | 375 | static int core_get_turbo_pstate(void) |
355 | { | 376 | { |
356 | u64 value; | 377 | u64 value; |
357 | int nont, ret; | 378 | int nont, ret; |
358 | rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); | 379 | rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); |
359 | nont = intel_pstate_max_pstate(); | 380 | nont = core_get_max_pstate(); |
360 | ret = ((value) & 255); | 381 | ret = ((value) & 255); |
361 | if (ret <= nont) | 382 | if (ret <= nont) |
362 | ret = nont; | 383 | ret = nont; |
363 | return ret; | 384 | return ret; |
364 | } | 385 | } |
365 | 386 | ||
387 | static void core_set_pstate(int pstate) | ||
388 | { | ||
389 | u64 val; | ||
390 | |||
391 | val = pstate << 8; | ||
392 | if (limits.no_turbo) | ||
393 | val |= (u64)1 << 32; | ||
394 | |||
395 | wrmsrl(MSR_IA32_PERF_CTL, val); | ||
396 | } | ||
397 | |||
398 | static struct cpu_defaults core_params = { | ||
399 | .pid_policy = { | ||
400 | .sample_rate_ms = 10, | ||
401 | .deadband = 0, | ||
402 | .setpoint = 97, | ||
403 | .p_gain_pct = 20, | ||
404 | .d_gain_pct = 0, | ||
405 | .i_gain_pct = 0, | ||
406 | }, | ||
407 | .funcs = { | ||
408 | .get_max = core_get_max_pstate, | ||
409 | .get_min = core_get_min_pstate, | ||
410 | .get_turbo = core_get_turbo_pstate, | ||
411 | .set = core_set_pstate, | ||
412 | }, | ||
413 | }; | ||
414 | |||
415 | static struct cpu_defaults byt_params = { | ||
416 | .pid_policy = { | ||
417 | .sample_rate_ms = 10, | ||
418 | .deadband = 0, | ||
419 | .setpoint = 97, | ||
420 | .p_gain_pct = 14, | ||
421 | .d_gain_pct = 0, | ||
422 | .i_gain_pct = 4, | ||
423 | }, | ||
424 | .funcs = { | ||
425 | .get_max = byt_get_max_pstate, | ||
426 | .get_min = byt_get_min_pstate, | ||
427 | .get_turbo = byt_get_max_pstate, | ||
428 | .set = core_set_pstate, | ||
429 | }, | ||
430 | }; | ||
431 | |||
432 | |||
366 | static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | 433 | static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) |
367 | { | 434 | { |
368 | int max_perf = cpu->pstate.turbo_pstate; | 435 | int max_perf = cpu->pstate.turbo_pstate; |
@@ -383,7 +450,6 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | |||
383 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | 450 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
384 | { | 451 | { |
385 | int max_perf, min_perf; | 452 | int max_perf, min_perf; |
386 | u64 val; | ||
387 | 453 | ||
388 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); | 454 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); |
389 | 455 | ||
@@ -395,11 +461,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | |||
395 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | 461 | trace_cpu_frequency(pstate * 100000, cpu->cpu); |
396 | 462 | ||
397 | cpu->pstate.current_pstate = pstate; | 463 | cpu->pstate.current_pstate = pstate; |
398 | val = pstate << 8; | ||
399 | if (limits.no_turbo) | ||
400 | val |= (u64)1 << 32; | ||
401 | 464 | ||
402 | wrmsrl(MSR_IA32_PERF_CTL, val); | 465 | pstate_funcs.set(pstate); |
403 | } | 466 | } |
404 | 467 | ||
405 | static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) | 468 | static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) |
@@ -421,9 +484,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) | |||
421 | { | 484 | { |
422 | sprintf(cpu->name, "Intel 2nd generation core"); | 485 | sprintf(cpu->name, "Intel 2nd generation core"); |
423 | 486 | ||
424 | cpu->pstate.min_pstate = intel_pstate_min_pstate(); | 487 | cpu->pstate.min_pstate = pstate_funcs.get_min(); |
425 | cpu->pstate.max_pstate = intel_pstate_max_pstate(); | 488 | cpu->pstate.max_pstate = pstate_funcs.get_max(); |
426 | cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); | 489 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); |
427 | 490 | ||
428 | /* | 491 | /* |
429 | * goto max pstate so we don't slow up boot if we are built-in if we are | 492 | * goto max pstate so we don't slow up boot if we are built-in if we are |
@@ -465,7 +528,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
465 | { | 528 | { |
466 | int sample_time, delay; | 529 | int sample_time, delay; |
467 | 530 | ||
468 | sample_time = cpu->pstate_policy->sample_rate_ms; | 531 | sample_time = pid_params.sample_rate_ms; |
469 | delay = msecs_to_jiffies(sample_time); | 532 | delay = msecs_to_jiffies(sample_time); |
470 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 533 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
471 | } | 534 | } |
@@ -521,14 +584,15 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
521 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } | 584 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } |
522 | 585 | ||
523 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 586 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
524 | ICPU(0x2a, default_policy), | 587 | ICPU(0x2a, core_params), |
525 | ICPU(0x2d, default_policy), | 588 | ICPU(0x2d, core_params), |
526 | ICPU(0x3a, default_policy), | 589 | ICPU(0x37, byt_params), |
527 | ICPU(0x3c, default_policy), | 590 | ICPU(0x3a, core_params), |
528 | ICPU(0x3e, default_policy), | 591 | ICPU(0x3c, core_params), |
529 | ICPU(0x3f, default_policy), | 592 | ICPU(0x3e, core_params), |
530 | ICPU(0x45, default_policy), | 593 | ICPU(0x3f, core_params), |
531 | ICPU(0x46, default_policy), | 594 | ICPU(0x45, core_params), |
595 | ICPU(0x46, core_params), | ||
532 | {} | 596 | {} |
533 | }; | 597 | }; |
534 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); | 598 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); |
@@ -552,8 +616,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
552 | intel_pstate_get_cpu_pstates(cpu); | 616 | intel_pstate_get_cpu_pstates(cpu); |
553 | 617 | ||
554 | cpu->cpu = cpunum; | 618 | cpu->cpu = cpunum; |
555 | cpu->pstate_policy = | 619 | |
556 | (struct pstate_adjust_policy *)id->driver_data; | ||
557 | init_timer_deferrable(&cpu->timer); | 620 | init_timer_deferrable(&cpu->timer); |
558 | cpu->timer.function = intel_pstate_timer_func; | 621 | cpu->timer.function = intel_pstate_timer_func; |
559 | cpu->timer.data = | 622 | cpu->timer.data = |
@@ -613,9 +676,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
613 | 676 | ||
614 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | 677 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) |
615 | { | 678 | { |
616 | cpufreq_verify_within_limits(policy, | 679 | cpufreq_verify_within_cpu_limits(policy); |
617 | policy->cpuinfo.min_freq, | ||
618 | policy->cpuinfo.max_freq); | ||
619 | 680 | ||
620 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | 681 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && |
621 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | 682 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) |
@@ -683,9 +744,9 @@ static int intel_pstate_msrs_not_valid(void) | |||
683 | rdmsrl(MSR_IA32_APERF, aperf); | 744 | rdmsrl(MSR_IA32_APERF, aperf); |
684 | rdmsrl(MSR_IA32_MPERF, mperf); | 745 | rdmsrl(MSR_IA32_MPERF, mperf); |
685 | 746 | ||
686 | if (!intel_pstate_min_pstate() || | 747 | if (!pstate_funcs.get_max() || |
687 | !intel_pstate_max_pstate() || | 748 | !pstate_funcs.get_min() || |
688 | !intel_pstate_turbo_pstate()) | 749 | !pstate_funcs.get_turbo()) |
689 | return -ENODEV; | 750 | return -ENODEV; |
690 | 751 | ||
691 | rdmsrl(MSR_IA32_APERF, tmp); | 752 | rdmsrl(MSR_IA32_APERF, tmp); |
@@ -698,10 +759,96 @@ static int intel_pstate_msrs_not_valid(void) | |||
698 | 759 | ||
699 | return 0; | 760 | return 0; |
700 | } | 761 | } |
762 | |||
763 | static void copy_pid_params(struct pstate_adjust_policy *policy) | ||
764 | { | ||
765 | pid_params.sample_rate_ms = policy->sample_rate_ms; | ||
766 | pid_params.p_gain_pct = policy->p_gain_pct; | ||
767 | pid_params.i_gain_pct = policy->i_gain_pct; | ||
768 | pid_params.d_gain_pct = policy->d_gain_pct; | ||
769 | pid_params.deadband = policy->deadband; | ||
770 | pid_params.setpoint = policy->setpoint; | ||
771 | } | ||
772 | |||
773 | static void copy_cpu_funcs(struct pstate_funcs *funcs) | ||
774 | { | ||
775 | pstate_funcs.get_max = funcs->get_max; | ||
776 | pstate_funcs.get_min = funcs->get_min; | ||
777 | pstate_funcs.get_turbo = funcs->get_turbo; | ||
778 | pstate_funcs.set = funcs->set; | ||
779 | } | ||
780 | |||
781 | #if IS_ENABLED(CONFIG_ACPI) | ||
782 | #include <acpi/processor.h> | ||
783 | |||
784 | static bool intel_pstate_no_acpi_pss(void) | ||
785 | { | ||
786 | int i; | ||
787 | |||
788 | for_each_possible_cpu(i) { | ||
789 | acpi_status status; | ||
790 | union acpi_object *pss; | ||
791 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
792 | struct acpi_processor *pr = per_cpu(processors, i); | ||
793 | |||
794 | if (!pr) | ||
795 | continue; | ||
796 | |||
797 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); | ||
798 | if (ACPI_FAILURE(status)) | ||
799 | continue; | ||
800 | |||
801 | pss = buffer.pointer; | ||
802 | if (pss && pss->type == ACPI_TYPE_PACKAGE) { | ||
803 | kfree(pss); | ||
804 | return false; | ||
805 | } | ||
806 | |||
807 | kfree(pss); | ||
808 | } | ||
809 | |||
810 | return true; | ||
811 | } | ||
812 | |||
813 | struct hw_vendor_info { | ||
814 | u16 valid; | ||
815 | char oem_id[ACPI_OEM_ID_SIZE]; | ||
816 | char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; | ||
817 | }; | ||
818 | |||
819 | /* Hardware vendor-specific info that has its own power management modes */ | ||
820 | static struct hw_vendor_info vendor_info[] = { | ||
821 | {1, "HP ", "ProLiant"}, | ||
822 | {0, "", ""}, | ||
823 | }; | ||
824 | |||
825 | static bool intel_pstate_platform_pwr_mgmt_exists(void) | ||
826 | { | ||
827 | struct acpi_table_header hdr; | ||
828 | struct hw_vendor_info *v_info; | ||
829 | |||
830 | if (acpi_disabled | ||
831 | || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) | ||
832 | return false; | ||
833 | |||
834 | for (v_info = vendor_info; v_info->valid; v_info++) { | ||
835 | if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) | ||
836 | && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) | ||
837 | && intel_pstate_no_acpi_pss()) | ||
838 | return true; | ||
839 | } | ||
840 | |||
841 | return false; | ||
842 | } | ||
843 | #else /* CONFIG_ACPI not enabled */ | ||
844 | static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } | ||
845 | #endif /* CONFIG_ACPI */ | ||
846 | |||
701 | static int __init intel_pstate_init(void) | 847 | static int __init intel_pstate_init(void) |
702 | { | 848 | { |
703 | int cpu, rc = 0; | 849 | int cpu, rc = 0; |
704 | const struct x86_cpu_id *id; | 850 | const struct x86_cpu_id *id; |
851 | struct cpu_defaults *cpu_info; | ||
705 | 852 | ||
706 | if (no_load) | 853 | if (no_load) |
707 | return -ENODEV; | 854 | return -ENODEV; |
@@ -710,6 +857,18 @@ static int __init intel_pstate_init(void) | |||
710 | if (!id) | 857 | if (!id) |
711 | return -ENODEV; | 858 | return -ENODEV; |
712 | 859 | ||
860 | /* | ||
861 | * The Intel pstate driver will be ignored if the platform | ||
862 | * firmware has its own power management modes. | ||
863 | */ | ||
864 | if (intel_pstate_platform_pwr_mgmt_exists()) | ||
865 | return -ENODEV; | ||
866 | |||
867 | cpu_info = (struct cpu_defaults *)id->driver_data; | ||
868 | |||
869 | copy_pid_params(&cpu_info->pid_policy); | ||
870 | copy_cpu_funcs(&cpu_info->funcs); | ||
871 | |||
713 | if (intel_pstate_msrs_not_valid()) | 872 | if (intel_pstate_msrs_not_valid()) |
714 | return -ENODEV; | 873 | return -ENODEV; |
715 | 874 | ||
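The intel_pstate hunks above do two things: they abort driver init when platform firmware appears to manage power itself (a vendor OEM-table match, currently HP ProLiant, combined with the absence of ACPI _PSS objects on every CPU), and they route per-model tuning through the x86_cpu_id match table, whose driver_data now points at a struct cpu_defaults that init copies into pid_params and pstate_funcs before the MSR sanity check runs. A minimal sketch of that driver_data plumbing follows; the struct layout is simplified and "core_params" is a stand-in name for one of the driver's per-model default tables, so treat this as an illustration rather than the exact driver code:

	#include <asm/cpu_device_id.h>

	/* Simplified: the real cpu_defaults lives in intel_pstate.c. */
	struct cpu_defaults {
		struct pstate_adjust_policy pid_policy;
		struct pstate_funcs funcs;
	};

	static struct cpu_defaults core_params;	/* per-model defaults, filled elsewhere */

	static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
		{ X86_VENDOR_INTEL, 6, 0x3a, X86_FEATURE_ANY,
		  (kernel_ulong_t)&core_params },
		{}
	};

	/* In intel_pstate_init(), after x86_match_cpu() succeeds: */
	cpu_info = (struct cpu_defaults *)id->driver_data;
	copy_pid_params(&cpu_info->pid_policy);	/* fills the global pid_params */
	copy_cpu_funcs(&cpu_info->funcs);	/* fills the global pstate_funcs */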
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index ba10658a9394..0767a4e29dfe 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
@@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) | |||
55 | return kirkwood_freq_table[0].frequency; | 55 | return kirkwood_freq_table[0].frequency; |
56 | } | 56 | } |
57 | 57 | ||
58 | static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, | 58 | static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, |
59 | unsigned int index) | 59 | unsigned int index) |
60 | { | 60 | { |
61 | struct cpufreq_freqs freqs; | ||
62 | unsigned int state = kirkwood_freq_table[index].driver_data; | 61 | unsigned int state = kirkwood_freq_table[index].driver_data; |
63 | unsigned long reg; | 62 | unsigned long reg; |
64 | 63 | ||
65 | freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); | 64 | local_irq_disable(); |
66 | freqs.new = kirkwood_freq_table[index].frequency; | ||
67 | |||
68 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
69 | |||
70 | dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", | ||
71 | kirkwood_freq_table[index].frequency); | ||
72 | dev_dbg(priv.dev, "old frequency was %i KHz\n", | ||
73 | kirkwood_cpufreq_get_cpu_frequency(0)); | ||
74 | |||
75 | if (freqs.old != freqs.new) { | ||
76 | local_irq_disable(); | ||
77 | |||
78 | /* Disable interrupts to the CPU */ | ||
79 | reg = readl_relaxed(priv.base); | ||
80 | reg |= CPU_SW_INT_BLK; | ||
81 | writel_relaxed(reg, priv.base); | ||
82 | |||
83 | switch (state) { | ||
84 | case STATE_CPU_FREQ: | ||
85 | clk_disable(priv.powersave_clk); | ||
86 | break; | ||
87 | case STATE_DDR_FREQ: | ||
88 | clk_enable(priv.powersave_clk); | ||
89 | break; | ||
90 | } | ||
91 | 65 | ||
92 | /* Wait-for-Interrupt, while the hardware changes frequency */ | 66 | /* Disable interrupts to the CPU */ |
93 | cpu_do_idle(); | 67 | reg = readl_relaxed(priv.base); |
68 | reg |= CPU_SW_INT_BLK; | ||
69 | writel_relaxed(reg, priv.base); | ||
94 | 70 | ||
95 | /* Enable interrupts to the CPU */ | 71 | switch (state) { |
96 | reg = readl_relaxed(priv.base); | 72 | case STATE_CPU_FREQ: |
97 | reg &= ~CPU_SW_INT_BLK; | 73 | clk_disable(priv.powersave_clk); |
98 | writel_relaxed(reg, priv.base); | 74 | break; |
99 | 75 | case STATE_DDR_FREQ: | |
100 | local_irq_enable(); | 76 | clk_enable(priv.powersave_clk); |
77 | break; | ||
101 | } | 78 | } |
102 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
103 | }; | ||
104 | |||
105 | static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) | ||
106 | { | ||
107 | return cpufreq_frequency_table_verify(policy, kirkwood_freq_table); | ||
108 | } | ||
109 | 79 | ||
110 | static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, | 80 | /* Wait-for-Interrupt, while the hardware changes frequency */ |
111 | unsigned int target_freq, | 81 | cpu_do_idle(); |
112 | unsigned int relation) | ||
113 | { | ||
114 | unsigned int index = 0; | ||
115 | 82 | ||
116 | if (cpufreq_frequency_table_target(policy, kirkwood_freq_table, | 83 | /* Enable interrupts to the CPU */ |
117 | target_freq, relation, &index)) | 84 | reg = readl_relaxed(priv.base); |
118 | return -EINVAL; | 85 | reg &= ~CPU_SW_INT_BLK; |
86 | writel_relaxed(reg, priv.base); | ||
119 | 87 | ||
120 | kirkwood_cpufreq_set_cpu_state(policy, index); | 88 | local_irq_enable(); |
121 | 89 | ||
122 | return 0; | 90 | return 0; |
123 | } | 91 | } |
@@ -125,40 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, | |||
125 | /* Module init and exit code */ | 93 | /* Module init and exit code */ |
126 | static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) | 94 | static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) |
127 | { | 95 | { |
128 | int result; | 96 | return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); |
129 | |||
130 | /* cpuinfo and default policy values */ | ||
131 | policy->cpuinfo.transition_latency = 5000; /* 5uS */ | ||
132 | policy->cur = kirkwood_cpufreq_get_cpu_frequency(0); | ||
133 | |||
134 | result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table); | ||
135 | if (result) | ||
136 | return result; | ||
137 | |||
138 | cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
146 | return 0; | ||
147 | } | 97 | } |
148 | 98 | ||
149 | static struct freq_attr *kirkwood_cpufreq_attr[] = { | ||
150 | &cpufreq_freq_attr_scaling_available_freqs, | ||
151 | NULL, | ||
152 | }; | ||
153 | |||
154 | static struct cpufreq_driver kirkwood_cpufreq_driver = { | 99 | static struct cpufreq_driver kirkwood_cpufreq_driver = { |
155 | .get = kirkwood_cpufreq_get_cpu_frequency, | 100 | .get = kirkwood_cpufreq_get_cpu_frequency, |
156 | .verify = kirkwood_cpufreq_verify, | 101 | .verify = cpufreq_generic_frequency_table_verify, |
157 | .target = kirkwood_cpufreq_target, | 102 | .target_index = kirkwood_cpufreq_target, |
158 | .init = kirkwood_cpufreq_cpu_init, | 103 | .init = kirkwood_cpufreq_cpu_init, |
159 | .exit = kirkwood_cpufreq_cpu_exit, | 104 | .exit = cpufreq_generic_exit, |
160 | .name = "kirkwood-cpufreq", | 105 | .name = "kirkwood-cpufreq", |
161 | .attr = kirkwood_cpufreq_attr, | 106 | .attr = cpufreq_generic_attr, |
162 | }; | 107 | }; |
163 | 108 | ||
164 | static int kirkwood_cpufreq_probe(struct platform_device *pdev) | 109 | static int kirkwood_cpufreq_probe(struct platform_device *pdev) |
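kirkwood is the first of many drivers in this series converted from ->target() to ->target_index(): the cpufreq core now resolves the requested frequency to a table index, skips no-op transitions and issues the CPUFREQ_PRECHANGE/POSTCHANGE notifiers itself, so the driver callback only has to program the hardware for the index it is handed. Combined with cpufreq_generic_init(), cpufreq_generic_exit() and the shared verify/attr helpers, a converted driver reduces to roughly the skeleton below (the foo_* names and table entries are placeholders, not kernel symbols):

	#include <linux/cpufreq.h>

	static struct cpufreq_frequency_table foo_freq_table[] = {
		{ .driver_data = 0, .frequency = 200000 },	/* example entries */
		{ .driver_data = 1, .frequency = 400000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};

	static int foo_cpufreq_target(struct cpufreq_policy *policy,
				      unsigned int index)
	{
		/* Only the hardware switch is left; table validation and
		 * transition notification happen in the cpufreq core. */
		return foo_set_hw_state(foo_freq_table[index].driver_data);
	}

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* Fills cpuinfo limits, policy->cpus and the transition
		 * latency (5000 ns = 5 us here) in a single call. */
		return cpufreq_generic_init(policy, foo_freq_table, 5000);
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_cpufreq_target,
		.init		= foo_cpufreq_init,
		.exit		= cpufreq_generic_exit,
		.get		= foo_cpufreq_get,
		.name		= "foo",
		.attr		= cpufreq_generic_attr,
	};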
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 4ada1cccb052..45bafddfd8ea 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -625,28 +625,13 @@ static void longhaul_setup_voltagescaling(void) | |||
625 | } | 625 | } |
626 | 626 | ||
627 | 627 | ||
628 | static int longhaul_verify(struct cpufreq_policy *policy) | ||
629 | { | ||
630 | return cpufreq_frequency_table_verify(policy, longhaul_table); | ||
631 | } | ||
632 | |||
633 | |||
634 | static int longhaul_target(struct cpufreq_policy *policy, | 628 | static int longhaul_target(struct cpufreq_policy *policy, |
635 | unsigned int target_freq, unsigned int relation) | 629 | unsigned int table_index) |
636 | { | 630 | { |
637 | unsigned int table_index = 0; | ||
638 | unsigned int i; | 631 | unsigned int i; |
639 | unsigned int dir = 0; | 632 | unsigned int dir = 0; |
640 | u8 vid, current_vid; | 633 | u8 vid, current_vid; |
641 | 634 | ||
642 | if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, | ||
643 | relation, &table_index)) | ||
644 | return -EINVAL; | ||
645 | |||
646 | /* Don't set same frequency again */ | ||
647 | if (longhaul_index == table_index) | ||
648 | return 0; | ||
649 | |||
650 | if (!can_scale_voltage) | 635 | if (!can_scale_voltage) |
651 | longhaul_setstate(policy, table_index); | 636 | longhaul_setstate(policy, table_index); |
652 | else { | 637 | else { |
@@ -919,36 +904,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
919 | longhaul_setup_voltagescaling(); | 904 | longhaul_setup_voltagescaling(); |
920 | 905 | ||
921 | policy->cpuinfo.transition_latency = 200000; /* nsec */ | 906 | policy->cpuinfo.transition_latency = 200000; /* nsec */ |
922 | policy->cur = calc_speed(longhaul_get_cpu_mult()); | ||
923 | |||
924 | ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); | ||
925 | if (ret) | ||
926 | return ret; | ||
927 | |||
928 | cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); | ||
929 | 907 | ||
930 | return 0; | 908 | return cpufreq_table_validate_and_show(policy, longhaul_table); |
931 | } | 909 | } |
932 | 910 | ||
933 | static int longhaul_cpu_exit(struct cpufreq_policy *policy) | ||
934 | { | ||
935 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | static struct freq_attr *longhaul_attr[] = { | ||
940 | &cpufreq_freq_attr_scaling_available_freqs, | ||
941 | NULL, | ||
942 | }; | ||
943 | |||
944 | static struct cpufreq_driver longhaul_driver = { | 911 | static struct cpufreq_driver longhaul_driver = { |
945 | .verify = longhaul_verify, | 912 | .verify = cpufreq_generic_frequency_table_verify, |
946 | .target = longhaul_target, | 913 | .target_index = longhaul_target, |
947 | .get = longhaul_get, | 914 | .get = longhaul_get, |
948 | .init = longhaul_cpu_init, | 915 | .init = longhaul_cpu_init, |
949 | .exit = longhaul_cpu_exit, | 916 | .exit = cpufreq_generic_exit, |
950 | .name = "longhaul", | 917 | .name = "longhaul", |
951 | .attr = longhaul_attr, | 918 | .attr = cpufreq_generic_attr, |
952 | }; | 919 | }; |
953 | 920 | ||
954 | static const struct x86_cpu_id longhaul_id[] = { | 921 | static const struct x86_cpu_id longhaul_id[] = { |
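longhaul (and p4-clockmod further down) keeps its own init routine, so instead of cpufreq_generic_init() it calls the new cpufreq_table_validate_and_show() helper, which folds together the two calls it replaces. Judging from the code removed here, the helper presumably amounts to the following; this is an assumption about its body inferred from the hunk, not a quote from the cpufreq core:

	int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
					    struct cpufreq_frequency_table *table)
	{
		int ret = cpufreq_frequency_table_cpuinfo(policy, table);

		if (!ret)
			cpufreq_frequency_table_get_attr(table, policy->cpu);

		return ret;
	}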
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 5aa031612d53..074971b12635 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c | |||
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) | |||
129 | return -EINVAL; | 129 | return -EINVAL; |
130 | 130 | ||
131 | policy->cpu = 0; | 131 | policy->cpu = 0; |
132 | cpufreq_verify_within_limits(policy, | 132 | cpufreq_verify_within_cpu_limits(policy); |
133 | policy->cpuinfo.min_freq, | ||
134 | policy->cpuinfo.max_freq); | ||
135 | 133 | ||
136 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | 134 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && |
137 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | 135 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) |
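longrun, intel_pstate and pcc-cpufreq all replace the open-coded three-argument cpufreq_verify_within_limits() call with cpufreq_verify_within_cpu_limits(). Going by the code it replaces, the new helper is presumably just a thin wrapper that clamps the policy to the CPU's own cpuinfo bounds:

	static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
	{
		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					     policy->cpuinfo.max_freq);
	}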
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 7bc3c44d34e2..a43609218105 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
@@ -53,51 +53,24 @@ static unsigned int loongson2_cpufreq_get(unsigned int cpu) | |||
53 | * Here we notify other drivers of the proposed change and the final change. | 53 | * Here we notify other drivers of the proposed change and the final change. |
54 | */ | 54 | */ |
55 | static int loongson2_cpufreq_target(struct cpufreq_policy *policy, | 55 | static int loongson2_cpufreq_target(struct cpufreq_policy *policy, |
56 | unsigned int target_freq, | 56 | unsigned int index) |
57 | unsigned int relation) | ||
58 | { | 57 | { |
59 | unsigned int cpu = policy->cpu; | 58 | unsigned int cpu = policy->cpu; |
60 | unsigned int newstate = 0; | ||
61 | cpumask_t cpus_allowed; | 59 | cpumask_t cpus_allowed; |
62 | struct cpufreq_freqs freqs; | ||
63 | unsigned int freq; | 60 | unsigned int freq; |
64 | 61 | ||
65 | cpus_allowed = current->cpus_allowed; | 62 | cpus_allowed = current->cpus_allowed; |
66 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 63 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
67 | 64 | ||
68 | if (cpufreq_frequency_table_target | ||
69 | (policy, &loongson2_clockmod_table[0], target_freq, relation, | ||
70 | &newstate)) | ||
71 | return -EINVAL; | ||
72 | |||
73 | freq = | 65 | freq = |
74 | ((cpu_clock_freq / 1000) * | 66 | ((cpu_clock_freq / 1000) * |
75 | loongson2_clockmod_table[newstate].driver_data) / 8; | 67 | loongson2_clockmod_table[index].driver_data) / 8; |
76 | if (freq < policy->min || freq > policy->max) | ||
77 | return -EINVAL; | ||
78 | |||
79 | pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); | ||
80 | |||
81 | freqs.old = loongson2_cpufreq_get(cpu); | ||
82 | freqs.new = freq; | ||
83 | freqs.flags = 0; | ||
84 | |||
85 | if (freqs.new == freqs.old) | ||
86 | return 0; | ||
87 | |||
88 | /* notifiers */ | ||
89 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
90 | 68 | ||
91 | set_cpus_allowed_ptr(current, &cpus_allowed); | 69 | set_cpus_allowed_ptr(current, &cpus_allowed); |
92 | 70 | ||
93 | /* setting the cpu frequency */ | 71 | /* setting the cpu frequency */ |
94 | clk_set_rate(cpuclk, freq); | 72 | clk_set_rate(cpuclk, freq); |
95 | 73 | ||
96 | /* notifiers */ | ||
97 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
98 | |||
99 | pr_debug("cpufreq: set frequency %u kHz\n", freq); | ||
100 | |||
101 | return 0; | 74 | return 0; |
102 | } | 75 | } |
103 | 76 | ||
@@ -131,40 +104,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
131 | return ret; | 104 | return ret; |
132 | } | 105 | } |
133 | 106 | ||
134 | policy->cur = loongson2_cpufreq_get(policy->cpu); | 107 | return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); |
135 | |||
136 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], | ||
137 | policy->cpu); | ||
138 | |||
139 | return cpufreq_frequency_table_cpuinfo(policy, | ||
140 | &loongson2_clockmod_table[0]); | ||
141 | } | ||
142 | |||
143 | static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | return cpufreq_frequency_table_verify(policy, | ||
146 | &loongson2_clockmod_table[0]); | ||
147 | } | 108 | } |
148 | 109 | ||
149 | static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) | 110 | static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) |
150 | { | 111 | { |
112 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
151 | clk_put(cpuclk); | 113 | clk_put(cpuclk); |
152 | return 0; | 114 | return 0; |
153 | } | 115 | } |
154 | 116 | ||
155 | static struct freq_attr *loongson2_table_attr[] = { | ||
156 | &cpufreq_freq_attr_scaling_available_freqs, | ||
157 | NULL, | ||
158 | }; | ||
159 | |||
160 | static struct cpufreq_driver loongson2_cpufreq_driver = { | 117 | static struct cpufreq_driver loongson2_cpufreq_driver = { |
161 | .name = "loongson2", | 118 | .name = "loongson2", |
162 | .init = loongson2_cpufreq_cpu_init, | 119 | .init = loongson2_cpufreq_cpu_init, |
163 | .verify = loongson2_cpufreq_verify, | 120 | .verify = cpufreq_generic_frequency_table_verify, |
164 | .target = loongson2_cpufreq_target, | 121 | .target_index = loongson2_cpufreq_target, |
165 | .get = loongson2_cpufreq_get, | 122 | .get = loongson2_cpufreq_get, |
166 | .exit = loongson2_cpufreq_exit, | 123 | .exit = loongson2_cpufreq_exit, |
167 | .attr = loongson2_table_attr, | 124 | .attr = cpufreq_generic_attr, |
168 | }; | 125 | }; |
169 | 126 | ||
170 | static struct platform_device_id platform_device_ids[] = { | 127 | static struct platform_device_id platform_device_ids[] = { |
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index 6168d77b296d..c4dfa42a75ac 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c | |||
@@ -64,18 +64,11 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = { | |||
64 | {0, CPUFREQ_TABLE_END}, | 64 | {0, CPUFREQ_TABLE_END}, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static struct freq_attr *maple_cpu_freqs_attr[] = { | ||
68 | &cpufreq_freq_attr_scaling_available_freqs, | ||
69 | NULL, | ||
70 | }; | ||
71 | |||
72 | /* Power mode data is an array of the 32 bits PCR values to use for | 67 | /* Power mode data is an array of the 32 bits PCR values to use for |
73 | * the various frequencies, retrieved from the device-tree | 68 | * the various frequencies, retrieved from the device-tree |
74 | */ | 69 | */ |
75 | static int maple_pmode_cur; | 70 | static int maple_pmode_cur; |
76 | 71 | ||
77 | static DEFINE_MUTEX(maple_switch_mutex); | ||
78 | |||
79 | static const u32 *maple_pmode_data; | 72 | static const u32 *maple_pmode_data; |
80 | static int maple_pmode_max; | 73 | static int maple_pmode_max; |
81 | 74 | ||
@@ -135,37 +128,10 @@ static int maple_scom_query_freq(void) | |||
135 | * Common interface to the cpufreq core | 128 | * Common interface to the cpufreq core |
136 | */ | 129 | */ |
137 | 130 | ||
138 | static int maple_cpufreq_verify(struct cpufreq_policy *policy) | ||
139 | { | ||
140 | return cpufreq_frequency_table_verify(policy, maple_cpu_freqs); | ||
141 | } | ||
142 | |||
143 | static int maple_cpufreq_target(struct cpufreq_policy *policy, | 131 | static int maple_cpufreq_target(struct cpufreq_policy *policy, |
144 | unsigned int target_freq, unsigned int relation) | 132 | unsigned int index) |
145 | { | 133 | { |
146 | unsigned int newstate = 0; | 134 | return maple_scom_switch_freq(index); |
147 | struct cpufreq_freqs freqs; | ||
148 | int rc; | ||
149 | |||
150 | if (cpufreq_frequency_table_target(policy, maple_cpu_freqs, | ||
151 | target_freq, relation, &newstate)) | ||
152 | return -EINVAL; | ||
153 | |||
154 | if (maple_pmode_cur == newstate) | ||
155 | return 0; | ||
156 | |||
157 | mutex_lock(&maple_switch_mutex); | ||
158 | |||
159 | freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; | ||
160 | freqs.new = maple_cpu_freqs[newstate].frequency; | ||
161 | |||
162 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
163 | rc = maple_scom_switch_freq(newstate); | ||
164 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
165 | |||
166 | mutex_unlock(&maple_switch_mutex); | ||
167 | |||
168 | return rc; | ||
169 | } | 135 | } |
170 | 136 | ||
171 | static unsigned int maple_cpufreq_get_speed(unsigned int cpu) | 137 | static unsigned int maple_cpufreq_get_speed(unsigned int cpu) |
@@ -175,27 +141,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu) | |||
175 | 141 | ||
176 | static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) | 142 | static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) |
177 | { | 143 | { |
178 | policy->cpuinfo.transition_latency = 12000; | 144 | return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); |
179 | policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency; | ||
180 | /* secondary CPUs are tied to the primary one by the | ||
181 | * cpufreq core if in the secondary policy we tell it that | ||
182 | * it actually must be one policy together with all others. */ | ||
183 | cpumask_setall(policy->cpus); | ||
184 | cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); | ||
185 | |||
186 | return cpufreq_frequency_table_cpuinfo(policy, | ||
187 | maple_cpu_freqs); | ||
188 | } | 145 | } |
189 | 146 | ||
190 | |||
191 | static struct cpufreq_driver maple_cpufreq_driver = { | 147 | static struct cpufreq_driver maple_cpufreq_driver = { |
192 | .name = "maple", | 148 | .name = "maple", |
193 | .flags = CPUFREQ_CONST_LOOPS, | 149 | .flags = CPUFREQ_CONST_LOOPS, |
194 | .init = maple_cpufreq_cpu_init, | 150 | .init = maple_cpufreq_cpu_init, |
195 | .verify = maple_cpufreq_verify, | 151 | .verify = cpufreq_generic_frequency_table_verify, |
196 | .target = maple_cpufreq_target, | 152 | .target_index = maple_cpufreq_target, |
197 | .get = maple_cpufreq_get_speed, | 153 | .get = maple_cpufreq_get_speed, |
198 | .attr = maple_cpu_freqs_attr, | 154 | .attr = cpufreq_generic_attr, |
199 | }; | 155 | }; |
200 | 156 | ||
201 | static int __init maple_cpufreq_init(void) | 157 | static int __init maple_cpufreq_init(void) |
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index f31fcfcad514..be6d14307aa8 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/opp.h> | 25 | #include <linux/pm_opp.h> |
26 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
@@ -40,13 +40,6 @@ static struct clk *mpu_clk; | |||
40 | static struct device *mpu_dev; | 40 | static struct device *mpu_dev; |
41 | static struct regulator *mpu_reg; | 41 | static struct regulator *mpu_reg; |
42 | 42 | ||
43 | static int omap_verify_speed(struct cpufreq_policy *policy) | ||
44 | { | ||
45 | if (!freq_table) | ||
46 | return -EINVAL; | ||
47 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
48 | } | ||
49 | |||
50 | static unsigned int omap_getspeed(unsigned int cpu) | 43 | static unsigned int omap_getspeed(unsigned int cpu) |
51 | { | 44 | { |
52 | unsigned long rate; | 45 | unsigned long rate; |
@@ -58,42 +51,16 @@ static unsigned int omap_getspeed(unsigned int cpu) | |||
58 | return rate; | 51 | return rate; |
59 | } | 52 | } |
60 | 53 | ||
61 | static int omap_target(struct cpufreq_policy *policy, | 54 | static int omap_target(struct cpufreq_policy *policy, unsigned int index) |
62 | unsigned int target_freq, | ||
63 | unsigned int relation) | ||
64 | { | 55 | { |
65 | unsigned int i; | 56 | struct dev_pm_opp *opp; |
66 | int r, ret = 0; | ||
67 | struct cpufreq_freqs freqs; | ||
68 | struct opp *opp; | ||
69 | unsigned long freq, volt = 0, volt_old = 0, tol = 0; | 57 | unsigned long freq, volt = 0, volt_old = 0, tol = 0; |
58 | unsigned int old_freq, new_freq; | ||
70 | 59 | ||
71 | if (!freq_table) { | 60 | old_freq = omap_getspeed(policy->cpu); |
72 | dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, | 61 | new_freq = freq_table[index].frequency; |
73 | policy->cpu); | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | 62 | ||
77 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | 63 | freq = new_freq * 1000; |
78 | relation, &i); | ||
79 | if (ret) { | ||
80 | dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", | ||
81 | __func__, policy->cpu, target_freq, ret); | ||
82 | return ret; | ||
83 | } | ||
84 | freqs.new = freq_table[i].frequency; | ||
85 | if (!freqs.new) { | ||
86 | dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, | ||
87 | policy->cpu, target_freq); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | freqs.old = omap_getspeed(policy->cpu); | ||
92 | |||
93 | if (freqs.old == freqs.new && policy->cur == freqs.new) | ||
94 | return ret; | ||
95 | |||
96 | freq = freqs.new * 1000; | ||
97 | ret = clk_round_rate(mpu_clk, freq); | 64 | ret = clk_round_rate(mpu_clk, freq); |
98 | if (IS_ERR_VALUE(ret)) { | 65 | if (IS_ERR_VALUE(ret)) { |
99 | dev_warn(mpu_dev, | 66 | dev_warn(mpu_dev, |
@@ -105,143 +72,103 @@ static int omap_target(struct cpufreq_policy *policy, | |||
105 | 72 | ||
106 | if (mpu_reg) { | 73 | if (mpu_reg) { |
107 | rcu_read_lock(); | 74 | rcu_read_lock(); |
108 | opp = opp_find_freq_ceil(mpu_dev, &freq); | 75 | opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq); |
109 | if (IS_ERR(opp)) { | 76 | if (IS_ERR(opp)) { |
110 | rcu_read_unlock(); | 77 | rcu_read_unlock(); |
111 | dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", | 78 | dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", |
112 | __func__, freqs.new); | 79 | __func__, new_freq); |
113 | return -EINVAL; | 80 | return -EINVAL; |
114 | } | 81 | } |
115 | volt = opp_get_voltage(opp); | 82 | volt = dev_pm_opp_get_voltage(opp); |
116 | rcu_read_unlock(); | 83 | rcu_read_unlock(); |
117 | tol = volt * OPP_TOLERANCE / 100; | 84 | tol = volt * OPP_TOLERANCE / 100; |
118 | volt_old = regulator_get_voltage(mpu_reg); | 85 | volt_old = regulator_get_voltage(mpu_reg); |
119 | } | 86 | } |
120 | 87 | ||
121 | dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n", | 88 | dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n", |
122 | freqs.old / 1000, volt_old ? volt_old / 1000 : -1, | 89 | old_freq / 1000, volt_old ? volt_old / 1000 : -1, |
123 | freqs.new / 1000, volt ? volt / 1000 : -1); | 90 | new_freq / 1000, volt ? volt / 1000 : -1); |
124 | |||
125 | /* notifiers */ | ||
126 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
127 | 91 | ||
128 | /* scaling up? scale voltage before frequency */ | 92 | /* scaling up? scale voltage before frequency */ |
129 | if (mpu_reg && (freqs.new > freqs.old)) { | 93 | if (mpu_reg && (new_freq > old_freq)) { |
130 | r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); | 94 | r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); |
131 | if (r < 0) { | 95 | if (r < 0) { |
132 | dev_warn(mpu_dev, "%s: unable to scale voltage up.\n", | 96 | dev_warn(mpu_dev, "%s: unable to scale voltage up.\n", |
133 | __func__); | 97 | __func__); |
134 | freqs.new = freqs.old; | 98 | return r; |
135 | goto done; | ||
136 | } | 99 | } |
137 | } | 100 | } |
138 | 101 | ||
139 | ret = clk_set_rate(mpu_clk, freqs.new * 1000); | 102 | ret = clk_set_rate(mpu_clk, new_freq * 1000); |
140 | 103 | ||
141 | /* scaling down? scale voltage after frequency */ | 104 | /* scaling down? scale voltage after frequency */ |
142 | if (mpu_reg && (freqs.new < freqs.old)) { | 105 | if (mpu_reg && (new_freq < old_freq)) { |
143 | r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); | 106 | r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol); |
144 | if (r < 0) { | 107 | if (r < 0) { |
145 | dev_warn(mpu_dev, "%s: unable to scale voltage down.\n", | 108 | dev_warn(mpu_dev, "%s: unable to scale voltage down.\n", |
146 | __func__); | 109 | __func__); |
147 | ret = clk_set_rate(mpu_clk, freqs.old * 1000); | 110 | clk_set_rate(mpu_clk, old_freq * 1000); |
148 | freqs.new = freqs.old; | 111 | return r; |
149 | goto done; | ||
150 | } | 112 | } |
151 | } | 113 | } |
152 | 114 | ||
153 | freqs.new = omap_getspeed(policy->cpu); | ||
154 | |||
155 | done: | ||
156 | /* notifiers */ | ||
157 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
158 | |||
159 | return ret; | 115 | return ret; |
160 | } | 116 | } |
161 | 117 | ||
162 | static inline void freq_table_free(void) | 118 | static inline void freq_table_free(void) |
163 | { | 119 | { |
164 | if (atomic_dec_and_test(&freq_table_users)) | 120 | if (atomic_dec_and_test(&freq_table_users)) |
165 | opp_free_cpufreq_table(mpu_dev, &freq_table); | 121 | dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table); |
166 | } | 122 | } |
167 | 123 | ||
168 | static int omap_cpu_init(struct cpufreq_policy *policy) | 124 | static int omap_cpu_init(struct cpufreq_policy *policy) |
169 | { | 125 | { |
170 | int result = 0; | 126 | int result; |
171 | 127 | ||
172 | mpu_clk = clk_get(NULL, "cpufreq_ck"); | 128 | mpu_clk = clk_get(NULL, "cpufreq_ck"); |
173 | if (IS_ERR(mpu_clk)) | 129 | if (IS_ERR(mpu_clk)) |
174 | return PTR_ERR(mpu_clk); | 130 | return PTR_ERR(mpu_clk); |
175 | 131 | ||
176 | if (policy->cpu >= NR_CPUS) { | 132 | if (!freq_table) { |
177 | result = -EINVAL; | 133 | result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table); |
178 | goto fail_ck; | 134 | if (result) { |
179 | } | 135 | dev_err(mpu_dev, |
180 | 136 | "%s: cpu%d: failed creating freq table[%d]\n", | |
181 | policy->cur = omap_getspeed(policy->cpu); | ||
182 | |||
183 | if (!freq_table) | ||
184 | result = opp_init_cpufreq_table(mpu_dev, &freq_table); | ||
185 | |||
186 | if (result) { | ||
187 | dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", | ||
188 | __func__, policy->cpu, result); | 137 | __func__, policy->cpu, result); |
189 | goto fail_ck; | 138 | goto fail; |
139 | } | ||
190 | } | 140 | } |
191 | 141 | ||
192 | atomic_inc_return(&freq_table_users); | 142 | atomic_inc_return(&freq_table_users); |
193 | 143 | ||
194 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
195 | if (result) | ||
196 | goto fail_table; | ||
197 | |||
198 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
199 | |||
200 | policy->cur = omap_getspeed(policy->cpu); | ||
201 | |||
202 | /* | ||
203 | * On OMAP SMP configuartion, both processors share the voltage | ||
204 | * and clock. So both CPUs needs to be scaled together and hence | ||
205 | * needs software co-ordination. Use cpufreq affected_cpus | ||
206 | * interface to handle this scenario. Additional is_smp() check | ||
207 | * is to keep SMP_ON_UP build working. | ||
208 | */ | ||
209 | if (is_smp()) | ||
210 | cpumask_setall(policy->cpus); | ||
211 | |||
212 | /* FIXME: what's the actual transition time? */ | 144 | /* FIXME: what's the actual transition time? */ |
213 | policy->cpuinfo.transition_latency = 300 * 1000; | 145 | result = cpufreq_generic_init(policy, freq_table, 300 * 1000); |
214 | 146 | if (!result) | |
215 | return 0; | 147 | return 0; |
216 | 148 | ||
217 | fail_table: | ||
218 | freq_table_free(); | 149 | freq_table_free(); |
219 | fail_ck: | 150 | fail: |
220 | clk_put(mpu_clk); | 151 | clk_put(mpu_clk); |
221 | return result; | 152 | return result; |
222 | } | 153 | } |
223 | 154 | ||
224 | static int omap_cpu_exit(struct cpufreq_policy *policy) | 155 | static int omap_cpu_exit(struct cpufreq_policy *policy) |
225 | { | 156 | { |
157 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
226 | freq_table_free(); | 158 | freq_table_free(); |
227 | clk_put(mpu_clk); | 159 | clk_put(mpu_clk); |
228 | return 0; | 160 | return 0; |
229 | } | 161 | } |
230 | 162 | ||
231 | static struct freq_attr *omap_cpufreq_attr[] = { | ||
232 | &cpufreq_freq_attr_scaling_available_freqs, | ||
233 | NULL, | ||
234 | }; | ||
235 | |||
236 | static struct cpufreq_driver omap_driver = { | 163 | static struct cpufreq_driver omap_driver = { |
237 | .flags = CPUFREQ_STICKY, | 164 | .flags = CPUFREQ_STICKY, |
238 | .verify = omap_verify_speed, | 165 | .verify = cpufreq_generic_frequency_table_verify, |
239 | .target = omap_target, | 166 | .target_index = omap_target, |
240 | .get = omap_getspeed, | 167 | .get = omap_getspeed, |
241 | .init = omap_cpu_init, | 168 | .init = omap_cpu_init, |
242 | .exit = omap_cpu_exit, | 169 | .exit = omap_cpu_exit, |
243 | .name = "omap", | 170 | .name = "omap", |
244 | .attr = omap_cpufreq_attr, | 171 | .attr = cpufreq_generic_attr, |
245 | }; | 172 | }; |
246 | 173 | ||
247 | static int omap_cpufreq_probe(struct platform_device *pdev) | 174 | static int omap_cpufreq_probe(struct platform_device *pdev) |
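Besides the target_index conversion, the omap hunk tracks the OPP library rename: <linux/opp.h> becomes <linux/pm_opp.h> and the calls gain a dev_pm_opp_ prefix (struct opp becomes struct dev_pm_opp, opp_find_freq_ceil becomes dev_pm_opp_find_freq_ceil, and so on). The lookup discipline is unchanged: the OPP table is RCU-protected, so the OPP pointer may only be dereferenced between rcu_read_lock() and rcu_read_unlock(), which is why the voltage is read before the unlock above. A minimal sketch of that pattern (the example_* name is a placeholder):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/* *freq is in Hz; on success it is rounded up to the chosen OPP. */
	static int example_freq_to_volt(struct device *dev, unsigned long *freq,
					unsigned long *volt)
	{
		struct dev_pm_opp *opp;

		rcu_read_lock();			/* OPP table is RCU-protected */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			return PTR_ERR(opp);
		}
		*volt = dev_pm_opp_get_voltage(opp);	/* read while still under RCU */
		rcu_read_unlock();

		return 0;
	}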
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 2f0a2a65c37f..3d1cba9fd5f9 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c | |||
@@ -105,47 +105,21 @@ static struct cpufreq_frequency_table p4clockmod_table[] = { | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | 107 | ||
108 | static int cpufreq_p4_target(struct cpufreq_policy *policy, | 108 | static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index) |
109 | unsigned int target_freq, | ||
110 | unsigned int relation) | ||
111 | { | 109 | { |
112 | unsigned int newstate = DC_RESV; | ||
113 | struct cpufreq_freqs freqs; | ||
114 | int i; | 110 | int i; |
115 | 111 | ||
116 | if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], | ||
117 | target_freq, relation, &newstate)) | ||
118 | return -EINVAL; | ||
119 | |||
120 | freqs.old = cpufreq_p4_get(policy->cpu); | ||
121 | freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8; | ||
122 | |||
123 | if (freqs.new == freqs.old) | ||
124 | return 0; | ||
125 | |||
126 | /* notifiers */ | ||
127 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
128 | |||
129 | /* run on each logical CPU, | 112 | /* run on each logical CPU, |
130 | * see section 13.15.3 of IA32 Intel Architecture Software | 113 | * see section 13.15.3 of IA32 Intel Architecture Software |
131 | * Developer's Manual, Volume 3 | 114 | * Developer's Manual, Volume 3 |
132 | */ | 115 | */ |
133 | for_each_cpu(i, policy->cpus) | 116 | for_each_cpu(i, policy->cpus) |
134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data); | 117 | cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data); |
135 | |||
136 | /* notifiers */ | ||
137 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
138 | 118 | ||
139 | return 0; | 119 | return 0; |
140 | } | 120 | } |
141 | 121 | ||
142 | 122 | ||
143 | static int cpufreq_p4_verify(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); | ||
146 | } | ||
147 | |||
148 | |||
149 | static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | 123 | static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) |
150 | { | 124 | { |
151 | if (c->x86 == 0x06) { | 125 | if (c->x86 == 0x06) { |
@@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
230 | else | 204 | else |
231 | p4clockmod_table[i].frequency = (stock_freq * i)/8; | 205 | p4clockmod_table[i].frequency = (stock_freq * i)/8; |
232 | } | 206 | } |
233 | cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); | ||
234 | 207 | ||
235 | /* cpuinfo and default policy values */ | 208 | /* cpuinfo and default policy values */ |
236 | 209 | ||
237 | /* the transition latency is set to be 1 higher than the maximum | 210 | /* the transition latency is set to be 1 higher than the maximum |
238 | * transition latency of the ondemand governor */ | 211 | * transition latency of the ondemand governor */ |
239 | policy->cpuinfo.transition_latency = 10000001; | 212 | policy->cpuinfo.transition_latency = 10000001; |
240 | policy->cur = stock_freq; | ||
241 | 213 | ||
242 | return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); | 214 | return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]); |
243 | } | 215 | } |
244 | 216 | ||
245 | 217 | ||
246 | static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) | ||
247 | { | ||
248 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static unsigned int cpufreq_p4_get(unsigned int cpu) | 218 | static unsigned int cpufreq_p4_get(unsigned int cpu) |
253 | { | 219 | { |
254 | u32 l, h; | 220 | u32 l, h; |
@@ -267,19 +233,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) | |||
267 | return stock_freq; | 233 | return stock_freq; |
268 | } | 234 | } |
269 | 235 | ||
270 | static struct freq_attr *p4clockmod_attr[] = { | ||
271 | &cpufreq_freq_attr_scaling_available_freqs, | ||
272 | NULL, | ||
273 | }; | ||
274 | |||
275 | static struct cpufreq_driver p4clockmod_driver = { | 236 | static struct cpufreq_driver p4clockmod_driver = { |
276 | .verify = cpufreq_p4_verify, | 237 | .verify = cpufreq_generic_frequency_table_verify, |
277 | .target = cpufreq_p4_target, | 238 | .target_index = cpufreq_p4_target, |
278 | .init = cpufreq_p4_cpu_init, | 239 | .init = cpufreq_p4_cpu_init, |
279 | .exit = cpufreq_p4_cpu_exit, | 240 | .exit = cpufreq_generic_exit, |
280 | .get = cpufreq_p4_get, | 241 | .get = cpufreq_p4_get, |
281 | .name = "p4-clockmod", | 242 | .name = "p4-clockmod", |
282 | .attr = p4clockmod_attr, | 243 | .attr = cpufreq_generic_attr, |
283 | }; | 244 | }; |
284 | 245 | ||
285 | static const struct x86_cpu_id cpufreq_p4_id[] = { | 246 | static const struct x86_cpu_id cpufreq_p4_id[] = { |
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index f4ec8145b3d1..0426008380d8 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c | |||
@@ -52,8 +52,6 @@ | |||
52 | static void __iomem *sdcpwr_mapbase; | 52 | static void __iomem *sdcpwr_mapbase; |
53 | static void __iomem *sdcasr_mapbase; | 53 | static void __iomem *sdcasr_mapbase; |
54 | 54 | ||
55 | static DEFINE_MUTEX(pas_switch_mutex); | ||
56 | |||
57 | /* Current astate, is used when waking up from power savings on | 55 | /* Current astate, is used when waking up from power savings on |
58 | * one core, in case the other core has switched states during | 56 | * one core, in case the other core has switched states during |
59 | * the idle time. | 57 | * the idle time. |
@@ -70,11 +68,6 @@ static struct cpufreq_frequency_table pas_freqs[] = { | |||
70 | {0, CPUFREQ_TABLE_END}, | 68 | {0, CPUFREQ_TABLE_END}, |
71 | }; | 69 | }; |
72 | 70 | ||
73 | static struct freq_attr *pas_cpu_freqs_attr[] = { | ||
74 | &cpufreq_freq_attr_scaling_available_freqs, | ||
75 | NULL, | ||
76 | }; | ||
77 | |||
78 | /* | 71 | /* |
79 | * hardware specific functions | 72 | * hardware specific functions |
80 | */ | 73 | */ |
@@ -210,22 +203,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
210 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); | 203 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); |
211 | } | 204 | } |
212 | 205 | ||
213 | policy->cpuinfo.transition_latency = get_gizmo_latency(); | ||
214 | |||
215 | cur_astate = get_cur_astate(policy->cpu); | 206 | cur_astate = get_cur_astate(policy->cpu); |
216 | pr_debug("current astate is at %d\n",cur_astate); | 207 | pr_debug("current astate is at %d\n",cur_astate); |
217 | 208 | ||
218 | policy->cur = pas_freqs[cur_astate].frequency; | 209 | policy->cur = pas_freqs[cur_astate].frequency; |
219 | cpumask_copy(policy->cpus, cpu_online_mask); | ||
220 | |||
221 | ppc_proc_freq = policy->cur * 1000ul; | 210 | ppc_proc_freq = policy->cur * 1000ul; |
222 | 211 | ||
223 | cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu); | 212 | return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); |
224 | |||
225 | /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max | ||
226 | * are set correctly | ||
227 | */ | ||
228 | return cpufreq_frequency_table_cpuinfo(policy, pas_freqs); | ||
229 | 213 | ||
230 | out_unmap_sdcpwr: | 214 | out_unmap_sdcpwr: |
231 | iounmap(sdcpwr_mapbase); | 215 | iounmap(sdcpwr_mapbase); |
@@ -254,31 +238,11 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
254 | return 0; | 238 | return 0; |
255 | } | 239 | } |
256 | 240 | ||
257 | static int pas_cpufreq_verify(struct cpufreq_policy *policy) | ||
258 | { | ||
259 | return cpufreq_frequency_table_verify(policy, pas_freqs); | ||
260 | } | ||
261 | |||
262 | static int pas_cpufreq_target(struct cpufreq_policy *policy, | 241 | static int pas_cpufreq_target(struct cpufreq_policy *policy, |
263 | unsigned int target_freq, | 242 | unsigned int pas_astate_new) |
264 | unsigned int relation) | ||
265 | { | 243 | { |
266 | struct cpufreq_freqs freqs; | ||
267 | int pas_astate_new; | ||
268 | int i; | 244 | int i; |
269 | 245 | ||
270 | cpufreq_frequency_table_target(policy, | ||
271 | pas_freqs, | ||
272 | target_freq, | ||
273 | relation, | ||
274 | &pas_astate_new); | ||
275 | |||
276 | freqs.old = policy->cur; | ||
277 | freqs.new = pas_freqs[pas_astate_new].frequency; | ||
278 | |||
279 | mutex_lock(&pas_switch_mutex); | ||
280 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
281 | |||
282 | pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", | 246 | pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", |
283 | policy->cpu, | 247 | policy->cpu, |
284 | pas_freqs[pas_astate_new].frequency, | 248 | pas_freqs[pas_astate_new].frequency, |
@@ -289,10 +253,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, | |||
289 | for_each_online_cpu(i) | 253 | for_each_online_cpu(i) |
290 | set_astate(i, pas_astate_new); | 254 | set_astate(i, pas_astate_new); |
291 | 255 | ||
292 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 256 | ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul; |
293 | mutex_unlock(&pas_switch_mutex); | ||
294 | |||
295 | ppc_proc_freq = freqs.new * 1000ul; | ||
296 | return 0; | 257 | return 0; |
297 | } | 258 | } |
298 | 259 | ||
@@ -301,9 +262,9 @@ static struct cpufreq_driver pas_cpufreq_driver = { | |||
301 | .flags = CPUFREQ_CONST_LOOPS, | 262 | .flags = CPUFREQ_CONST_LOOPS, |
302 | .init = pas_cpufreq_cpu_init, | 263 | .init = pas_cpufreq_cpu_init, |
303 | .exit = pas_cpufreq_cpu_exit, | 264 | .exit = pas_cpufreq_cpu_exit, |
304 | .verify = pas_cpufreq_verify, | 265 | .verify = cpufreq_generic_frequency_table_verify, |
305 | .target = pas_cpufreq_target, | 266 | .target_index = pas_cpufreq_target, |
306 | .attr = pas_cpu_freqs_attr, | 267 | .attr = cpufreq_generic_attr, |
307 | }; | 268 | }; |
308 | 269 | ||
309 | /* | 270 | /* |
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index d81c4e5ea0ad..e2b4f40ff69a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info; | |||
111 | 111 | ||
112 | static int pcc_cpufreq_verify(struct cpufreq_policy *policy) | 112 | static int pcc_cpufreq_verify(struct cpufreq_policy *policy) |
113 | { | 113 | { |
114 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 114 | cpufreq_verify_within_cpu_limits(policy); |
115 | policy->cpuinfo.max_freq); | ||
116 | return 0; | 115 | return 0; |
117 | } | 116 | } |
118 | 117 | ||
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void) | |||
396 | struct pcc_memory_resource *mem_resource; | 395 | struct pcc_memory_resource *mem_resource; |
397 | struct pcc_register_resource *reg_resource; | 396 | struct pcc_register_resource *reg_resource; |
398 | union acpi_object *out_obj, *member; | 397 | union acpi_object *out_obj, *member; |
399 | acpi_handle handle, osc_handle, pcch_handle; | 398 | acpi_handle handle, osc_handle; |
400 | int ret = 0; | 399 | int ret = 0; |
401 | 400 | ||
402 | status = acpi_get_handle(NULL, "\\_SB", &handle); | 401 | status = acpi_get_handle(NULL, "\\_SB", &handle); |
403 | if (ACPI_FAILURE(status)) | 402 | if (ACPI_FAILURE(status)) |
404 | return -ENODEV; | 403 | return -ENODEV; |
405 | 404 | ||
406 | status = acpi_get_handle(handle, "PCCH", &pcch_handle); | 405 | if (!acpi_has_method(handle, "PCCH")) |
407 | if (ACPI_FAILURE(status)) | ||
408 | return -ENODEV; | 406 | return -ENODEV; |
409 | 407 | ||
410 | status = acpi_get_handle(handle, "_OSC", &osc_handle); | 408 | status = acpi_get_handle(handle, "_OSC", &osc_handle); |
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
560 | ioread32(&pcch_hdr->nominal) * 1000; | 558 | ioread32(&pcch_hdr->nominal) * 1000; |
561 | policy->min = policy->cpuinfo.min_freq = | 559 | policy->min = policy->cpuinfo.min_freq = |
562 | ioread32(&pcch_hdr->minimum_frequency) * 1000; | 560 | ioread32(&pcch_hdr->minimum_frequency) * 1000; |
563 | policy->cur = pcc_get_freq(cpu); | ||
564 | |||
565 | if (!policy->cur) { | ||
566 | pr_debug("init: Unable to get current CPU frequency\n"); | ||
567 | result = -EINVAL; | ||
568 | goto out; | ||
569 | } | ||
570 | 561 | ||
571 | pr_debug("init: policy->max is %d, policy->min is %d\n", | 562 | pr_debug("init: policy->max is %d, policy->min is %d\n", |
572 | policy->max, policy->min); | 563 | policy->max, policy->min); |
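pcc-cpufreq only needed acpi_get_handle() to learn whether \_SB.PCCH exists; the returned handle was never used again. acpi_has_method() expresses that existence test directly, which is what the probe path now does. A before/after fragment (handle is the \_SB handle obtained earlier in pcc_cpufreq_probe()):

	/* Before: existence check by fetching a handle nobody uses. */
	acpi_handle pcch_handle;

	if (ACPI_FAILURE(acpi_get_handle(handle, "PCCH", &pcch_handle)))
		return -ENODEV;

	/* After: same check, no dummy variable. */
	if (!acpi_has_method(handle, "PCCH"))
		return -ENODEV;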
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index a096cd3fa23d..cf55d202f332 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c | |||
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = { | |||
86 | {0, CPUFREQ_TABLE_END}, | 86 | {0, CPUFREQ_TABLE_END}, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | static struct freq_attr* pmac_cpu_freqs_attr[] = { | ||
90 | &cpufreq_freq_attr_scaling_available_freqs, | ||
91 | NULL, | ||
92 | }; | ||
93 | |||
94 | static inline void local_delay(unsigned long ms) | 89 | static inline void local_delay(unsigned long ms) |
95 | { | 90 | { |
96 | if (no_schedule) | 91 | if (no_schedule) |
@@ -336,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed) | |||
336 | return 0; | 331 | return 0; |
337 | } | 332 | } |
338 | 333 | ||
339 | static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, | 334 | static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode) |
340 | int notify) | ||
341 | { | 335 | { |
342 | struct cpufreq_freqs freqs; | ||
343 | unsigned long l3cr; | 336 | unsigned long l3cr; |
344 | static unsigned long prev_l3cr; | 337 | static unsigned long prev_l3cr; |
345 | 338 | ||
346 | freqs.old = cur_freq; | ||
347 | freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; | ||
348 | |||
349 | if (freqs.old == freqs.new) | ||
350 | return 0; | ||
351 | |||
352 | if (notify) | ||
353 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
354 | if (speed_mode == CPUFREQ_LOW && | 339 | if (speed_mode == CPUFREQ_LOW && |
355 | cpu_has_feature(CPU_FTR_L3CR)) { | 340 | cpu_has_feature(CPU_FTR_L3CR)) { |
356 | l3cr = _get_L3CR(); | 341 | l3cr = _get_L3CR(); |
@@ -366,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, | |||
366 | if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) | 351 | if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) |
367 | _set_L3CR(prev_l3cr); | 352 | _set_L3CR(prev_l3cr); |
368 | } | 353 | } |
369 | if (notify) | ||
370 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
371 | cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; | 354 | cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; |
372 | 355 | ||
373 | return 0; | 356 | return 0; |
@@ -378,23 +361,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) | |||
378 | return cur_freq; | 361 | return cur_freq; |
379 | } | 362 | } |
380 | 363 | ||
381 | static int pmac_cpufreq_verify(struct cpufreq_policy *policy) | ||
382 | { | ||
383 | return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); | ||
384 | } | ||
385 | |||
386 | static int pmac_cpufreq_target( struct cpufreq_policy *policy, | 364 | static int pmac_cpufreq_target( struct cpufreq_policy *policy, |
387 | unsigned int target_freq, | 365 | unsigned int index) |
388 | unsigned int relation) | ||
389 | { | 366 | { |
390 | unsigned int newstate = 0; | ||
391 | int rc; | 367 | int rc; |
392 | 368 | ||
393 | if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, | 369 | rc = do_set_cpu_speed(policy, index); |
394 | target_freq, relation, &newstate)) | ||
395 | return -EINVAL; | ||
396 | |||
397 | rc = do_set_cpu_speed(policy, newstate, 1); | ||
398 | 370 | ||
399 | ppc_proc_freq = cur_freq * 1000ul; | 371 | ppc_proc_freq = cur_freq * 1000ul; |
400 | return rc; | 372 | return rc; |
@@ -402,14 +374,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, | |||
402 | 374 | ||
403 | static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) | 375 | static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) |
404 | { | 376 | { |
405 | if (policy->cpu != 0) | 377 | return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); |
406 | return -ENODEV; | ||
407 | |||
408 | policy->cpuinfo.transition_latency = transition_latency; | ||
409 | policy->cur = cur_freq; | ||
410 | |||
411 | cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); | ||
412 | return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); | ||
413 | } | 378 | } |
414 | 379 | ||
415 | static u32 read_gpio(struct device_node *np) | 380 | static u32 read_gpio(struct device_node *np) |
@@ -443,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) | |||
443 | no_schedule = 1; | 408 | no_schedule = 1; |
444 | sleep_freq = cur_freq; | 409 | sleep_freq = cur_freq; |
445 | if (cur_freq == low_freq && !is_pmu_based) | 410 | if (cur_freq == low_freq && !is_pmu_based) |
446 | do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); | 411 | do_set_cpu_speed(policy, CPUFREQ_HIGH); |
447 | return 0; | 412 | return 0; |
448 | } | 413 | } |
449 | 414 | ||
@@ -460,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) | |||
460 | * probably high speed due to our suspend() routine | 425 | * probably high speed due to our suspend() routine |
461 | */ | 426 | */ |
462 | do_set_cpu_speed(policy, sleep_freq == low_freq ? | 427 | do_set_cpu_speed(policy, sleep_freq == low_freq ? |
463 | CPUFREQ_LOW : CPUFREQ_HIGH, 0); | 428 | CPUFREQ_LOW : CPUFREQ_HIGH); |
464 | 429 | ||
465 | ppc_proc_freq = cur_freq * 1000ul; | 430 | ppc_proc_freq = cur_freq * 1000ul; |
466 | 431 | ||
@@ -469,14 +434,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) | |||
469 | } | 434 | } |
470 | 435 | ||
471 | static struct cpufreq_driver pmac_cpufreq_driver = { | 436 | static struct cpufreq_driver pmac_cpufreq_driver = { |
472 | .verify = pmac_cpufreq_verify, | 437 | .verify = cpufreq_generic_frequency_table_verify, |
473 | .target = pmac_cpufreq_target, | 438 | .target_index = pmac_cpufreq_target, |
474 | .get = pmac_cpufreq_get_speed, | 439 | .get = pmac_cpufreq_get_speed, |
475 | .init = pmac_cpufreq_cpu_init, | 440 | .init = pmac_cpufreq_cpu_init, |
476 | .suspend = pmac_cpufreq_suspend, | 441 | .suspend = pmac_cpufreq_suspend, |
477 | .resume = pmac_cpufreq_resume, | 442 | .resume = pmac_cpufreq_resume, |
478 | .flags = CPUFREQ_PM_NO_WARN, | 443 | .flags = CPUFREQ_PM_NO_WARN, |
479 | .attr = pmac_cpu_freqs_attr, | 444 | .attr = cpufreq_generic_attr, |
480 | .name = "powermac", | 445 | .name = "powermac", |
481 | }; | 446 | }; |
482 | 447 | ||
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 3a51ad7e47c8..6a338f8c3860 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c | |||
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = { | |||
70 | {0, CPUFREQ_TABLE_END}, | 70 | {0, CPUFREQ_TABLE_END}, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static struct freq_attr* g5_cpu_freqs_attr[] = { | ||
74 | &cpufreq_freq_attr_scaling_available_freqs, | ||
75 | NULL, | ||
76 | }; | ||
77 | |||
78 | /* Power mode data is an array of the 32 bits PCR values to use for | 73 | /* Power mode data is an array of the 32 bits PCR values to use for |
79 | * the various frequencies, retrieved from the device-tree | 74 | * the various frequencies, retrieved from the device-tree |
80 | */ | 75 | */ |
@@ -84,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode); | |||
84 | static int (*g5_switch_freq)(int speed_mode); | 79 | static int (*g5_switch_freq)(int speed_mode); |
85 | static int (*g5_query_freq)(void); | 80 | static int (*g5_query_freq)(void); |
86 | 81 | ||
87 | static DEFINE_MUTEX(g5_switch_mutex); | ||
88 | |||
89 | static unsigned long transition_latency; | 82 | static unsigned long transition_latency; |
90 | 83 | ||
91 | #ifdef CONFIG_PMAC_SMU | 84 | #ifdef CONFIG_PMAC_SMU |
@@ -142,7 +135,7 @@ static void g5_vdnap_switch_volt(int speed_mode) | |||
142 | pmf_call_one(pfunc_vdnap0_complete, &args); | 135 | pmf_call_one(pfunc_vdnap0_complete, &args); |
143 | if (done) | 136 | if (done) |
144 | break; | 137 | break; |
145 | msleep(1); | 138 | usleep_range(1000, 1000); |
146 | } | 139 | } |
147 | if (done == 0) | 140 | if (done == 0) |
148 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | 141 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); |
@@ -241,7 +234,7 @@ static void g5_pfunc_switch_volt(int speed_mode) | |||
241 | if (pfunc_cpu1_volt_low) | 234 | if (pfunc_cpu1_volt_low) |
242 | pmf_call_one(pfunc_cpu1_volt_low, NULL); | 235 | pmf_call_one(pfunc_cpu1_volt_low, NULL); |
243 | } | 236 | } |
244 | msleep(10); /* should be faster , to fix */ | 237 | usleep_range(10000, 10000); /* should be faster , to fix */ |
245 | } | 238 | } |
246 | 239 | ||
247 | /* | 240 | /* |
@@ -286,7 +279,7 @@ static int g5_pfunc_switch_freq(int speed_mode) | |||
286 | pmf_call_one(pfunc_slewing_done, &args); | 279 | pmf_call_one(pfunc_slewing_done, &args); |
287 | if (done) | 280 | if (done) |
288 | break; | 281 | break; |
289 | msleep(1); | 282 | usleep_range(500, 500); |
290 | } | 283 | } |
291 | if (done == 0) | 284 | if (done == 0) |
292 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | 285 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); |
@@ -317,37 +310,9 @@ static int g5_pfunc_query_freq(void) | |||
317 | * Common interface to the cpufreq core | 310 | * Common interface to the cpufreq core |
318 | */ | 311 | */ |
319 | 312 | ||
320 | static int g5_cpufreq_verify(struct cpufreq_policy *policy) | 313 | static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) |
321 | { | 314 | { |
322 | return cpufreq_frequency_table_verify(policy, g5_cpu_freqs); | 315 | return g5_switch_freq(index); |
323 | } | ||
324 | |||
325 | static int g5_cpufreq_target(struct cpufreq_policy *policy, | ||
326 | unsigned int target_freq, unsigned int relation) | ||
327 | { | ||
328 | unsigned int newstate = 0; | ||
329 | struct cpufreq_freqs freqs; | ||
330 | int rc; | ||
331 | |||
332 | if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, | ||
333 | target_freq, relation, &newstate)) | ||
334 | return -EINVAL; | ||
335 | |||
336 | if (g5_pmode_cur == newstate) | ||
337 | return 0; | ||
338 | |||
339 | mutex_lock(&g5_switch_mutex); | ||
340 | |||
341 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; | ||
342 | freqs.new = g5_cpu_freqs[newstate].frequency; | ||
343 | |||
344 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
345 | rc = g5_switch_freq(newstate); | ||
346 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
347 | |||
348 | mutex_unlock(&g5_switch_mutex); | ||
349 | |||
350 | return rc; | ||
351 | } | 316 | } |
352 | 317 | ||
353 | static unsigned int g5_cpufreq_get_speed(unsigned int cpu) | 318 | static unsigned int g5_cpufreq_get_speed(unsigned int cpu) |
@@ -357,27 +322,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu) | |||
357 | 322 | ||
358 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) | 323 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) |
359 | { | 324 | { |
360 | policy->cpuinfo.transition_latency = transition_latency; | 325 | return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); |
361 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; | ||
362 | /* secondary CPUs are tied to the primary one by the | ||
363 | * cpufreq core if in the secondary policy we tell it that | ||
364 | * it actually must be one policy together with all others. */ | ||
365 | cpumask_copy(policy->cpus, cpu_online_mask); | ||
366 | cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); | ||
367 | |||
368 | return cpufreq_frequency_table_cpuinfo(policy, | ||
369 | g5_cpu_freqs); | ||
370 | } | 326 | } |
371 | 327 | ||
372 | |||
373 | static struct cpufreq_driver g5_cpufreq_driver = { | 328 | static struct cpufreq_driver g5_cpufreq_driver = { |
374 | .name = "powermac", | 329 | .name = "powermac", |
375 | .flags = CPUFREQ_CONST_LOOPS, | 330 | .flags = CPUFREQ_CONST_LOOPS, |
376 | .init = g5_cpufreq_cpu_init, | 331 | .init = g5_cpufreq_cpu_init, |
377 | .verify = g5_cpufreq_verify, | 332 | .verify = cpufreq_generic_frequency_table_verify, |
378 | .target = g5_cpufreq_target, | 333 | .target_index = g5_cpufreq_target, |
379 | .get = g5_cpufreq_get_speed, | 334 | .get = g5_cpufreq_get_speed, |
380 | .attr = g5_cpu_freqs_attr, | 335 | .attr = cpufreq_generic_attr, |
381 | }; | 336 | }; |
382 | 337 | ||
383 | 338 | ||
@@ -397,7 +352,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) | |||
397 | /* Check supported platforms */ | 352 | /* Check supported platforms */ |
398 | if (of_machine_is_compatible("PowerMac8,1") || | 353 | if (of_machine_is_compatible("PowerMac8,1") || |
399 | of_machine_is_compatible("PowerMac8,2") || | 354 | of_machine_is_compatible("PowerMac8,2") || |
400 | of_machine_is_compatible("PowerMac9,1")) | 355 | of_machine_is_compatible("PowerMac9,1") || |
356 | of_machine_is_compatible("PowerMac12,1")) | ||
401 | use_volts_smu = 1; | 357 | use_volts_smu = 1; |
402 | else if (of_machine_is_compatible("PowerMac11,2")) | 358 | else if (of_machine_is_compatible("PowerMac11,2")) |
403 | use_volts_vdnap = 1; | 359 | use_volts_vdnap = 1; |
@@ -647,8 +603,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) | |||
647 | g5_cpu_freqs[0].frequency = max_freq; | 603 | g5_cpu_freqs[0].frequency = max_freq; |
648 | g5_cpu_freqs[1].frequency = min_freq; | 604 | g5_cpu_freqs[1].frequency = min_freq; |
649 | 605 | ||
606 | /* Based on a measurement on Xserve G5, rounded up. */ | ||
607 | transition_latency = 10 * NSEC_PER_MSEC; | ||
608 | |||
650 | /* Set callbacks */ | 609 | /* Set callbacks */ |
651 | transition_latency = CPUFREQ_ETERNAL; | ||
652 | g5_switch_volt = g5_pfunc_switch_volt; | 610 | g5_switch_volt = g5_pfunc_switch_volt; |
653 | g5_switch_freq = g5_pfunc_switch_freq; | 611 | g5_switch_freq = g5_pfunc_switch_freq; |
654 | g5_query_freq = g5_pfunc_query_freq; | 612 | g5_query_freq = g5_pfunc_query_freq; |
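The pmac64 hunks above show the shape of the conversion repeated in the drivers that follow: the legacy ->target(policy, target_freq, relation) callback had to resolve a frequency-table index and post the PRECHANGE/POSTCHANGE notifications itself, while the new ->target_index(policy, index) callback receives an index the cpufreq core has already looked up, with the core also wrapping the call in the notifications. A minimal sketch of the two callback shapes, with the foo_* names and the foo_set_hw_frequency() helper purely illustrative:

	/* Legacy ->target(): the driver resolves the index and notifies. */
	static int foo_target(struct cpufreq_policy *policy,
			      unsigned int target_freq, unsigned int relation)
	{
		struct cpufreq_freqs freqs;
		unsigned int index;
		int ret;

		if (cpufreq_frequency_table_target(policy, foo_freq_table,
						   target_freq, relation, &index))
			return -EINVAL;

		freqs.old = policy->cur;
		freqs.new = foo_freq_table[index].frequency;

		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		ret = foo_set_hw_frequency(index);	/* hypothetical hardware poke */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

		return ret;
	}

	/* New ->target_index(): lookup and notification are done by the core. */
	static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
	{
		return foo_set_hw_frequency(index);	/* hypothetical hardware poke */
	}

On the init side, cpufreq_generic_init(policy, table, transition_latency) bundles the table validation, the cpuinfo/latency setup and, for drivers like this one where all CPUs switch together, the marking of all CPUs as sharing the policy, which is why the open-coded cpumask_copy() and table calls above can go.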
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 85f1c8c25ddc..643e7952cad3 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c | |||
@@ -63,12 +63,12 @@ static int powernow_k6_get_cpu_multiplier(void) | |||
63 | 63 | ||
64 | 64 | ||
65 | /** | 65 | /** |
66 | * powernow_k6_set_state - set the PowerNow! multiplier | 66 | * powernow_k6_target - set the PowerNow! multiplier |
67 | * @best_i: clock_ratio[best_i] is the target multiplier | 67 | * @best_i: clock_ratio[best_i] is the target multiplier |
68 | * | 68 | * |
69 | * Tries to change the PowerNow! multiplier | 69 | * Tries to change the PowerNow! multiplier |
70 | */ | 70 | */ |
71 | static void powernow_k6_set_state(struct cpufreq_policy *policy, | 71 | static int powernow_k6_target(struct cpufreq_policy *policy, |
72 | unsigned int best_i) | 72 | unsigned int best_i) |
73 | { | 73 | { |
74 | unsigned long outvalue = 0, invalue = 0; | 74 | unsigned long outvalue = 0, invalue = 0; |
@@ -77,7 +77,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy, | |||
77 | 77 | ||
78 | if (clock_ratio[best_i].driver_data > max_multiplier) { | 78 | if (clock_ratio[best_i].driver_data > max_multiplier) { |
79 | printk(KERN_ERR PFX "invalid target frequency\n"); | 79 | printk(KERN_ERR PFX "invalid target frequency\n"); |
80 | return; | 80 | return -EINVAL; |
81 | } | 81 | } |
82 | 82 | ||
83 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); | 83 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); |
@@ -100,44 +100,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy, | |||
100 | 100 | ||
101 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 101 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
102 | 102 | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | |||
107 | /** | ||
108 | * powernow_k6_verify - verifies a new CPUfreq policy | ||
109 | * @policy: new policy | ||
110 | * | ||
111 | * Policy must be within lowest and highest possible CPU Frequency, | ||
112 | * and at least one possible state must be within min and max. | ||
113 | */ | ||
114 | static int powernow_k6_verify(struct cpufreq_policy *policy) | ||
115 | { | ||
116 | return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); | ||
117 | } | ||
118 | |||
119 | |||
120 | /** | ||
121 | * powernow_k6_setpolicy - sets a new CPUFreq policy | ||
122 | * @policy: new policy | ||
123 | * @target_freq: the target frequency | ||
124 | * @relation: how that frequency relates to achieved frequency | ||
125 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
126 | * | ||
127 | * sets a new CPUFreq policy | ||
128 | */ | ||
129 | static int powernow_k6_target(struct cpufreq_policy *policy, | ||
130 | unsigned int target_freq, | ||
131 | unsigned int relation) | ||
132 | { | ||
133 | unsigned int newstate = 0; | ||
134 | |||
135 | if (cpufreq_frequency_table_target(policy, &clock_ratio[0], | ||
136 | target_freq, relation, &newstate)) | ||
137 | return -EINVAL; | ||
138 | |||
139 | powernow_k6_set_state(policy, newstate); | ||
140 | |||
141 | return 0; | 103 | return 0; |
142 | } | 104 | } |
143 | 105 | ||
@@ -145,7 +107,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy, | |||
145 | static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | 107 | static int powernow_k6_cpu_init(struct cpufreq_policy *policy) |
146 | { | 108 | { |
147 | unsigned int i, f; | 109 | unsigned int i, f; |
148 | int result; | ||
149 | 110 | ||
150 | if (policy->cpu != 0) | 111 | if (policy->cpu != 0) |
151 | return -ENODEV; | 112 | return -ENODEV; |
@@ -165,15 +126,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
165 | 126 | ||
166 | /* cpuinfo and default policy values */ | 127 | /* cpuinfo and default policy values */ |
167 | policy->cpuinfo.transition_latency = 200000; | 128 | policy->cpuinfo.transition_latency = 200000; |
168 | policy->cur = busfreq * max_multiplier; | ||
169 | |||
170 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); | ||
171 | if (result) | ||
172 | return result; | ||
173 | |||
174 | cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); | ||
175 | 129 | ||
176 | return 0; | 130 | return cpufreq_table_validate_and_show(policy, clock_ratio); |
177 | } | 131 | } |
178 | 132 | ||
179 | 133 | ||
@@ -182,7 +136,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) | |||
182 | unsigned int i; | 136 | unsigned int i; |
183 | for (i = 0; i < 8; i++) { | 137 | for (i = 0; i < 8; i++) { |
184 | if (i == max_multiplier) | 138 | if (i == max_multiplier) |
185 | powernow_k6_set_state(policy, i); | 139 | powernow_k6_target(policy, i); |
186 | } | 140 | } |
187 | cpufreq_frequency_table_put_attr(policy->cpu); | 141 | cpufreq_frequency_table_put_attr(policy->cpu); |
188 | return 0; | 142 | return 0; |
@@ -195,19 +149,14 @@ static unsigned int powernow_k6_get(unsigned int cpu) | |||
195 | return ret; | 149 | return ret; |
196 | } | 150 | } |
197 | 151 | ||
198 | static struct freq_attr *powernow_k6_attr[] = { | ||
199 | &cpufreq_freq_attr_scaling_available_freqs, | ||
200 | NULL, | ||
201 | }; | ||
202 | |||
203 | static struct cpufreq_driver powernow_k6_driver = { | 152 | static struct cpufreq_driver powernow_k6_driver = { |
204 | .verify = powernow_k6_verify, | 153 | .verify = cpufreq_generic_frequency_table_verify, |
205 | .target = powernow_k6_target, | 154 | .target_index = powernow_k6_target, |
206 | .init = powernow_k6_cpu_init, | 155 | .init = powernow_k6_cpu_init, |
207 | .exit = powernow_k6_cpu_exit, | 156 | .exit = powernow_k6_cpu_exit, |
208 | .get = powernow_k6_get, | 157 | .get = powernow_k6_get, |
209 | .name = "powernow-k6", | 158 | .name = "powernow-k6", |
210 | .attr = powernow_k6_attr, | 159 | .attr = cpufreq_generic_attr, |
211 | }; | 160 | }; |
212 | 161 | ||
213 | static const struct x86_cpu_id powernow_k6_ids[] = { | 162 | static const struct x86_cpu_id powernow_k6_ids[] = { |
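The powernow-k6 init boilerplate removed above collapses into one helper: cpufreq_table_validate_and_show() performs the old cpufreq_frequency_table_cpuinfo() validation (deriving cpuinfo.min_freq/max_freq from the table) and also hands the table to the core, which lets the per-driver scaling_available_freqs attribute array be replaced by the shared cpufreq_generic_attr. A hedged before/after sketch of an init callback, with foo_* names invented for illustration:

	/* Before: validate, then export the table and sysfs attribute by hand. */
	static int foo_cpu_init_old(struct cpufreq_policy *policy)
	{
		int ret;

		policy->cpuinfo.transition_latency = 200000;	/* nanoseconds */

		ret = cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);
		if (ret)
			return ret;

		cpufreq_frequency_table_get_attr(foo_freq_table, policy->cpu);
		return 0;
	}

	/* After: a single call validates the table and publishes it. */
	static int foo_cpu_init_new(struct cpufreq_policy *policy)
	{
		policy->cpuinfo.transition_latency = 200000;
		return cpufreq_table_validate_and_show(policy, foo_freq_table);
	}

The explicit policy->cur assignments disappear for the same reason: once the driver provides a ->get() callback, the core can query the current frequency itself.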
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 14ce480be8ab..946708a1d745 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
@@ -248,7 +248,7 @@ static void change_VID(int vid) | |||
248 | } | 248 | } |
249 | 249 | ||
250 | 250 | ||
251 | static void change_speed(struct cpufreq_policy *policy, unsigned int index) | 251 | static int powernow_target(struct cpufreq_policy *policy, unsigned int index) |
252 | { | 252 | { |
253 | u8 fid, vid; | 253 | u8 fid, vid; |
254 | struct cpufreq_freqs freqs; | 254 | struct cpufreq_freqs freqs; |
@@ -291,6 +291,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) | |||
291 | local_irq_enable(); | 291 | local_irq_enable(); |
292 | 292 | ||
293 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 293 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
294 | |||
295 | return 0; | ||
294 | } | 296 | } |
295 | 297 | ||
296 | 298 | ||
@@ -533,27 +535,6 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
533 | } | 535 | } |
534 | 536 | ||
535 | 537 | ||
536 | static int powernow_target(struct cpufreq_policy *policy, | ||
537 | unsigned int target_freq, | ||
538 | unsigned int relation) | ||
539 | { | ||
540 | unsigned int newstate; | ||
541 | |||
542 | if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, | ||
543 | relation, &newstate)) | ||
544 | return -EINVAL; | ||
545 | |||
546 | change_speed(policy, newstate); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | |||
552 | static int powernow_verify(struct cpufreq_policy *policy) | ||
553 | { | ||
554 | return cpufreq_frequency_table_verify(policy, powernow_table); | ||
555 | } | ||
556 | |||
557 | /* | 538 | /* |
558 | * We use the fact that the bus frequency is somehow | 539 | * We use the fact that the bus frequency is somehow |
559 | * a multiple of 100000/3 khz, then we compute sgtc according | 540 | * a multiple of 100000/3 khz, then we compute sgtc according |
@@ -678,11 +659,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) | |||
678 | policy->cpuinfo.transition_latency = | 659 | policy->cpuinfo.transition_latency = |
679 | cpufreq_scale(2000000UL, fsb, latency); | 660 | cpufreq_scale(2000000UL, fsb, latency); |
680 | 661 | ||
681 | policy->cur = powernow_get(0); | 662 | return cpufreq_table_validate_and_show(policy, powernow_table); |
682 | |||
683 | cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); | ||
684 | |||
685 | return cpufreq_frequency_table_cpuinfo(policy, powernow_table); | ||
686 | } | 663 | } |
687 | 664 | ||
688 | static int powernow_cpu_exit(struct cpufreq_policy *policy) | 665 | static int powernow_cpu_exit(struct cpufreq_policy *policy) |
@@ -701,14 +678,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) | |||
701 | return 0; | 678 | return 0; |
702 | } | 679 | } |
703 | 680 | ||
704 | static struct freq_attr *powernow_table_attr[] = { | ||
705 | &cpufreq_freq_attr_scaling_available_freqs, | ||
706 | NULL, | ||
707 | }; | ||
708 | |||
709 | static struct cpufreq_driver powernow_driver = { | 681 | static struct cpufreq_driver powernow_driver = { |
710 | .verify = powernow_verify, | 682 | .verify = cpufreq_generic_frequency_table_verify, |
711 | .target = powernow_target, | 683 | .target_index = powernow_target, |
712 | .get = powernow_get, | 684 | .get = powernow_get, |
713 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | 685 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI |
714 | .bios_limit = acpi_processor_get_bios_limit, | 686 | .bios_limit = acpi_processor_get_bios_limit, |
@@ -716,7 +688,7 @@ static struct cpufreq_driver powernow_driver = { | |||
716 | .init = powernow_cpu_init, | 688 | .init = powernow_cpu_init, |
717 | .exit = powernow_cpu_exit, | 689 | .exit = powernow_cpu_exit, |
718 | .name = "powernow-k7", | 690 | .name = "powernow-k7", |
719 | .attr = powernow_table_attr, | 691 | .attr = cpufreq_generic_attr, |
720 | }; | 692 | }; |
721 | 693 | ||
722 | static int __init powernow_init(void) | 694 | static int __init powernow_init(void) |
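Every verify callback deleted in these patches was a one-line wrapper around cpufreq_frequency_table_verify() with the driver's own table. Once init publishes the table via cpufreq_table_validate_and_show(), the core can locate it on its own, which is essentially all the shared helper does. Roughly (a sketch of the idea, not the core's exact implementation):

	/* Approximate behaviour of cpufreq_generic_frequency_table_verify(): */
	static int generic_verify_sketch(struct cpufreq_policy *policy)
	{
		struct cpufreq_frequency_table *table =
			cpufreq_frequency_get_table(policy->cpu);

		if (!table)
			return -ENODEV;

		/* Clamp policy->min/max and ensure at least one usable entry. */
		return cpufreq_frequency_table_verify(policy, table);
	}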
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2344a9ed17f3..0023c7d40a51 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -977,20 +977,17 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
977 | 977 | ||
978 | struct powernowk8_target_arg { | 978 | struct powernowk8_target_arg { |
979 | struct cpufreq_policy *pol; | 979 | struct cpufreq_policy *pol; |
980 | unsigned targfreq; | 980 | unsigned newstate; |
981 | unsigned relation; | ||
982 | }; | 981 | }; |
983 | 982 | ||
984 | static long powernowk8_target_fn(void *arg) | 983 | static long powernowk8_target_fn(void *arg) |
985 | { | 984 | { |
986 | struct powernowk8_target_arg *pta = arg; | 985 | struct powernowk8_target_arg *pta = arg; |
987 | struct cpufreq_policy *pol = pta->pol; | 986 | struct cpufreq_policy *pol = pta->pol; |
988 | unsigned targfreq = pta->targfreq; | 987 | unsigned newstate = pta->newstate; |
989 | unsigned relation = pta->relation; | ||
990 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 988 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
991 | u32 checkfid; | 989 | u32 checkfid; |
992 | u32 checkvid; | 990 | u32 checkvid; |
993 | unsigned int newstate; | ||
994 | int ret; | 991 | int ret; |
995 | 992 | ||
996 | if (!data) | 993 | if (!data) |
@@ -1004,8 +1001,9 @@ static long powernowk8_target_fn(void *arg) | |||
1004 | return -EIO; | 1001 | return -EIO; |
1005 | } | 1002 | } |
1006 | 1003 | ||
1007 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | 1004 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n", |
1008 | pol->cpu, targfreq, pol->min, pol->max, relation); | 1005 | pol->cpu, data->powernow_table[newstate].frequency, pol->min, |
1006 | pol->max); | ||
1009 | 1007 | ||
1010 | if (query_current_values_with_pending_wait(data)) | 1008 | if (query_current_values_with_pending_wait(data)) |
1011 | return -EIO; | 1009 | return -EIO; |
@@ -1021,10 +1019,6 @@ static long powernowk8_target_fn(void *arg) | |||
1021 | checkvid, data->currvid); | 1019 | checkvid, data->currvid); |
1022 | } | 1020 | } |
1023 | 1021 | ||
1024 | if (cpufreq_frequency_table_target(pol, data->powernow_table, | ||
1025 | targfreq, relation, &newstate)) | ||
1026 | return -EIO; | ||
1027 | |||
1028 | mutex_lock(&fidvid_mutex); | 1022 | mutex_lock(&fidvid_mutex); |
1029 | 1023 | ||
1030 | powernow_k8_acpi_pst_values(data, newstate); | 1024 | powernow_k8_acpi_pst_values(data, newstate); |
@@ -1044,26 +1038,13 @@ static long powernowk8_target_fn(void *arg) | |||
1044 | } | 1038 | } |
1045 | 1039 | ||
1046 | /* Driver entry point to switch to the target frequency */ | 1040 | /* Driver entry point to switch to the target frequency */ |
1047 | static int powernowk8_target(struct cpufreq_policy *pol, | 1041 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned index) |
1048 | unsigned targfreq, unsigned relation) | ||
1049 | { | 1042 | { |
1050 | struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, | 1043 | struct powernowk8_target_arg pta = { .pol = pol, .newstate = index }; |
1051 | .relation = relation }; | ||
1052 | 1044 | ||
1053 | return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); | 1045 | return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); |
1054 | } | 1046 | } |
1055 | 1047 | ||
1056 | /* Driver entry point to verify the policy and range of frequencies */ | ||
1057 | static int powernowk8_verify(struct cpufreq_policy *pol) | ||
1058 | { | ||
1059 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | ||
1060 | |||
1061 | if (!data) | ||
1062 | return -EINVAL; | ||
1063 | |||
1064 | return cpufreq_frequency_table_verify(pol, data->powernow_table); | ||
1065 | } | ||
1066 | |||
1067 | struct init_on_cpu { | 1048 | struct init_on_cpu { |
1068 | struct powernow_k8_data *data; | 1049 | struct powernow_k8_data *data; |
1069 | int rc; | 1050 | int rc; |
@@ -1152,11 +1133,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1152 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); | 1133 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); |
1153 | data->available_cores = pol->cpus; | 1134 | data->available_cores = pol->cpus; |
1154 | 1135 | ||
1155 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1156 | pr_debug("policy current frequency %d kHz\n", pol->cur); | ||
1157 | |||
1158 | /* min/max the cpu is capable of */ | 1136 | /* min/max the cpu is capable of */ |
1159 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | 1137 | if (cpufreq_table_validate_and_show(pol, data->powernow_table)) { |
1160 | printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); | 1138 | printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); |
1161 | powernow_k8_cpu_exit_acpi(data); | 1139 | powernow_k8_cpu_exit_acpi(data); |
1162 | kfree(data->powernow_table); | 1140 | kfree(data->powernow_table); |
@@ -1164,8 +1142,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1164 | return -EINVAL; | 1142 | return -EINVAL; |
1165 | } | 1143 | } |
1166 | 1144 | ||
1167 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | ||
1168 | |||
1169 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", | 1145 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", |
1170 | data->currfid, data->currvid); | 1146 | data->currfid, data->currvid); |
1171 | 1147 | ||
@@ -1227,20 +1203,16 @@ out: | |||
1227 | return khz; | 1203 | return khz; |
1228 | } | 1204 | } |
1229 | 1205 | ||
1230 | static struct freq_attr *powernow_k8_attr[] = { | ||
1231 | &cpufreq_freq_attr_scaling_available_freqs, | ||
1232 | NULL, | ||
1233 | }; | ||
1234 | |||
1235 | static struct cpufreq_driver cpufreq_amd64_driver = { | 1206 | static struct cpufreq_driver cpufreq_amd64_driver = { |
1236 | .verify = powernowk8_verify, | 1207 | .flags = CPUFREQ_ASYNC_NOTIFICATION, |
1237 | .target = powernowk8_target, | 1208 | .verify = cpufreq_generic_frequency_table_verify, |
1209 | .target_index = powernowk8_target, | ||
1238 | .bios_limit = acpi_processor_get_bios_limit, | 1210 | .bios_limit = acpi_processor_get_bios_limit, |
1239 | .init = powernowk8_cpu_init, | 1211 | .init = powernowk8_cpu_init, |
1240 | .exit = powernowk8_cpu_exit, | 1212 | .exit = powernowk8_cpu_exit, |
1241 | .get = powernowk8_get, | 1213 | .get = powernowk8_get, |
1242 | .name = "powernow-k8", | 1214 | .name = "powernow-k8", |
1243 | .attr = powernow_k8_attr, | 1215 | .attr = cpufreq_generic_attr, |
1244 | }; | 1216 | }; |
1245 | 1217 | ||
1246 | static void __request_acpi_cpufreq(void) | 1218 | static void __request_acpi_cpufreq(void) |
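powernow-k8 is one of the drivers that cannot let the core post the transition notifications around ->target_index(): the actual switch is deferred to the target CPU via work_on_cpu(), so the notifications have to come from that context. The new CPUFREQ_ASYNC_NOTIFICATION flag set above tells the core to skip its own PRECHANGE/POSTCHANGE wrapping. A hedged sketch of the resulting split, with foo_* names and foo_write_pstate() invented for illustration:

	static int foo_do_switch(struct cpufreq_policy *policy, unsigned int index)
	{
		struct cpufreq_freqs freqs = {
			.old = policy->cur,
			.new = foo_freq_table[index].frequency,
		};
		int ret;

		/* Driver-side notifications, since the core was told not to. */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		ret = foo_write_pstate(index);		/* hypothetical P-state write */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

		return ret;
	}

	static struct cpufreq_driver foo_driver = {
		.flags		= CPUFREQ_ASYNC_NOTIFICATION,
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_do_switch,
		/* .init, .exit, .get, .name as usual */
	};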
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index 60e81d524ea8..3f7be46d2b27 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c | |||
@@ -69,8 +69,6 @@ static const struct soc_data sdata[] = { | |||
69 | static u32 min_cpufreq; | 69 | static u32 min_cpufreq; |
70 | static const u32 *fmask; | 70 | static const u32 *fmask; |
71 | 71 | ||
72 | /* serialize frequency changes */ | ||
73 | static DEFINE_MUTEX(cpufreq_lock); | ||
74 | static DEFINE_PER_CPU(struct cpu_data *, cpu_data); | 72 | static DEFINE_PER_CPU(struct cpu_data *, cpu_data); |
75 | 73 | ||
76 | /* cpumask in a cluster */ | 74 | /* cpumask in a cluster */ |
@@ -202,7 +200,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
202 | table[i].frequency = CPUFREQ_TABLE_END; | 200 | table[i].frequency = CPUFREQ_TABLE_END; |
203 | 201 | ||
204 | /* set the min and max frequency properly */ | 202 | /* set the min and max frequency properly */ |
205 | ret = cpufreq_frequency_table_cpuinfo(policy, table); | 203 | ret = cpufreq_table_validate_and_show(policy, table); |
206 | if (ret) { | 204 | if (ret) { |
207 | pr_err("invalid frequency table: %d\n", ret); | 205 | pr_err("invalid frequency table: %d\n", ret); |
208 | goto err_nomem1; | 206 | goto err_nomem1; |
@@ -217,9 +215,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
217 | per_cpu(cpu_data, i) = data; | 215 | per_cpu(cpu_data, i) = data; |
218 | 216 | ||
219 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 217 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
220 | policy->cur = corenet_cpufreq_get_speed(policy->cpu); | ||
221 | |||
222 | cpufreq_frequency_table_get_attr(table, cpu); | ||
223 | of_node_put(np); | 218 | of_node_put(np); |
224 | 219 | ||
225 | return 0; | 220 | return 0; |
@@ -253,60 +248,25 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
253 | return 0; | 248 | return 0; |
254 | } | 249 | } |
255 | 250 | ||
256 | static int corenet_cpufreq_verify(struct cpufreq_policy *policy) | ||
257 | { | ||
258 | struct cpufreq_frequency_table *table = | ||
259 | per_cpu(cpu_data, policy->cpu)->table; | ||
260 | |||
261 | return cpufreq_frequency_table_verify(policy, table); | ||
262 | } | ||
263 | |||
264 | static int corenet_cpufreq_target(struct cpufreq_policy *policy, | 251 | static int corenet_cpufreq_target(struct cpufreq_policy *policy, |
265 | unsigned int target_freq, unsigned int relation) | 252 | unsigned int index) |
266 | { | 253 | { |
267 | struct cpufreq_freqs freqs; | ||
268 | unsigned int new; | ||
269 | struct clk *parent; | 254 | struct clk *parent; |
270 | int ret; | ||
271 | struct cpu_data *data = per_cpu(cpu_data, policy->cpu); | 255 | struct cpu_data *data = per_cpu(cpu_data, policy->cpu); |
272 | 256 | ||
273 | cpufreq_frequency_table_target(policy, data->table, | 257 | parent = of_clk_get(data->parent, data->table[index].driver_data); |
274 | target_freq, relation, &new); | 258 | return clk_set_parent(data->clk, parent); |
275 | |||
276 | if (policy->cur == data->table[new].frequency) | ||
277 | return 0; | ||
278 | |||
279 | freqs.old = policy->cur; | ||
280 | freqs.new = data->table[new].frequency; | ||
281 | |||
282 | mutex_lock(&cpufreq_lock); | ||
283 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
284 | |||
285 | parent = of_clk_get(data->parent, data->table[new].driver_data); | ||
286 | ret = clk_set_parent(data->clk, parent); | ||
287 | if (ret) | ||
288 | freqs.new = freqs.old; | ||
289 | |||
290 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
291 | mutex_unlock(&cpufreq_lock); | ||
292 | |||
293 | return ret; | ||
294 | } | 259 | } |
295 | 260 | ||
296 | static struct freq_attr *corenet_cpufreq_attr[] = { | ||
297 | &cpufreq_freq_attr_scaling_available_freqs, | ||
298 | NULL, | ||
299 | }; | ||
300 | |||
301 | static struct cpufreq_driver ppc_corenet_cpufreq_driver = { | 261 | static struct cpufreq_driver ppc_corenet_cpufreq_driver = { |
302 | .name = "ppc_cpufreq", | 262 | .name = "ppc_cpufreq", |
303 | .flags = CPUFREQ_CONST_LOOPS, | 263 | .flags = CPUFREQ_CONST_LOOPS, |
304 | .init = corenet_cpufreq_cpu_init, | 264 | .init = corenet_cpufreq_cpu_init, |
305 | .exit = __exit_p(corenet_cpufreq_cpu_exit), | 265 | .exit = __exit_p(corenet_cpufreq_cpu_exit), |
306 | .verify = corenet_cpufreq_verify, | 266 | .verify = cpufreq_generic_frequency_table_verify, |
307 | .target = corenet_cpufreq_target, | 267 | .target_index = corenet_cpufreq_target, |
308 | .get = corenet_cpufreq_get_speed, | 268 | .get = corenet_cpufreq_get_speed, |
309 | .attr = corenet_cpufreq_attr, | 269 | .attr = cpufreq_generic_attr, |
310 | }; | 270 | }; |
311 | 271 | ||
312 | static const struct of_device_id node_matches[] __initdata = { | 272 | static const struct of_device_id node_matches[] __initdata = { |
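With the table lookup, mutex and notifications gone, the corenet callback reduces to a pure clock operation: the table's driver_data holds the index of the wanted parent clock, and switching frequency means re-parenting the CPU clock mux. A sketch of that pattern with illustrative foo_* names (the error check on of_clk_get() is an addition for robustness, not part of this patch):

	struct foo_cpu_data {			/* illustrative per-CPU bookkeeping */
		struct clk *clk;		/* the CPU clock (a mux) */
		struct device_node *parent;	/* node whose clocks feed the mux */
		struct cpufreq_frequency_table *table;
	};

	static DEFINE_PER_CPU(struct foo_cpu_data *, foo_cpu_data);

	static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
	{
		struct foo_cpu_data *data = per_cpu(foo_cpu_data, policy->cpu);
		struct clk *parent;

		/* driver_data stores the clock index of the desired mux input */
		parent = of_clk_get(data->parent, data->table[index].driver_data);
		if (IS_ERR(parent))
			return PTR_ERR(parent);

		return clk_set_parent(data->clk, parent);
	}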
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 2e448f0bbdc5..e42ca9c31cea 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c | |||
@@ -30,9 +30,6 @@ | |||
30 | 30 | ||
31 | #include "ppc_cbe_cpufreq.h" | 31 | #include "ppc_cbe_cpufreq.h" |
32 | 32 | ||
33 | static DEFINE_MUTEX(cbe_switch_mutex); | ||
34 | |||
35 | |||
36 | /* the CBE supports an 8 step frequency scaling */ | 33 | /* the CBE supports an 8 step frequency scaling */ |
37 | static struct cpufreq_frequency_table cbe_freqs[] = { | 34 | static struct cpufreq_frequency_table cbe_freqs[] = { |
38 | {1, 0}, | 35 | {1, 0}, |
@@ -123,63 +120,28 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
123 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); | 120 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
124 | #endif | 121 | #endif |
125 | 122 | ||
126 | cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); | ||
127 | |||
128 | /* this ensures that policy->cpuinfo_min | 123 | /* this ensures that policy->cpuinfo_min |
129 | * and policy->cpuinfo_max are set correctly */ | 124 | * and policy->cpuinfo_max are set correctly */ |
130 | return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); | 125 | return cpufreq_table_validate_and_show(policy, cbe_freqs); |
131 | } | ||
132 | |||
133 | static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
134 | { | ||
135 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int cbe_cpufreq_verify(struct cpufreq_policy *policy) | ||
140 | { | ||
141 | return cpufreq_frequency_table_verify(policy, cbe_freqs); | ||
142 | } | 126 | } |
143 | 127 | ||
144 | static int cbe_cpufreq_target(struct cpufreq_policy *policy, | 128 | static int cbe_cpufreq_target(struct cpufreq_policy *policy, |
145 | unsigned int target_freq, | 129 | unsigned int cbe_pmode_new) |
146 | unsigned int relation) | ||
147 | { | 130 | { |
148 | int rc; | ||
149 | struct cpufreq_freqs freqs; | ||
150 | unsigned int cbe_pmode_new; | ||
151 | |||
152 | cpufreq_frequency_table_target(policy, | ||
153 | cbe_freqs, | ||
154 | target_freq, | ||
155 | relation, | ||
156 | &cbe_pmode_new); | ||
157 | |||
158 | freqs.old = policy->cur; | ||
159 | freqs.new = cbe_freqs[cbe_pmode_new].frequency; | ||
160 | |||
161 | mutex_lock(&cbe_switch_mutex); | ||
162 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
163 | |||
164 | pr_debug("setting frequency for cpu %d to %d kHz, " \ | 131 | pr_debug("setting frequency for cpu %d to %d kHz, " \ |
165 | "1/%d of max frequency\n", | 132 | "1/%d of max frequency\n", |
166 | policy->cpu, | 133 | policy->cpu, |
167 | cbe_freqs[cbe_pmode_new].frequency, | 134 | cbe_freqs[cbe_pmode_new].frequency, |
168 | cbe_freqs[cbe_pmode_new].driver_data); | 135 | cbe_freqs[cbe_pmode_new].driver_data); |
169 | 136 | ||
170 | rc = set_pmode(policy->cpu, cbe_pmode_new); | 137 | return set_pmode(policy->cpu, cbe_pmode_new); |
171 | |||
172 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
173 | mutex_unlock(&cbe_switch_mutex); | ||
174 | |||
175 | return rc; | ||
176 | } | 138 | } |
177 | 139 | ||
178 | static struct cpufreq_driver cbe_cpufreq_driver = { | 140 | static struct cpufreq_driver cbe_cpufreq_driver = { |
179 | .verify = cbe_cpufreq_verify, | 141 | .verify = cpufreq_generic_frequency_table_verify, |
180 | .target = cbe_cpufreq_target, | 142 | .target_index = cbe_cpufreq_target, |
181 | .init = cbe_cpufreq_cpu_init, | 143 | .init = cbe_cpufreq_cpu_init, |
182 | .exit = cbe_cpufreq_cpu_exit, | 144 | .exit = cpufreq_generic_exit, |
183 | .name = "cbe-cpufreq", | 145 | .name = "cbe-cpufreq", |
184 | .flags = CPUFREQ_CONST_LOOPS, | 146 | .flags = CPUFREQ_CONST_LOOPS, |
185 | }; | 147 | }; |
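The dedicated exit callback dropped from the cbe driver did nothing but release the sysfs table attribute, so it can point at the shared helper instead. Approximately what cpufreq_generic_exit amounts to (a sketch, not the exact implementation):

	static int foo_cpu_exit(struct cpufreq_policy *policy)
	{
		cpufreq_frequency_table_put_attr(policy->cpu);
		return 0;
	}

Drivers with additional teardown (freeing tables, dropping clock or regulator references) keep their own .exit and call cpufreq_frequency_table_put_attr() from it.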
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 8749eaf18793..0a0f4369636a 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c | |||
@@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq) | |||
262 | return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; | 262 | return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; |
263 | } | 263 | } |
264 | 264 | ||
265 | /* find a valid frequency point */ | ||
266 | static int pxa_verify_policy(struct cpufreq_policy *policy) | ||
267 | { | ||
268 | struct cpufreq_frequency_table *pxa_freqs_table; | ||
269 | pxa_freqs_t *pxa_freqs; | ||
270 | int ret; | ||
271 | |||
272 | find_freq_tables(&pxa_freqs_table, &pxa_freqs); | ||
273 | ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); | ||
274 | |||
275 | if (freq_debug) | ||
276 | pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", | ||
277 | policy->min, policy->max); | ||
278 | |||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | static unsigned int pxa_cpufreq_get(unsigned int cpu) | 265 | static unsigned int pxa_cpufreq_get(unsigned int cpu) |
283 | { | 266 | { |
284 | return get_clk_frequency_khz(0); | 267 | return get_clk_frequency_khz(0); |
285 | } | 268 | } |
286 | 269 | ||
287 | static int pxa_set_target(struct cpufreq_policy *policy, | 270 | static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) |
288 | unsigned int target_freq, | ||
289 | unsigned int relation) | ||
290 | { | 271 | { |
291 | struct cpufreq_frequency_table *pxa_freqs_table; | 272 | struct cpufreq_frequency_table *pxa_freqs_table; |
292 | pxa_freqs_t *pxa_freq_settings; | 273 | pxa_freqs_t *pxa_freq_settings; |
293 | struct cpufreq_freqs freqs; | ||
294 | unsigned int idx; | ||
295 | unsigned long flags; | 274 | unsigned long flags; |
296 | unsigned int new_freq_cpu, new_freq_mem; | 275 | unsigned int new_freq_cpu, new_freq_mem; |
297 | unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; | 276 | unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; |
@@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
300 | /* Get the current policy */ | 279 | /* Get the current policy */ |
301 | find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); | 280 | find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); |
302 | 281 | ||
303 | /* Lookup the next frequency */ | ||
304 | if (cpufreq_frequency_table_target(policy, pxa_freqs_table, | ||
305 | target_freq, relation, &idx)) { | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | |||
309 | new_freq_cpu = pxa_freq_settings[idx].khz; | 282 | new_freq_cpu = pxa_freq_settings[idx].khz; |
310 | new_freq_mem = pxa_freq_settings[idx].membus; | 283 | new_freq_mem = pxa_freq_settings[idx].membus; |
311 | freqs.old = policy->cur; | ||
312 | freqs.new = new_freq_cpu; | ||
313 | 284 | ||
314 | if (freq_debug) | 285 | if (freq_debug) |
315 | pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", | 286 | pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", |
316 | freqs.new / 1000, (pxa_freq_settings[idx].div2) ? | 287 | new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ? |
317 | (new_freq_mem / 2000) : (new_freq_mem / 1000)); | 288 | (new_freq_mem / 2000) : (new_freq_mem / 1000)); |
318 | 289 | ||
319 | if (vcc_core && freqs.new > freqs.old) | 290 | if (vcc_core && new_freq_cpu > policy->cur) { |
320 | ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); | 291 | ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); |
321 | if (ret) | 292 | if (ret) |
322 | return ret; | 293 | return ret; |
323 | /* | 294 | } |
324 | * Tell everyone what we're about to do... | ||
325 | * you should add a notify client with any platform specific | ||
326 | * Vcc changing capability | ||
327 | */ | ||
328 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
329 | 295 | ||
330 | /* Calculate the next MDREFR. If we're slowing down the SDRAM clock | 296 | /* Calculate the next MDREFR. If we're slowing down the SDRAM clock |
331 | * we need to preset the smaller DRI before the change. If we're | 297 | * we need to preset the smaller DRI before the change. If we're |
@@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
376 | local_irq_restore(flags); | 342 | local_irq_restore(flags); |
377 | 343 | ||
378 | /* | 344 | /* |
379 | * Tell everyone what we've just done... | ||
380 | * you should add a notify client with any platform specific | ||
381 | * SDRAM refresh timer adjustments | ||
382 | */ | ||
383 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
384 | |||
385 | /* | ||
386 | * Even if voltage setting fails, we don't report it, as the frequency | 345 | * Even if voltage setting fails, we don't report it, as the frequency |
387 | * change succeeded. The voltage reduction is not a critical failure, | 346 | * change succeeded. The voltage reduction is not a critical failure, |
388 | * only power savings will suffer from this. | 347 | * only power savings will suffer from this. |
@@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
391 | * bug is triggered (seems a deadlock). Should anybody find out where, | 350 | * bug is triggered (seems a deadlock). Should anybody find out where, |
392 | * the "return 0" should become a "return ret". | 351 | * the "return 0" should become a "return ret". |
393 | */ | 352 | */ |
394 | if (vcc_core && freqs.new < freqs.old) | 353 | if (vcc_core && new_freq_cpu < policy->cur) |
395 | ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); | 354 | ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); |
396 | 355 | ||
397 | return 0; | 356 | return 0; |
@@ -414,8 +373,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
414 | 373 | ||
415 | /* set default policy and cpuinfo */ | 374 | /* set default policy and cpuinfo */ |
416 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ | 375 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ |
417 | policy->cur = get_clk_frequency_khz(0); /* current freq */ | ||
418 | policy->min = policy->max = policy->cur; | ||
419 | 376 | ||
420 | /* Generate pxa25x the run cpufreq_frequency_table struct */ | 377 | /* Generate pxa25x the run cpufreq_frequency_table struct */ |
421 | for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { | 378 | for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { |
@@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
453 | find_freq_tables(&pxa255_freq_table, &pxa255_freqs); | 410 | find_freq_tables(&pxa255_freq_table, &pxa255_freqs); |
454 | pr_info("PXA255 cpufreq using %s frequency table\n", | 411 | pr_info("PXA255 cpufreq using %s frequency table\n", |
455 | pxa255_turbo_table ? "turbo" : "run"); | 412 | pxa255_turbo_table ? "turbo" : "run"); |
456 | cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); | 413 | |
414 | cpufreq_table_validate_and_show(policy, pxa255_freq_table); | ||
415 | } | ||
416 | else if (cpu_is_pxa27x()) { | ||
417 | cpufreq_table_validate_and_show(policy, pxa27x_freq_table); | ||
457 | } | 418 | } |
458 | else if (cpu_is_pxa27x()) | ||
459 | cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); | ||
460 | 419 | ||
461 | printk(KERN_INFO "PXA CPU frequency change support initialized\n"); | 420 | printk(KERN_INFO "PXA CPU frequency change support initialized\n"); |
462 | 421 | ||
@@ -464,9 +423,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
464 | } | 423 | } |
465 | 424 | ||
466 | static struct cpufreq_driver pxa_cpufreq_driver = { | 425 | static struct cpufreq_driver pxa_cpufreq_driver = { |
467 | .verify = pxa_verify_policy, | 426 | .verify = cpufreq_generic_frequency_table_verify, |
468 | .target = pxa_set_target, | 427 | .target_index = pxa_set_target, |
469 | .init = pxa_cpufreq_init, | 428 | .init = pxa_cpufreq_init, |
429 | .exit = cpufreq_generic_exit, | ||
470 | .get = pxa_cpufreq_get, | 430 | .get = pxa_cpufreq_get, |
471 | .name = "PXA2xx", | 431 | .name = "PXA2xx", |
472 | }; | 432 | }; |
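What remains in the pxa2xx callback is the one ordering rule the core cannot enforce for the driver: raise the core voltage before clocking up, lower it only after clocking down, and treat a failure to lower as non-fatal because the new frequency is already in place. A hedged skeleton of that rule, with foo_program_frequency() and the per-index voltage arrays invented for illustration:

	static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
	{
		unsigned int old_khz = policy->cur;
		unsigned int new_khz = foo_freq_table[index].frequency;
		int ret;

		/* Going faster: make sure the rail can sustain it first. */
		if (vcc_core && new_khz > old_khz) {
			ret = regulator_set_voltage(vcc_core, foo_min_uv[index],
						    foo_max_uv[index]);
			if (ret)
				return ret;
		}

		ret = foo_program_frequency(index);	/* hypothetical hardware poke */
		if (ret)
			return ret;

		/* Going slower: drop the rail afterwards; failure only costs power. */
		if (vcc_core && new_khz < old_khz)
			regulator_set_voltage(vcc_core, foo_min_uv[index],
					      foo_max_uv[index]);

		return 0;
	}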
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c index d26306fb00d2..93840048dd11 100644 --- a/drivers/cpufreq/pxa3xx-cpufreq.c +++ b/drivers/cpufreq/pxa3xx-cpufreq.c | |||
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy, | |||
108 | pxa3xx_freqs_num = num; | 108 | pxa3xx_freqs_num = num; |
109 | pxa3xx_freqs_table = table; | 109 | pxa3xx_freqs_table = table; |
110 | 110 | ||
111 | return cpufreq_frequency_table_cpuinfo(policy, table); | 111 | return cpufreq_table_validate_and_show(policy, table); |
112 | } | 112 | } |
113 | 113 | ||
114 | static void __update_core_freq(struct pxa3xx_freq_info *info) | 114 | static void __update_core_freq(struct pxa3xx_freq_info *info) |
@@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info) | |||
150 | cpu_relax(); | 150 | cpu_relax(); |
151 | } | 151 | } |
152 | 152 | ||
153 | static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) | ||
154 | { | ||
155 | return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); | ||
156 | } | ||
157 | |||
158 | static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) | 153 | static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) |
159 | { | 154 | { |
160 | return pxa3xx_get_clk_frequency_khz(0); | 155 | return pxa3xx_get_clk_frequency_khz(0); |
161 | } | 156 | } |
162 | 157 | ||
163 | static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, | 158 | static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index) |
164 | unsigned int target_freq, | ||
165 | unsigned int relation) | ||
166 | { | 159 | { |
167 | struct pxa3xx_freq_info *next; | 160 | struct pxa3xx_freq_info *next; |
168 | struct cpufreq_freqs freqs; | ||
169 | unsigned long flags; | 161 | unsigned long flags; |
170 | int idx; | ||
171 | 162 | ||
172 | if (policy->cpu != 0) | 163 | if (policy->cpu != 0) |
173 | return -EINVAL; | 164 | return -EINVAL; |
174 | 165 | ||
175 | /* Lookup the next frequency */ | 166 | next = &pxa3xx_freqs[index]; |
176 | if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, | ||
177 | target_freq, relation, &idx)) | ||
178 | return -EINVAL; | ||
179 | |||
180 | next = &pxa3xx_freqs[idx]; | ||
181 | |||
182 | freqs.old = policy->cur; | ||
183 | freqs.new = next->cpufreq_mhz * 1000; | ||
184 | |||
185 | pr_debug("CPU frequency from %d MHz to %d MHz%s\n", | ||
186 | freqs.old / 1000, freqs.new / 1000, | ||
187 | (freqs.old == freqs.new) ? " (skipped)" : ""); | ||
188 | |||
189 | if (freqs.old == target_freq) | ||
190 | return 0; | ||
191 | |||
192 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
193 | 167 | ||
194 | local_irq_save(flags); | 168 | local_irq_save(flags); |
195 | __update_core_freq(next); | 169 | __update_core_freq(next); |
196 | __update_bus_freq(next); | 170 | __update_bus_freq(next); |
197 | local_irq_restore(flags); | 171 | local_irq_restore(flags); |
198 | 172 | ||
199 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
200 | |||
201 | return 0; | 173 | return 0; |
202 | } | 174 | } |
203 | 175 | ||
@@ -206,11 +178,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | |||
206 | int ret = -EINVAL; | 178 | int ret = -EINVAL; |
207 | 179 | ||
208 | /* set default policy and cpuinfo */ | 180 | /* set default policy and cpuinfo */ |
209 | policy->cpuinfo.min_freq = 104000; | 181 | policy->min = policy->cpuinfo.min_freq = 104000; |
210 | policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000; | 182 | policy->max = policy->cpuinfo.max_freq = |
183 | (cpu_is_pxa320()) ? 806000 : 624000; | ||
211 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ | 184 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ |
212 | policy->max = pxa3xx_get_clk_frequency_khz(0); | ||
213 | policy->cur = policy->min = policy->max; | ||
214 | 185 | ||
215 | if (cpu_is_pxa300() || cpu_is_pxa310()) | 186 | if (cpu_is_pxa300() || cpu_is_pxa310()) |
216 | ret = setup_freqs_table(policy, pxa300_freqs, | 187 | ret = setup_freqs_table(policy, pxa300_freqs, |
@@ -230,9 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | |||
230 | } | 201 | } |
231 | 202 | ||
232 | static struct cpufreq_driver pxa3xx_cpufreq_driver = { | 203 | static struct cpufreq_driver pxa3xx_cpufreq_driver = { |
233 | .verify = pxa3xx_cpufreq_verify, | 204 | .verify = cpufreq_generic_frequency_table_verify, |
234 | .target = pxa3xx_cpufreq_set, | 205 | .target_index = pxa3xx_cpufreq_set, |
235 | .init = pxa3xx_cpufreq_init, | 206 | .init = pxa3xx_cpufreq_init, |
207 | .exit = cpufreq_generic_exit, | ||
236 | .get = pxa3xx_cpufreq_get, | 208 | .get = pxa3xx_cpufreq_get, |
237 | .name = "pxa3xx-cpufreq", | 209 | .name = "pxa3xx-cpufreq", |
238 | }; | 210 | }; |
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 22dcb81ef9d0..8d904a00027b 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c | |||
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = { | |||
87 | { 0, CPUFREQ_TABLE_END }, | 87 | { 0, CPUFREQ_TABLE_END }, |
88 | }; | 88 | }; |
89 | 89 | ||
90 | static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
91 | { | ||
92 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | ||
93 | |||
94 | if (policy->cpu != 0) | ||
95 | return -EINVAL; | ||
96 | |||
97 | return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table); | ||
98 | } | ||
99 | |||
100 | static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) | 90 | static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) |
101 | { | 91 | { |
102 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | 92 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; |
@@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx) | |||
227 | } | 217 | } |
228 | 218 | ||
229 | static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, | 219 | static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, |
230 | unsigned int target_freq, | 220 | unsigned int index) |
231 | unsigned int relation) | ||
232 | { | 221 | { |
233 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | 222 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; |
234 | struct cpufreq_freqs freqs; | 223 | unsigned int new_freq; |
235 | int idx, ret, to_dvs = 0; | 224 | int idx, ret, to_dvs = 0; |
236 | unsigned int i; | ||
237 | 225 | ||
238 | mutex_lock(&cpufreq_lock); | 226 | mutex_lock(&cpufreq_lock); |
239 | 227 | ||
240 | pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation); | 228 | idx = s3c_freq->freq_table[index].driver_data; |
241 | |||
242 | ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table, | ||
243 | target_freq, relation, &i); | ||
244 | if (ret != 0) | ||
245 | goto out; | ||
246 | |||
247 | idx = s3c_freq->freq_table[i].driver_data; | ||
248 | 229 | ||
249 | if (idx == SOURCE_HCLK) | 230 | if (idx == SOURCE_HCLK) |
250 | to_dvs = 1; | 231 | to_dvs = 1; |
@@ -256,24 +237,13 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, | |||
256 | goto out; | 237 | goto out; |
257 | } | 238 | } |
258 | 239 | ||
259 | freqs.flags = 0; | ||
260 | freqs.old = s3c_freq->is_dvs ? FREQ_DVS | ||
261 | : clk_get_rate(s3c_freq->armclk) / 1000; | ||
262 | |||
263 | /* When leaving dvs mode, always switch the armdiv to the hclk rate | 240 | /* When leaving dvs mode, always switch the armdiv to the hclk rate |
264 | * The S3C2416 has stability issues when switching directly to | 241 | * The S3C2416 has stability issues when switching directly to |
265 | * higher frequencies. | 242 | * higher frequencies. |
266 | */ | 243 | */ |
267 | freqs.new = (s3c_freq->is_dvs && !to_dvs) | 244 | new_freq = (s3c_freq->is_dvs && !to_dvs) |
268 | ? clk_get_rate(s3c_freq->hclk) / 1000 | 245 | ? clk_get_rate(s3c_freq->hclk) / 1000 |
269 | : s3c_freq->freq_table[i].frequency; | 246 | : s3c_freq->freq_table[index].frequency; |
270 | |||
271 | pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); | ||
272 | |||
273 | if (!to_dvs && freqs.old == freqs.new) | ||
274 | goto out; | ||
275 | |||
276 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
277 | 247 | ||
278 | if (to_dvs) { | 248 | if (to_dvs) { |
279 | pr_debug("cpufreq: enter dvs\n"); | 249 | pr_debug("cpufreq: enter dvs\n"); |
@@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, | |||
282 | pr_debug("cpufreq: leave dvs\n"); | 252 | pr_debug("cpufreq: leave dvs\n"); |
283 | ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx); | 253 | ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx); |
284 | } else { | 254 | } else { |
285 | pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new); | 255 | pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq); |
286 | ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); | 256 | ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq); |
287 | } | 257 | } |
288 | 258 | ||
289 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
290 | |||
291 | out: | 259 | out: |
292 | mutex_unlock(&cpufreq_lock); | 260 | mutex_unlock(&cpufreq_lock); |
293 | 261 | ||
@@ -486,20 +454,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
486 | freq++; | 454 | freq++; |
487 | } | 455 | } |
488 | 456 | ||
489 | policy->cur = clk_get_rate(s3c_freq->armclk) / 1000; | ||
490 | |||
491 | /* Datasheet says PLL stabilisation time must be at least 300us, | 457 | /* Datasheet says PLL stabilisation time must be at least 300us, |
492 | * so add some fudge. (reference in LOCKCON0 register description) | 458 | * so add some fudge. (reference in LOCKCON0 register description) |
493 | */ | 459 | */ |
494 | policy->cpuinfo.transition_latency = (500 * 1000) + | 460 | ret = cpufreq_generic_init(policy, s3c_freq->freq_table, |
495 | s3c_freq->regulator_latency; | 461 | (500 * 1000) + s3c_freq->regulator_latency); |
496 | |||
497 | ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table); | ||
498 | if (ret) | 462 | if (ret) |
499 | goto err_freq_table; | 463 | goto err_freq_table; |
500 | 464 | ||
501 | cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0); | ||
502 | |||
503 | register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); | 465 | register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); |
504 | 466 | ||
505 | return 0; | 467 | return 0; |
@@ -518,19 +480,14 @@ err_hclk: | |||
518 | return ret; | 480 | return ret; |
519 | } | 481 | } |
520 | 482 | ||
521 | static struct freq_attr *s3c2416_cpufreq_attr[] = { | ||
522 | &cpufreq_freq_attr_scaling_available_freqs, | ||
523 | NULL, | ||
524 | }; | ||
525 | |||
526 | static struct cpufreq_driver s3c2416_cpufreq_driver = { | 483 | static struct cpufreq_driver s3c2416_cpufreq_driver = { |
527 | .flags = 0, | 484 | .flags = 0, |
528 | .verify = s3c2416_cpufreq_verify_speed, | 485 | .verify = cpufreq_generic_frequency_table_verify, |
529 | .target = s3c2416_cpufreq_set_target, | 486 | .target_index = s3c2416_cpufreq_set_target, |
530 | .get = s3c2416_cpufreq_get_speed, | 487 | .get = s3c2416_cpufreq_get_speed, |
531 | .init = s3c2416_cpufreq_driver_init, | 488 | .init = s3c2416_cpufreq_driver_init, |
532 | .name = "s3c2416", | 489 | .name = "s3c2416", |
533 | .attr = s3c2416_cpufreq_attr, | 490 | .attr = cpufreq_generic_attr, |
534 | }; | 491 | }; |
535 | 492 | ||
536 | static int __init s3c2416_cpufreq_init(void) | 493 | static int __init s3c2416_cpufreq_init(void) |
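The s3c2416 hunks also show how a driver keeps private per-entry information now that the core owns the index: the frequency table's driver_data field can encode whatever selector the driver needs (here it distinguishes the DVS/HCLK entry from armdiv settings), and ->target_index() simply reads it back. A minimal sketch with made-up selectors and helpers:

	#define FOO_SRC_ARMDIV	0
	#define FOO_SRC_HCLK	1		/* illustrative selectors */

	static struct cpufreq_frequency_table foo_freq_table[] = {
		{ .driver_data = FOO_SRC_HCLK,   .frequency = 133333 },
		{ .driver_data = FOO_SRC_ARMDIV, .frequency = 266666 },
		{ .driver_data = FOO_SRC_ARMDIV, .frequency = 400000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};

	static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
	{
		unsigned int sel = foo_freq_table[index].driver_data;

		if (sel == FOO_SRC_HCLK)
			return foo_enter_dvs();		/* hypothetical */

		return foo_set_armdiv(foo_freq_table[index].frequency);	/* hypothetical */
	}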
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index b0f343fcb7ee..485088253358 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c | |||
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) | |||
373 | 373 | ||
374 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) | 374 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) |
375 | { | 375 | { |
376 | printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy); | 376 | return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); |
377 | |||
378 | if (policy->cpu != 0) | ||
379 | return -EINVAL; | ||
380 | |||
381 | policy->cur = s3c_cpufreq_get(0); | ||
382 | policy->min = policy->cpuinfo.min_freq = 0; | ||
383 | policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000; | ||
384 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
385 | |||
386 | /* feed the latency information from the cpu driver */ | ||
387 | policy->cpuinfo.transition_latency = cpu_cur.info->latency; | ||
388 | |||
389 | if (ftab) | ||
390 | cpufreq_frequency_table_cpuinfo(policy, ftab); | ||
391 | |||
392 | return 0; | ||
393 | } | 377 | } |
394 | 378 | ||
395 | static int __init s3c_cpufreq_initclks(void) | 379 | static int __init s3c_cpufreq_initclks(void) |
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void) | |||
416 | return 0; | 400 | return 0; |
417 | } | 401 | } |
418 | 402 | ||
419 | static int s3c_cpufreq_verify(struct cpufreq_policy *policy) | ||
420 | { | ||
421 | if (policy->cpu != 0) | ||
422 | return -EINVAL; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | #ifdef CONFIG_PM | 403 | #ifdef CONFIG_PM |
428 | static struct cpufreq_frequency_table suspend_pll; | 404 | static struct cpufreq_frequency_table suspend_pll; |
429 | static unsigned int suspend_freq; | 405 | static unsigned int suspend_freq; |
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy) | |||
473 | 449 | ||
474 | static struct cpufreq_driver s3c24xx_driver = { | 450 | static struct cpufreq_driver s3c24xx_driver = { |
475 | .flags = CPUFREQ_STICKY, | 451 | .flags = CPUFREQ_STICKY, |
476 | .verify = s3c_cpufreq_verify, | ||
477 | .target = s3c_cpufreq_target, | 452 | .target = s3c_cpufreq_target, |
478 | .get = s3c_cpufreq_get, | 453 | .get = s3c_cpufreq_get, |
479 | .init = s3c_cpufreq_init, | 454 | .init = s3c_cpufreq_init, |
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 15631f92ab7d..67e302eeefec 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c | |||
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = { | |||
54 | }; | 54 | }; |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | if (policy->cpu != 0) | ||
60 | return -EINVAL; | ||
61 | |||
62 | return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table); | ||
63 | } | ||
64 | |||
65 | static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) | 57 | static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) |
66 | { | 58 | { |
67 | if (cpu != 0) | 59 | if (cpu != 0) |
@@ -71,66 +63,48 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) | |||
71 | } | 63 | } |
72 | 64 | ||
73 | static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, | 65 | static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, |
74 | unsigned int target_freq, | 66 | unsigned int index) |
75 | unsigned int relation) | ||
76 | { | 67 | { |
77 | int ret; | ||
78 | unsigned int i; | ||
79 | struct cpufreq_freqs freqs; | ||
80 | struct s3c64xx_dvfs *dvfs; | 68 | struct s3c64xx_dvfs *dvfs; |
69 | unsigned int old_freq, new_freq; | ||
70 | int ret; | ||
81 | 71 | ||
82 | ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table, | 72 | old_freq = clk_get_rate(armclk) / 1000; |
83 | target_freq, relation, &i); | 73 | new_freq = s3c64xx_freq_table[index].frequency; |
84 | if (ret != 0) | 74 | dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; |
85 | return ret; | ||
86 | |||
87 | freqs.old = clk_get_rate(armclk) / 1000; | ||
88 | freqs.new = s3c64xx_freq_table[i].frequency; | ||
89 | freqs.flags = 0; | ||
90 | dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data]; | ||
91 | |||
92 | if (freqs.old == freqs.new) | ||
93 | return 0; | ||
94 | |||
95 | pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); | ||
96 | |||
97 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
98 | 75 | ||
99 | #ifdef CONFIG_REGULATOR | 76 | #ifdef CONFIG_REGULATOR |
100 | if (vddarm && freqs.new > freqs.old) { | 77 | if (vddarm && new_freq > old_freq) { |
101 | ret = regulator_set_voltage(vddarm, | 78 | ret = regulator_set_voltage(vddarm, |
102 | dvfs->vddarm_min, | 79 | dvfs->vddarm_min, |
103 | dvfs->vddarm_max); | 80 | dvfs->vddarm_max); |
104 | if (ret != 0) { | 81 | if (ret != 0) { |
105 | pr_err("Failed to set VDDARM for %dkHz: %d\n", | 82 | pr_err("Failed to set VDDARM for %dkHz: %d\n", |
106 | freqs.new, ret); | 83 | new_freq, ret); |
107 | freqs.new = freqs.old; | 84 | return ret; |
108 | goto post_notify; | ||
109 | } | 85 | } |
110 | } | 86 | } |
111 | #endif | 87 | #endif |
112 | 88 | ||
113 | ret = clk_set_rate(armclk, freqs.new * 1000); | 89 | ret = clk_set_rate(armclk, new_freq * 1000); |
114 | if (ret < 0) { | 90 | if (ret < 0) { |
115 | pr_err("Failed to set rate %dkHz: %d\n", | 91 | pr_err("Failed to set rate %dkHz: %d\n", |
116 | freqs.new, ret); | 92 | new_freq, ret); |
117 | freqs.new = freqs.old; | 93 | return ret; |
118 | } | 94 | } |
119 | 95 | ||
120 | post_notify: | ||
121 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
122 | if (ret) | ||
123 | goto err; | ||
124 | |||
125 | #ifdef CONFIG_REGULATOR | 96 | #ifdef CONFIG_REGULATOR |
126 | if (vddarm && freqs.new < freqs.old) { | 97 | if (vddarm && new_freq < old_freq) { |
127 | ret = regulator_set_voltage(vddarm, | 98 | ret = regulator_set_voltage(vddarm, |
128 | dvfs->vddarm_min, | 99 | dvfs->vddarm_min, |
129 | dvfs->vddarm_max); | 100 | dvfs->vddarm_max); |
130 | if (ret != 0) { | 101 | if (ret != 0) { |
131 | pr_err("Failed to set VDDARM for %dkHz: %d\n", | 102 | pr_err("Failed to set VDDARM for %dkHz: %d\n", |
132 | freqs.new, ret); | 103 | new_freq, ret); |
133 | goto err_clk; | 104 | if (clk_set_rate(armclk, old_freq * 1000) < 0) |
105 | pr_err("Failed to restore original clock rate\n"); | ||
106 | |||
107 | return ret; | ||
134 | } | 108 | } |
135 | } | 109 | } |
136 | #endif | 110 | #endif |
@@ -139,14 +113,6 @@ post_notify: | |||
139 | clk_get_rate(armclk) / 1000); | 113 | clk_get_rate(armclk) / 1000); |
140 | 114 | ||
141 | return 0; | 115 | return 0; |
142 | |||
143 | err_clk: | ||
144 | if (clk_set_rate(armclk, freqs.old * 1000) < 0) | ||
145 | pr_err("Failed to restore original clock rate\n"); | ||
146 | err: | ||
147 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
148 | |||
149 | return ret; | ||
150 | } | 116 | } |
151 | 117 | ||
152 | #ifdef CONFIG_REGULATOR | 118 | #ifdef CONFIG_REGULATOR |
@@ -243,15 +209,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
243 | freq++; | 209 | freq++; |
244 | } | 210 | } |
245 | 211 | ||
246 | policy->cur = clk_get_rate(armclk) / 1000; | ||
247 | |||
248 | /* Datasheet says PLL stabilisation time (if we were to use | 212 | /* Datasheet says PLL stabilisation time (if we were to use |
249 | * the PLLs, which we don't currently) is ~300us worst case, | 213 | * the PLLs, which we don't currently) is ~300us worst case, |
250 | * but add some fudge. | 214 | * but add some fudge. |
251 | */ | 215 | */ |
252 | policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency; | 216 | ret = cpufreq_generic_init(policy, s3c64xx_freq_table, |
253 | 217 | (500 * 1000) + regulator_latency); | |
254 | ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); | ||
255 | if (ret != 0) { | 218 | if (ret != 0) { |
256 | pr_err("Failed to configure frequency table: %d\n", | 219 | pr_err("Failed to configure frequency table: %d\n", |
257 | ret); | 220 | ret); |
@@ -264,8 +227,8 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
264 | 227 | ||
265 | static struct cpufreq_driver s3c64xx_cpufreq_driver = { | 228 | static struct cpufreq_driver s3c64xx_cpufreq_driver = { |
266 | .flags = 0, | 229 | .flags = 0, |
267 | .verify = s3c64xx_cpufreq_verify_speed, | 230 | .verify = cpufreq_generic_frequency_table_verify, |
268 | .target = s3c64xx_cpufreq_set_target, | 231 | .target_index = s3c64xx_cpufreq_set_target, |
269 | .get = s3c64xx_cpufreq_get_speed, | 232 | .get = s3c64xx_cpufreq_get_speed, |
270 | .init = s3c64xx_cpufreq_driver_init, | 233 | .init = s3c64xx_cpufreq_driver_init, |
271 | .name = "s3c", | 234 | .name = "s3c", |
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5c7757073793..e3973dae28a7 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
@@ -26,7 +26,6 @@ | |||
26 | static struct clk *cpu_clk; | 26 | static struct clk *cpu_clk; |
27 | static struct clk *dmc0_clk; | 27 | static struct clk *dmc0_clk; |
28 | static struct clk *dmc1_clk; | 28 | static struct clk *dmc1_clk; |
29 | static struct cpufreq_freqs freqs; | ||
30 | static DEFINE_MUTEX(set_freq_lock); | 29 | static DEFINE_MUTEX(set_freq_lock); |
31 | 30 | ||
32 | /* APLL M,P,S values for 1G/800Mhz */ | 31 | /* APLL M,P,S values for 1G/800Mhz */ |
@@ -36,16 +35,7 @@ static DEFINE_MUTEX(set_freq_lock); | |||
36 | /* Use 800MHz when entering sleep mode */ | 35 | /* Use 800MHz when entering sleep mode */ |
37 | #define SLEEP_FREQ (800 * 1000) | 36 | #define SLEEP_FREQ (800 * 1000) |
38 | 37 | ||
39 | /* | 38 | /* Tracks if cpu frequency can be updated anymore */ |
40 | * relation has an additional symantics other than the standard of cpufreq | ||
41 | * DISALBE_FURTHER_CPUFREQ: disable further access to target | ||
42 | * ENABLE_FURTUER_CPUFREQ: enable access to target | ||
43 | */ | ||
44 | enum cpufreq_access { | ||
45 | DISABLE_FURTHER_CPUFREQ = 0x10, | ||
46 | ENABLE_FURTHER_CPUFREQ = 0x20, | ||
47 | }; | ||
48 | |||
49 | static bool no_cpufreq_access; | 39 | static bool no_cpufreq_access; |
50 | 40 | ||
51 | /* | 41 | /* |
@@ -174,14 +164,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) | |||
174 | __raw_writel(tmp1, reg); | 164 | __raw_writel(tmp1, reg); |
175 | } | 165 | } |
176 | 166 | ||
177 | static int s5pv210_verify_speed(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | if (policy->cpu) | ||
180 | return -EINVAL; | ||
181 | |||
182 | return cpufreq_frequency_table_verify(policy, s5pv210_freq_table); | ||
183 | } | ||
184 | |||
185 | static unsigned int s5pv210_getspeed(unsigned int cpu) | 167 | static unsigned int s5pv210_getspeed(unsigned int cpu) |
186 | { | 168 | { |
187 | if (cpu) | 169 | if (cpu) |
@@ -190,22 +172,18 @@ static unsigned int s5pv210_getspeed(unsigned int cpu) | |||
190 | return clk_get_rate(cpu_clk) / 1000; | 172 | return clk_get_rate(cpu_clk) / 1000; |
191 | } | 173 | } |
192 | 174 | ||
193 | static int s5pv210_target(struct cpufreq_policy *policy, | 175 | static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) |
194 | unsigned int target_freq, | ||
195 | unsigned int relation) | ||
196 | { | 176 | { |
197 | unsigned long reg; | 177 | unsigned long reg; |
198 | unsigned int index, priv_index; | 178 | unsigned int priv_index; |
199 | unsigned int pll_changing = 0; | 179 | unsigned int pll_changing = 0; |
200 | unsigned int bus_speed_changing = 0; | 180 | unsigned int bus_speed_changing = 0; |
181 | unsigned int old_freq, new_freq; | ||
201 | int arm_volt, int_volt; | 182 | int arm_volt, int_volt; |
202 | int ret = 0; | 183 | int ret = 0; |
203 | 184 | ||
204 | mutex_lock(&set_freq_lock); | 185 | mutex_lock(&set_freq_lock); |
205 | 186 | ||
206 | if (relation & ENABLE_FURTHER_CPUFREQ) | ||
207 | no_cpufreq_access = false; | ||
208 | |||
209 | if (no_cpufreq_access) { | 187 | if (no_cpufreq_access) { |
210 | #ifdef CONFIG_PM_VERBOSE | 188 | #ifdef CONFIG_PM_VERBOSE |
211 | pr_err("%s:%d denied access to %s as it is disabled" | 189 | pr_err("%s:%d denied access to %s as it is disabled" |
@@ -215,27 +193,13 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
215 | goto exit; | 193 | goto exit; |
216 | } | 194 | } |
217 | 195 | ||
218 | if (relation & DISABLE_FURTHER_CPUFREQ) | 196 | old_freq = s5pv210_getspeed(0); |
219 | no_cpufreq_access = true; | 197 | new_freq = s5pv210_freq_table[index].frequency; |
220 | |||
221 | relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ); | ||
222 | |||
223 | freqs.old = s5pv210_getspeed(0); | ||
224 | |||
225 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, | ||
226 | target_freq, relation, &index)) { | ||
227 | ret = -EINVAL; | ||
228 | goto exit; | ||
229 | } | ||
230 | |||
231 | freqs.new = s5pv210_freq_table[index].frequency; | ||
232 | |||
233 | if (freqs.new == freqs.old) | ||
234 | goto exit; | ||
235 | 198 | ||
236 | /* Finding current running level index */ | 199 | /* Finding current running level index */ |
237 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, | 200 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, |
238 | freqs.old, relation, &priv_index)) { | 201 | old_freq, CPUFREQ_RELATION_H, |
202 | &priv_index)) { | ||
239 | ret = -EINVAL; | 203 | ret = -EINVAL; |
240 | goto exit; | 204 | goto exit; |
241 | } | 205 | } |
@@ -243,7 +207,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
243 | arm_volt = dvs_conf[index].arm_volt; | 207 | arm_volt = dvs_conf[index].arm_volt; |
244 | int_volt = dvs_conf[index].int_volt; | 208 | int_volt = dvs_conf[index].int_volt; |
245 | 209 | ||
246 | if (freqs.new > freqs.old) { | 210 | if (new_freq > old_freq) { |
247 | ret = regulator_set_voltage(arm_regulator, | 211 | ret = regulator_set_voltage(arm_regulator, |
248 | arm_volt, arm_volt_max); | 212 | arm_volt, arm_volt_max); |
249 | if (ret) | 213 | if (ret) |
@@ -255,8 +219,6 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
255 | goto exit; | 219 | goto exit; |
256 | } | 220 | } |
257 | 221 | ||
258 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
259 | |||
260 | /* Check if the PLL needs to change */ | 222 |
261 | if ((index == L0) || (priv_index == L0)) | 223 | if ((index == L0) || (priv_index == L0)) |
262 | pll_changing = 1; | 224 | pll_changing = 1; |
@@ -467,9 +429,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
467 | } | 429 | } |
468 | } | 430 | } |
469 | 431 | ||
470 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 432 | if (new_freq < old_freq) { |
471 | |||
472 | if (freqs.new < freqs.old) { | ||
473 | regulator_set_voltage(int_regulator, | 433 | regulator_set_voltage(int_regulator, |
474 | int_volt, int_volt_max); | 434 | int_volt, int_volt_max); |
475 | 435 | ||
@@ -551,13 +511,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) | |||
551 | s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); | 511 | s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); |
552 | s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); | 512 | s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); |
553 | 513 | ||
554 | policy->cur = policy->min = policy->max = s5pv210_getspeed(0); | 514 | return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); |
555 | |||
556 | cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu); | ||
557 | |||
558 | policy->cpuinfo.transition_latency = 40000; | ||
559 | |||
560 | return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table); | ||
561 | 515 | ||
562 | out_dmc1: | 516 | out_dmc1: |
563 | clk_put(dmc0_clk); | 517 | clk_put(dmc0_clk); |
@@ -573,16 +527,18 @@ static int s5pv210_cpufreq_notifier_event(struct notifier_block *this, | |||
573 | 527 | ||
574 | switch (event) { | 528 | switch (event) { |
575 | case PM_SUSPEND_PREPARE: | 529 | case PM_SUSPEND_PREPARE: |
576 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 530 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); |
577 | DISABLE_FURTHER_CPUFREQ); | ||
578 | if (ret < 0) | 531 | if (ret < 0) |
579 | return NOTIFY_BAD; | 532 | return NOTIFY_BAD; |
580 | 533 | ||
534 | /* Disable cpu frequency updates */ | ||
535 | no_cpufreq_access = true; | ||
581 | return NOTIFY_OK; | 536 | return NOTIFY_OK; |
582 | case PM_POST_RESTORE: | 537 | case PM_POST_RESTORE: |
583 | case PM_POST_SUSPEND: | 538 | case PM_POST_SUSPEND: |
584 | cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 539 | /* Re-enable cpu frequency updates */ |
585 | ENABLE_FURTHER_CPUFREQ); | 540 | no_cpufreq_access = false; |
541 | cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); | ||
586 | 542 | ||
587 | return NOTIFY_OK; | 543 | return NOTIFY_OK; |
588 | } | 544 | } |
@@ -595,18 +551,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, | |||
595 | { | 551 | { |
596 | int ret; | 552 | int ret; |
597 | 553 | ||
598 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 554 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); |
599 | DISABLE_FURTHER_CPUFREQ); | ||
600 | if (ret < 0) | 555 | if (ret < 0) |
601 | return NOTIFY_BAD; | 556 | return NOTIFY_BAD; |
602 | 557 | ||
558 | no_cpufreq_access = true; | ||
603 | return NOTIFY_DONE; | 559 | return NOTIFY_DONE; |
604 | } | 560 | } |
605 | 561 | ||
606 | static struct cpufreq_driver s5pv210_driver = { | 562 | static struct cpufreq_driver s5pv210_driver = { |
607 | .flags = CPUFREQ_STICKY, | 563 | .flags = CPUFREQ_STICKY, |
608 | .verify = s5pv210_verify_speed, | 564 | .verify = cpufreq_generic_frequency_table_verify, |
609 | .target = s5pv210_target, | 565 | .target_index = s5pv210_target, |
610 | .get = s5pv210_getspeed, | 566 | .get = s5pv210_getspeed, |
611 | .init = s5pv210_cpu_init, | 567 | .init = s5pv210_cpu_init, |
612 | .name = "s5pv210", | 568 | .name = "s5pv210", |
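In the s5pv210 hunks, the old driver smuggled DISABLE_FURTHER_CPUFREQ/ENABLE_FURTHER_CPUFREQ flags through the relation argument of cpufreq_driver_target(); with ->target_index() there is no relation argument left to abuse, so the suspend and reboot notifiers now flip the no_cpufreq_access boolean directly around a plain target call. A rough sketch of that notifier shape, with SLEEP_KHZ standing in for the driver's SLEEP_FREQ:

#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

#define SLEEP_KHZ	(800 * 1000)	/* stand-in for the driver's SLEEP_FREQ */

static bool no_cpufreq_access;		/* checked at the top of ->target_index() */

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		/* Pin the CPU to the sleep frequency, then block further changes. */
		if (cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_KHZ, 0) < 0)
			return NOTIFY_BAD;
		no_cpufreq_access = true;
		return NOTIFY_OK;
	case PM_POST_SUSPEND:
		/* Re-enable changes, then retarget once so we resume from a known state. */
		no_cpufreq_access = false;
		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_KHZ, 0);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}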
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index cff18e87ca58..623da742f8e7 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c | |||
@@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed) | |||
177 | } | 177 | } |
178 | } | 178 | } |
179 | 179 | ||
180 | static int sa1100_target(struct cpufreq_policy *policy, | 180 | static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) |
181 | unsigned int target_freq, | ||
182 | unsigned int relation) | ||
183 | { | 181 | { |
184 | unsigned int cur = sa11x0_getspeed(0); | 182 | unsigned int cur = sa11x0_getspeed(0); |
185 | unsigned int new_ppcr; | 183 | unsigned int new_freq; |
186 | struct cpufreq_freqs freqs; | ||
187 | |||
188 | new_ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
189 | switch (relation) { | ||
190 | case CPUFREQ_RELATION_L: | ||
191 | if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) | ||
192 | new_ppcr--; | ||
193 | break; | ||
194 | case CPUFREQ_RELATION_H: | ||
195 | if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && | ||
196 | (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) | ||
197 | new_ppcr--; | ||
198 | break; | ||
199 | } | ||
200 | |||
201 | freqs.old = cur; | ||
202 | freqs.new = sa11x0_ppcr_to_freq(new_ppcr); | ||
203 | 184 | ||
204 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 185 | new_freq = sa11x0_freq_table[ppcr].frequency; |
205 | 186 | ||
206 | if (freqs.new > cur) | 187 | if (new_freq > cur) |
207 | sa1100_update_dram_timings(cur, freqs.new); | 188 | sa1100_update_dram_timings(cur, new_freq); |
208 | 189 | ||
209 | PPCR = new_ppcr; | 190 | PPCR = ppcr; |
210 | 191 | ||
211 | if (freqs.new < cur) | 192 | if (new_freq < cur) |
212 | sa1100_update_dram_timings(cur, freqs.new); | 193 | sa1100_update_dram_timings(cur, new_freq); |
213 | |||
214 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
215 | 194 | ||
216 | return 0; | 195 | return 0; |
217 | } | 196 | } |
218 | 197 | ||
219 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) | 198 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) |
220 | { | 199 | { |
221 | if (policy->cpu != 0) | 200 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); |
222 | return -EINVAL; | ||
223 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); | ||
224 | policy->cpuinfo.min_freq = 59000; | ||
225 | policy->cpuinfo.max_freq = 287000; | ||
226 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
227 | return 0; | ||
228 | } | 201 | } |
229 | 202 | ||
230 | static struct cpufreq_driver sa1100_driver __refdata = { | 203 | static struct cpufreq_driver sa1100_driver __refdata = { |
231 | .flags = CPUFREQ_STICKY, | 204 | .flags = CPUFREQ_STICKY, |
232 | .verify = sa11x0_verify_speed, | 205 | .verify = cpufreq_generic_frequency_table_verify, |
233 | .target = sa1100_target, | 206 | .target_index = sa1100_target, |
234 | .get = sa11x0_getspeed, | 207 | .get = sa11x0_getspeed, |
235 | .init = sa1100_cpu_init, | 208 | .init = sa1100_cpu_init, |
236 | .name = "sa1100", | 209 | .name = "sa1100", |
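The sa1100 conversion above is the clearest illustration of what ->target_index() buys: the core has already resolved an index into the table the driver published at init time, and it now issues the CPUFREQ_PRECHANGE/POSTCHANGE notifications around the callback itself, which is why every driver in this patch deletes its cpufreq_notify_transition() calls. A converted target callback reduces to roughly this shape, reusing example_freq_table from the earlier sketch:

/* Hypothetical hardware hook - would program PLL/dividers for the given rate. */
static int example_set_hw_rate(unsigned int khz)
{
	return 0;
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	unsigned int new_freq = example_freq_table[index].frequency;

	/* No table lookup, no relation handling, no notifications here. */
	return example_set_hw_rate(new_freq);
}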
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 39c90b6f4286..2c2b2e601d13 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c | |||
@@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) | |||
229 | /* | 229 | /* |
230 | * Ok, set the CPU frequency. | 230 | * Ok, set the CPU frequency. |
231 | */ | 231 | */ |
232 | static int sa1110_target(struct cpufreq_policy *policy, | 232 | static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) |
233 | unsigned int target_freq, | ||
234 | unsigned int relation) | ||
235 | { | 233 | { |
236 | struct sdram_params *sdram = &sdram_params; | 234 | struct sdram_params *sdram = &sdram_params; |
237 | struct cpufreq_freqs freqs; | ||
238 | struct sdram_info sd; | 235 | struct sdram_info sd; |
239 | unsigned long flags; | 236 | unsigned long flags; |
240 | unsigned int ppcr, unused; | 237 | unsigned int unused; |
241 | |||
242 | switch (relation) { | ||
243 | case CPUFREQ_RELATION_L: | ||
244 | ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
245 | if (sa11x0_ppcr_to_freq(ppcr) > policy->max) | ||
246 | ppcr--; | ||
247 | break; | ||
248 | case CPUFREQ_RELATION_H: | ||
249 | ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
250 | if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && | ||
251 | (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) | ||
252 | ppcr--; | ||
253 | break; | ||
254 | default: | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
258 | freqs.old = sa11x0_getspeed(0); | ||
259 | freqs.new = sa11x0_ppcr_to_freq(ppcr); | ||
260 | 238 | ||
261 | sdram_calculate_timing(&sd, freqs.new, sdram); | 239 | sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram); |
262 | 240 | ||
263 | #if 0 | 241 | #if 0 |
264 | /* | 242 | /* |
@@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy, | |||
277 | sd.mdcas[2] = 0xaaaaaaaa; | 255 | sd.mdcas[2] = 0xaaaaaaaa; |
278 | #endif | 256 | #endif |
279 | 257 | ||
280 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
281 | |||
282 | /* | 258 | /* |
283 | * The clock could be going away for some time. Set the SDRAMs | 259 | * The clock could be going away for some time. Set the SDRAMs |
284 | * to refresh rapidly (every 64 memory clock cycles). To get | 260 | * to refresh rapidly (every 64 memory clock cycles). To get |
@@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy, | |||
323 | /* | 299 | /* |
324 | * Now, return the SDRAM refresh back to normal. | 300 | * Now, return the SDRAM refresh back to normal. |
325 | */ | 301 | */ |
326 | sdram_update_refresh(freqs.new, sdram); | 302 | sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram); |
327 | |||
328 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
329 | 303 | ||
330 | return 0; | 304 | return 0; |
331 | } | 305 | } |
332 | 306 | ||
333 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) | 307 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) |
334 | { | 308 | { |
335 | if (policy->cpu != 0) | 309 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); |
336 | return -EINVAL; | ||
337 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); | ||
338 | policy->cpuinfo.min_freq = 59000; | ||
339 | policy->cpuinfo.max_freq = 287000; | ||
340 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
341 | return 0; | ||
342 | } | 310 | } |
343 | 311 | ||
344 | /* sa1110_driver needs __refdata because it must remain after init registers | 312 | /* sa1110_driver needs __refdata because it must remain after init registers |
345 | * it with cpufreq_register_driver() */ | 313 | * it with cpufreq_register_driver() */ |
346 | static struct cpufreq_driver sa1110_driver __refdata = { | 314 | static struct cpufreq_driver sa1110_driver __refdata = { |
347 | .flags = CPUFREQ_STICKY, | 315 | .flags = CPUFREQ_STICKY, |
348 | .verify = sa11x0_verify_speed, | 316 | .verify = cpufreq_generic_frequency_table_verify, |
349 | .target = sa1110_target, | 317 | .target_index = sa1110_target, |
350 | .get = sa11x0_getspeed, | 318 | .get = sa11x0_getspeed, |
351 | .init = sa1110_cpu_init, | 319 | .init = sa1110_cpu_init, |
352 | .name = "sa1110", | 320 | .name = "sa1110", |
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index d6f6c6f4efa7..6adb354e359c 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c | |||
@@ -53,21 +53,11 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) | |||
53 | } | 53 | } |
54 | } | 54 | } |
55 | 55 | ||
56 | static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, | 56 | static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state) |
57 | unsigned int state) | ||
58 | { | 57 | { |
59 | 58 | ||
60 | struct cpufreq_freqs freqs; | ||
61 | u8 clockspeed_reg; | 59 | u8 clockspeed_reg; |
62 | 60 | ||
63 | freqs.old = sc520_freq_get_cpu_frequency(0); | ||
64 | freqs.new = sc520_freq_table[state].frequency; | ||
65 | |||
66 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
67 | |||
68 | pr_debug("attempting to set frequency to %i kHz\n", | ||
69 | sc520_freq_table[state].frequency); | ||
70 | |||
71 | local_irq_disable(); | 61 | local_irq_disable(); |
72 | 62 | ||
73 | clockspeed_reg = *cpuctl & ~0x03; | 63 | clockspeed_reg = *cpuctl & ~0x03; |
@@ -75,30 +65,9 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
75 | 65 | ||
76 | local_irq_enable(); | 66 | local_irq_enable(); |
77 | 67 | ||
78 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
79 | }; | ||
80 | |||
81 | static int sc520_freq_verify(struct cpufreq_policy *policy) | ||
82 | { | ||
83 | return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); | ||
84 | } | ||
85 | |||
86 | static int sc520_freq_target(struct cpufreq_policy *policy, | ||
87 | unsigned int target_freq, | ||
88 | unsigned int relation) | ||
89 | { | ||
90 | unsigned int newstate = 0; | ||
91 | |||
92 | if (cpufreq_frequency_table_target(policy, sc520_freq_table, | ||
93 | target_freq, relation, &newstate)) | ||
94 | return -EINVAL; | ||
95 | |||
96 | sc520_freq_set_cpu_state(policy, newstate); | ||
97 | |||
98 | return 0; | 68 | return 0; |
99 | } | 69 | } |
100 | 70 | ||
101 | |||
102 | /* | 71 | /* |
103 | * Module init and exit code | 72 | * Module init and exit code |
104 | */ | 73 | */ |
@@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy, | |||
106 | static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | 75 | static int sc520_freq_cpu_init(struct cpufreq_policy *policy) |
107 | { | 76 | { |
108 | struct cpuinfo_x86 *c = &cpu_data(0); | 77 | struct cpuinfo_x86 *c = &cpu_data(0); |
109 | int result; | ||
110 | 78 | ||
111 | /* capability check */ | 79 | /* capability check */ |
112 | if (c->x86_vendor != X86_VENDOR_AMD || | 80 | if (c->x86_vendor != X86_VENDOR_AMD || |
@@ -115,39 +83,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | |||
115 | 83 | ||
116 | /* cpuinfo and default policy values */ | 84 | /* cpuinfo and default policy values */ |
117 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | 85 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ |
118 | policy->cur = sc520_freq_get_cpu_frequency(0); | ||
119 | |||
120 | result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); | ||
121 | if (result) | ||
122 | return result; | ||
123 | |||
124 | cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | 86 | ||
129 | 87 | return cpufreq_table_validate_and_show(policy, sc520_freq_table); | |
130 | static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) | ||
131 | { | ||
132 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
133 | return 0; | ||
134 | } | 88 | } |
135 | 89 | ||
136 | 90 | ||
137 | static struct freq_attr *sc520_freq_attr[] = { | ||
138 | &cpufreq_freq_attr_scaling_available_freqs, | ||
139 | NULL, | ||
140 | }; | ||
141 | |||
142 | |||
143 | static struct cpufreq_driver sc520_freq_driver = { | 91 | static struct cpufreq_driver sc520_freq_driver = { |
144 | .get = sc520_freq_get_cpu_frequency, | 92 | .get = sc520_freq_get_cpu_frequency, |
145 | .verify = sc520_freq_verify, | 93 | .verify = cpufreq_generic_frequency_table_verify, |
146 | .target = sc520_freq_target, | 94 | .target_index = sc520_freq_target, |
147 | .init = sc520_freq_cpu_init, | 95 | .init = sc520_freq_cpu_init, |
148 | .exit = sc520_freq_cpu_exit, | 96 | .exit = cpufreq_generic_exit, |
149 | .name = "sc520_freq", | 97 | .name = "sc520_freq", |
150 | .attr = sc520_freq_attr, | 98 | .attr = cpufreq_generic_attr, |
151 | }; | 99 | }; |
152 | 100 | ||
153 | static const struct x86_cpu_id sc520_ids[] = { | 101 | static const struct x86_cpu_id sc520_ids[] = { |
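With verify, exit and the sysfs attribute list all replaced by core-provided helpers, as in the sc520 hunk above, the per-driver cpufreq_driver definition shrinks to little more than its own callbacks. Continuing the hypothetical example_* driver from the sketches above:

static unsigned int example_get_speed(unsigned int cpu)
{
	return example_freq_table[0].frequency;	/* placeholder read-back */
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= example_get_speed,
	.init		= example_cpufreq_init,
	.exit		= cpufreq_generic_exit,	/* unpublishes the freq table */
	.attr		= cpufreq_generic_attr,	/* scaling_available_frequencies */
};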
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index ffc6d24b0cfb..387af12503a6 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c | |||
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy) | |||
87 | if (freq_table) | 87 | if (freq_table) |
88 | return cpufreq_frequency_table_verify(policy, freq_table); | 88 | return cpufreq_frequency_table_verify(policy, freq_table); |
89 | 89 | ||
90 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 90 | cpufreq_verify_within_cpu_limits(policy); |
91 | policy->cpuinfo.max_freq); | ||
92 | 91 | ||
93 | policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; | 92 | policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; |
94 | policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 93 | policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
95 | 94 | ||
96 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 95 | cpufreq_verify_within_cpu_limits(policy); |
97 | policy->cpuinfo.max_freq); | ||
98 | |||
99 | return 0; | 96 | return 0; |
100 | } | 97 | } |
101 | 98 | ||
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
114 | return PTR_ERR(cpuclk); | 111 | return PTR_ERR(cpuclk); |
115 | } | 112 | } |
116 | 113 | ||
117 | policy->cur = sh_cpufreq_get(cpu); | ||
118 | |||
119 | freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; | 114 | freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; |
120 | if (freq_table) { | 115 | if (freq_table) { |
121 | int result; | 116 | int result; |
122 | 117 | ||
123 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | 118 | result = cpufreq_table_validate_and_show(policy, freq_table); |
124 | if (!result) | 119 | if (result) |
125 | cpufreq_frequency_table_get_attr(freq_table, cpu); | 120 | return result; |
126 | } else { | 121 | } else { |
127 | dev_notice(dev, "no frequency table found, falling back " | 122 | dev_notice(dev, "no frequency table found, falling back " |
128 | "to rate rounding.\n"); | 123 | "to rate rounding.\n"); |
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
154 | return 0; | 149 | return 0; |
155 | } | 150 | } |
156 | 151 | ||
157 | static struct freq_attr *sh_freq_attr[] = { | ||
158 | &cpufreq_freq_attr_scaling_available_freqs, | ||
159 | NULL, | ||
160 | }; | ||
161 | |||
162 | static struct cpufreq_driver sh_cpufreq_driver = { | 152 | static struct cpufreq_driver sh_cpufreq_driver = { |
163 | .name = "sh", | 153 | .name = "sh", |
164 | .get = sh_cpufreq_get, | 154 | .get = sh_cpufreq_get, |
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = { | |||
166 | .verify = sh_cpufreq_verify, | 156 | .verify = sh_cpufreq_verify, |
167 | .init = sh_cpufreq_cpu_init, | 157 | .init = sh_cpufreq_cpu_init, |
168 | .exit = sh_cpufreq_cpu_exit, | 158 | .exit = sh_cpufreq_cpu_exit, |
169 | .attr = sh_freq_attr, | 159 | .attr = cpufreq_generic_attr, |
170 | }; | 160 | }; |
171 | 161 | ||
172 | static int __init sh_cpufreq_module_init(void) | 162 | static int __init sh_cpufreq_module_init(void) |
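sh-cpufreq is the odd one out: when the clock framework provides no frequency table it cannot use the table-based verify helper, so its hand-written verify stays and merely switches to cpufreq_verify_within_cpu_limits() for the clamping. Roughly, for a hypothetical table-less driver:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *example_cpuclk;	/* hypothetical cpu clock */

static int example_tableless_verify(struct cpufreq_policy *policy)
{
	/* Clamp the requested range to the static cpuinfo limits ... */
	cpufreq_verify_within_cpu_limits(policy);

	/* ... then tighten min/max to rates the clock can actually hit. */
	policy->min = (clk_round_rate(example_cpuclk, 1) + 500) / 1000;
	policy->max = (clk_round_rate(example_cpuclk, ~0UL) + 500) / 1000;

	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}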
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index cf5bc2ca16fa..62aa23e219d4 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c | |||
@@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu) | |||
245 | return clock_tick / estar_to_divisor(estar); | 245 | return clock_tick / estar_to_divisor(estar); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, | 248 | static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) |
249 | unsigned int index) | ||
250 | { | 249 | { |
251 | unsigned int cpu = policy->cpu; | 250 | unsigned int cpu = policy->cpu; |
252 | unsigned long new_bits, new_freq; | 251 | unsigned long new_bits, new_freq; |
253 | unsigned long clock_tick, divisor, old_divisor, estar; | 252 | unsigned long clock_tick, divisor, old_divisor, estar; |
254 | cpumask_t cpus_allowed; | 253 | cpumask_t cpus_allowed; |
255 | struct cpufreq_freqs freqs; | ||
256 | 254 | ||
257 | cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); | 255 | cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); |
258 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 256 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, | |||
266 | 264 | ||
267 | old_divisor = estar_to_divisor(estar); | 265 | old_divisor = estar_to_divisor(estar); |
268 | 266 | ||
269 | freqs.old = clock_tick / old_divisor; | ||
270 | freqs.new = new_freq; | ||
271 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
272 | |||
273 | if (old_divisor != divisor) | 267 | if (old_divisor != divisor) |
274 | us2e_transition(estar, new_bits, clock_tick * 1000, | 268 | us2e_transition(estar, new_bits, clock_tick * 1000, |
275 | old_divisor, divisor); | 269 | old_divisor, divisor); |
276 | 270 | ||
277 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
278 | |||
279 | set_cpus_allowed_ptr(current, &cpus_allowed); | 271 | set_cpus_allowed_ptr(current, &cpus_allowed); |
280 | } | ||
281 | |||
282 | static int us2e_freq_target(struct cpufreq_policy *policy, | ||
283 | unsigned int target_freq, | ||
284 | unsigned int relation) | ||
285 | { | ||
286 | unsigned int new_index = 0; | ||
287 | |||
288 | if (cpufreq_frequency_table_target(policy, | ||
289 | &us2e_freq_table[policy->cpu].table[0], | ||
290 | target_freq, relation, &new_index)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | us2e_set_cpu_divider_index(policy, new_index); | ||
294 | 272 | ||
295 | return 0; | 273 | return 0; |
296 | } | 274 | } |
297 | 275 | ||
298 | static int us2e_freq_verify(struct cpufreq_policy *policy) | ||
299 | { | ||
300 | return cpufreq_frequency_table_verify(policy, | ||
301 | &us2e_freq_table[policy->cpu].table[0]); | ||
302 | } | ||
303 | |||
304 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | 276 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) |
305 | { | 277 | { |
306 | unsigned int cpu = policy->cpu; | 278 | unsigned int cpu = policy->cpu; |
@@ -324,13 +296,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | |||
324 | policy->cpuinfo.transition_latency = 0; | 296 | policy->cpuinfo.transition_latency = 0; |
325 | policy->cur = clock_tick; | 297 | policy->cur = clock_tick; |
326 | 298 | ||
327 | return cpufreq_frequency_table_cpuinfo(policy, table); | 299 | return cpufreq_table_validate_and_show(policy, table); |
328 | } | 300 | } |
329 | 301 | ||
330 | static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) | 302 | static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) |
331 | { | 303 | { |
332 | if (cpufreq_us2e_driver) | 304 | if (cpufreq_us2e_driver) { |
333 | us2e_set_cpu_divider_index(policy, 0); | 305 | cpufreq_frequency_table_put_attr(policy->cpu); |
306 | us2e_freq_target(policy, 0); | ||
307 | } | ||
334 | 308 | ||
335 | return 0; | 309 | return 0; |
336 | } | 310 | } |
@@ -361,8 +335,8 @@ static int __init us2e_freq_init(void) | |||
361 | goto err_out; | 335 | goto err_out; |
362 | 336 | ||
363 | driver->init = us2e_freq_cpu_init; | 337 | driver->init = us2e_freq_cpu_init; |
364 | driver->verify = us2e_freq_verify; | 338 | driver->verify = cpufreq_generic_frequency_table_verify; |
365 | driver->target = us2e_freq_target; | 339 | driver->target_index = us2e_freq_target; |
366 | driver->get = us2e_freq_get; | 340 | driver->get = us2e_freq_get; |
367 | driver->exit = us2e_freq_cpu_exit; | 341 | driver->exit = us2e_freq_cpu_exit; |
368 | strcpy(driver->name, "UltraSPARC-IIe"); | 342 | strcpy(driver->name, "UltraSPARC-IIe"); |
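The UltraSPARC exit paths change in a less obvious way: with the internal set-divider helper folded into ->target_index(), ->exit() now calls the target routine directly with index 0 and takes over releasing the sysfs table attribute itself. Sketched for the hypothetical driver:

static int example_cpu_exit(struct cpufreq_policy *policy)
{
	/* Unpublish the frequency table from sysfs ... */
	cpufreq_frequency_table_put_attr(policy->cpu);
	/* ... and drop back to the first table entry on teardown. */
	return example_target_index(policy, 0);
}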
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index ac76b489979d..724ffbd7105d 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c | |||
@@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu) | |||
93 | return ret; | 93 | return ret; |
94 | } | 94 | } |
95 | 95 | ||
96 | static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, | 96 | static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) |
97 | unsigned int index) | ||
98 | { | 97 | { |
99 | unsigned int cpu = policy->cpu; | 98 | unsigned int cpu = policy->cpu; |
100 | unsigned long new_bits, new_freq, reg; | 99 | unsigned long new_bits, new_freq, reg; |
101 | cpumask_t cpus_allowed; | 100 | cpumask_t cpus_allowed; |
102 | struct cpufreq_freqs freqs; | ||
103 | 101 | ||
104 | cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); | 102 | cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); |
105 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 103 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, | |||
125 | 123 | ||
126 | reg = read_safari_cfg(); | 124 | reg = read_safari_cfg(); |
127 | 125 | ||
128 | freqs.old = get_current_freq(cpu, reg); | ||
129 | freqs.new = new_freq; | ||
130 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
131 | |||
132 | reg &= ~SAFARI_CFG_DIV_MASK; | 126 | reg &= ~SAFARI_CFG_DIV_MASK; |
133 | reg |= new_bits; | 127 | reg |= new_bits; |
134 | write_safari_cfg(reg); | 128 | write_safari_cfg(reg); |
135 | 129 | ||
136 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
137 | |||
138 | set_cpus_allowed_ptr(current, &cpus_allowed); | 130 | set_cpus_allowed_ptr(current, &cpus_allowed); |
139 | } | ||
140 | |||
141 | static int us3_freq_target(struct cpufreq_policy *policy, | ||
142 | unsigned int target_freq, | ||
143 | unsigned int relation) | ||
144 | { | ||
145 | unsigned int new_index = 0; | ||
146 | |||
147 | if (cpufreq_frequency_table_target(policy, | ||
148 | &us3_freq_table[policy->cpu].table[0], | ||
149 | target_freq, | ||
150 | relation, | ||
151 | &new_index)) | ||
152 | return -EINVAL; | ||
153 | |||
154 | us3_set_cpu_divider_index(policy, new_index); | ||
155 | 131 | ||
156 | return 0; | 132 | return 0; |
157 | } | 133 | } |
158 | 134 | ||
159 | static int us3_freq_verify(struct cpufreq_policy *policy) | ||
160 | { | ||
161 | return cpufreq_frequency_table_verify(policy, | ||
162 | &us3_freq_table[policy->cpu].table[0]); | ||
163 | } | ||
164 | |||
165 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) | 135 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) |
166 | { | 136 | { |
167 | unsigned int cpu = policy->cpu; | 137 | unsigned int cpu = policy->cpu; |
@@ -181,13 +151,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) | |||
181 | policy->cpuinfo.transition_latency = 0; | 151 | policy->cpuinfo.transition_latency = 0; |
182 | policy->cur = clock_tick; | 152 | policy->cur = clock_tick; |
183 | 153 | ||
184 | return cpufreq_frequency_table_cpuinfo(policy, table); | 154 | return cpufreq_table_validate_and_show(policy, table); |
185 | } | 155 | } |
186 | 156 | ||
187 | static int us3_freq_cpu_exit(struct cpufreq_policy *policy) | 157 | static int us3_freq_cpu_exit(struct cpufreq_policy *policy) |
188 | { | 158 | { |
189 | if (cpufreq_us3_driver) | 159 | if (cpufreq_us3_driver) { |
190 | us3_set_cpu_divider_index(policy, 0); | 160 | cpufreq_frequency_table_put_attr(policy->cpu); |
161 | us3_freq_target(policy, 0); | ||
162 | } | ||
191 | 163 | ||
192 | return 0; | 164 | return 0; |
193 | } | 165 | } |
@@ -222,8 +194,8 @@ static int __init us3_freq_init(void) | |||
222 | goto err_out; | 194 | goto err_out; |
223 | 195 | ||
224 | driver->init = us3_freq_cpu_init; | 196 | driver->init = us3_freq_cpu_init; |
225 | driver->verify = us3_freq_verify; | 197 | driver->verify = cpufreq_generic_frequency_table_verify; |
226 | driver->target = us3_freq_target; | 198 | driver->target_index = us3_freq_target; |
227 | driver->get = us3_freq_get; | 199 | driver->get = us3_freq_get; |
228 | driver->exit = us3_freq_cpu_exit; | 200 | driver->exit = us3_freq_cpu_exit; |
229 | strcpy(driver->name, "UltraSPARC-III"); | 201 | strcpy(driver->name, "UltraSPARC-III"); |
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 3f418166ce02..d02ccd19c9c4 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -30,11 +30,6 @@ static struct { | |||
30 | u32 cnt; | 30 | u32 cnt; |
31 | } spear_cpufreq; | 31 | } spear_cpufreq; |
32 | 32 | ||
33 | static int spear_cpufreq_verify(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); | ||
36 | } | ||
37 | |||
38 | static unsigned int spear_cpufreq_get(unsigned int cpu) | 33 | static unsigned int spear_cpufreq_get(unsigned int cpu) |
39 | { | 34 | { |
40 | return clk_get_rate(spear_cpufreq.clk) / 1000; | 35 | return clk_get_rate(spear_cpufreq.clk) / 1000; |
@@ -110,20 +105,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) | |||
110 | } | 105 | } |
111 | 106 | ||
112 | static int spear_cpufreq_target(struct cpufreq_policy *policy, | 107 | static int spear_cpufreq_target(struct cpufreq_policy *policy, |
113 | unsigned int target_freq, unsigned int relation) | 108 | unsigned int index) |
114 | { | 109 | { |
115 | struct cpufreq_freqs freqs; | ||
116 | long newfreq; | 110 | long newfreq; |
117 | struct clk *srcclk; | 111 | struct clk *srcclk; |
118 | int index, ret, mult = 1; | 112 | int ret, mult = 1; |
119 | |||
120 | if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, | ||
121 | target_freq, relation, &index)) | ||
122 | return -EINVAL; | ||
123 | |||
124 | freqs.old = spear_cpufreq_get(0); | ||
125 | 113 | ||
126 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; | 114 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; |
115 | |||
127 | if (of_machine_is_compatible("st,spear1340")) { | 116 | if (of_machine_is_compatible("st,spear1340")) { |
128 | /* | 117 | /* |
129 | * SPEAr1340 is special in the sense that due to the possibility | 118 | * SPEAr1340 is special in the sense that due to the possibility |
@@ -154,65 +143,32 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, | |||
154 | return newfreq; | 143 | return newfreq; |
155 | } | 144 | } |
156 | 145 | ||
157 | freqs.new = newfreq / 1000; | ||
158 | freqs.new /= mult; | ||
159 | |||
160 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
161 | |||
162 | if (mult == 2) | 146 | if (mult == 2) |
163 | ret = spear1340_set_cpu_rate(srcclk, newfreq); | 147 | ret = spear1340_set_cpu_rate(srcclk, newfreq); |
164 | else | 148 | else |
165 | ret = clk_set_rate(spear_cpufreq.clk, newfreq); | 149 | ret = clk_set_rate(spear_cpufreq.clk, newfreq); |
166 | 150 | ||
167 | /* Get current rate after clk_set_rate, in case of failure */ | 151 | if (ret) |
168 | if (ret) { | ||
169 | pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); | 152 | pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); |
170 | freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; | ||
171 | } | ||
172 | 153 | ||
173 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
174 | return ret; | 154 | return ret; |
175 | } | 155 | } |
176 | 156 | ||
177 | static int spear_cpufreq_init(struct cpufreq_policy *policy) | 157 | static int spear_cpufreq_init(struct cpufreq_policy *policy) |
178 | { | 158 | { |
179 | int ret; | 159 | return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, |
180 | 160 | spear_cpufreq.transition_latency); | |
181 | ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); | ||
182 | if (ret) { | ||
183 | pr_err("cpufreq_frequency_table_cpuinfo() failed"); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); | ||
188 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; | ||
189 | policy->cur = spear_cpufreq_get(0); | ||
190 | |||
191 | cpumask_setall(policy->cpus); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static int spear_cpufreq_exit(struct cpufreq_policy *policy) | ||
197 | { | ||
198 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
199 | return 0; | ||
200 | } | 161 | } |
201 | 162 | ||
202 | static struct freq_attr *spear_cpufreq_attr[] = { | ||
203 | &cpufreq_freq_attr_scaling_available_freqs, | ||
204 | NULL, | ||
205 | }; | ||
206 | |||
207 | static struct cpufreq_driver spear_cpufreq_driver = { | 163 | static struct cpufreq_driver spear_cpufreq_driver = { |
208 | .name = "cpufreq-spear", | 164 | .name = "cpufreq-spear", |
209 | .flags = CPUFREQ_STICKY, | 165 | .flags = CPUFREQ_STICKY, |
210 | .verify = spear_cpufreq_verify, | 166 | .verify = cpufreq_generic_frequency_table_verify, |
211 | .target = spear_cpufreq_target, | 167 | .target_index = spear_cpufreq_target, |
212 | .get = spear_cpufreq_get, | 168 | .get = spear_cpufreq_get, |
213 | .init = spear_cpufreq_init, | 169 | .init = spear_cpufreq_init, |
214 | .exit = spear_cpufreq_exit, | 170 | .exit = cpufreq_generic_exit, |
215 | .attr = spear_cpufreq_attr, | 171 | .attr = cpufreq_generic_attr, |
216 | }; | 172 | }; |
217 | 173 | ||
218 | static int spear_cpufreq_driver_init(void) | 174 | static int spear_cpufreq_driver_init(void) |
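spear's error path also simplifies: the driver used to re-read the clock rate and publish a corrected POSTCHANGE notification when clk_set_rate() failed, but with ->target_index() it just returns the error and the core reports the unchanged frequency. Approximately, again with the hypothetical example_* names:

static int example_clk_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	unsigned long newfreq = example_freq_table[index].frequency * 1000UL;
	int ret;

	ret = clk_set_rate(example_cpuclk, newfreq);
	if (ret)
		pr_err("cpu clk_set_rate failed: %d\n", ret);

	/* On failure the core falls back to notifying the old frequency. */
	return ret;
}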
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index f897d5105842..4e1daca5ce3b 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c | |||
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu) | |||
343 | static int centrino_cpu_init(struct cpufreq_policy *policy) | 343 | static int centrino_cpu_init(struct cpufreq_policy *policy) |
344 | { | 344 | { |
345 | struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); | 345 | struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); |
346 | unsigned freq; | ||
347 | unsigned l, h; | 346 | unsigned l, h; |
348 | int ret; | ||
349 | int i; | 347 | int i; |
350 | 348 | ||
351 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ | 349 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ |
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
373 | return -ENODEV; | 371 | return -ENODEV; |
374 | } | 372 | } |
375 | 373 | ||
376 | if (centrino_cpu_init_table(policy)) { | 374 | if (centrino_cpu_init_table(policy)) |
377 | return -ENODEV; | 375 | return -ENODEV; |
378 | } | ||
379 | 376 | ||
380 | /* Check to see if Enhanced SpeedStep is enabled, and try to | 377 | /* Check to see if Enhanced SpeedStep is enabled, and try to |
381 | enable it if not. */ | 378 | enable it if not. */ |
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
395 | } | 392 | } |
396 | } | 393 | } |
397 | 394 | ||
398 | freq = get_cur_freq(policy->cpu); | ||
399 | policy->cpuinfo.transition_latency = 10000; | 395 | policy->cpuinfo.transition_latency = 10000; |
400 | /* 10uS transition latency */ | 396 | /* 10uS transition latency */ |
401 | policy->cur = freq; | ||
402 | |||
403 | pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); | ||
404 | 397 | ||
405 | ret = cpufreq_frequency_table_cpuinfo(policy, | 398 | return cpufreq_table_validate_and_show(policy, |
406 | per_cpu(centrino_model, policy->cpu)->op_points); | 399 | per_cpu(centrino_model, policy->cpu)->op_points); |
407 | if (ret) | ||
408 | return (ret); | ||
409 | |||
410 | cpufreq_frequency_table_get_attr( | ||
411 | per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu); | ||
412 | |||
413 | return 0; | ||
414 | } | 400 | } |
415 | 401 | ||
416 | static int centrino_cpu_exit(struct cpufreq_policy *policy) | 402 | static int centrino_cpu_exit(struct cpufreq_policy *policy) |
@@ -428,36 +414,18 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) | |||
428 | } | 414 | } |
429 | 415 | ||
430 | /** | 416 | /** |
431 | * centrino_verify - verifies a new CPUFreq policy | ||
432 | * @policy: new policy | ||
433 | * | ||
434 | * Limit must be within this model's frequency range at least one | ||
435 | * border included. | ||
436 | */ | ||
437 | static int centrino_verify (struct cpufreq_policy *policy) | ||
438 | { | ||
439 | return cpufreq_frequency_table_verify(policy, | ||
440 | per_cpu(centrino_model, policy->cpu)->op_points); | ||
441 | } | ||
442 | |||
443 | /** | ||
444 | * centrino_target - set a new CPUFreq policy | 417 | * centrino_target - set a new CPUFreq policy |
445 | * @policy: new policy | 418 | * @policy: new policy |
446 | * @target_freq: the target frequency | 419 | * @index: index of target frequency |
447 | * @relation: how that frequency relates to achieved frequency | ||
448 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
449 | * | 420 | * |
450 | * Sets a new CPUFreq policy. | 421 | * Sets a new CPUFreq policy. |
451 | */ | 422 | */ |
452 | static int centrino_target (struct cpufreq_policy *policy, | 423 | static int centrino_target(struct cpufreq_policy *policy, unsigned int index) |
453 | unsigned int target_freq, | ||
454 | unsigned int relation) | ||
455 | { | 424 | { |
456 | unsigned int newstate = 0; | ||
457 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; | 425 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; |
458 | struct cpufreq_freqs freqs; | ||
459 | int retval = 0; | 426 | int retval = 0; |
460 | unsigned int j, first_cpu, tmp; | 427 | unsigned int j, first_cpu; |
428 | struct cpufreq_frequency_table *op_points; | ||
461 | cpumask_var_t covered_cpus; | 429 | cpumask_var_t covered_cpus; |
462 | 430 | ||
463 | if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) | 431 | if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) |
@@ -468,16 +436,8 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
468 | goto out; | 436 | goto out; |
469 | } | 437 | } |
470 | 438 | ||
471 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
472 | per_cpu(centrino_model, cpu)->op_points, | ||
473 | target_freq, | ||
474 | relation, | ||
475 | &newstate))) { | ||
476 | retval = -EINVAL; | ||
477 | goto out; | ||
478 | } | ||
479 | |||
480 | first_cpu = 1; | 439 | first_cpu = 1; |
440 | op_points = &per_cpu(centrino_model, cpu)->op_points[index]; | ||
481 | for_each_cpu(j, policy->cpus) { | 441 | for_each_cpu(j, policy->cpus) { |
482 | int good_cpu; | 442 | int good_cpu; |
483 | 443 | ||
@@ -501,7 +461,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
501 | break; | 461 | break; |
502 | } | 462 | } |
503 | 463 | ||
504 | msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data; | 464 | msr = op_points->driver_data; |
505 | 465 | ||
506 | if (first_cpu) { | 466 | if (first_cpu) { |
507 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); | 467 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); |
@@ -512,15 +472,6 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
512 | goto out; | 472 | goto out; |
513 | } | 473 | } |
514 | 474 | ||
515 | freqs.old = extract_clock(oldmsr, cpu, 0); | ||
516 | freqs.new = extract_clock(msr, cpu, 0); | ||
517 | |||
518 | pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", | ||
519 | target_freq, freqs.old, freqs.new, msr); | ||
520 | |||
521 | cpufreq_notify_transition(policy, &freqs, | ||
522 | CPUFREQ_PRECHANGE); | ||
523 | |||
524 | first_cpu = 0; | 475 | first_cpu = 0; |
525 | /* all but 16 LSB are reserved, treat them with care */ | 476 | /* all but 16 LSB are reserved, treat them with care */ |
526 | oldmsr &= ~0xffff; | 477 | oldmsr &= ~0xffff; |
@@ -535,8 +486,6 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
535 | cpumask_set_cpu(j, covered_cpus); | 486 | cpumask_set_cpu(j, covered_cpus); |
536 | } | 487 | } |
537 | 488 | ||
538 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
539 | |||
540 | if (unlikely(retval)) { | 489 | if (unlikely(retval)) { |
541 | /* | 490 | /* |
542 | * We have failed halfway through the frequency change. | 491 | * We have failed halfway through the frequency change. |
@@ -547,12 +496,6 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
547 | 496 | ||
548 | for_each_cpu(j, covered_cpus) | 497 | for_each_cpu(j, covered_cpus) |
549 | wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); | 498 | wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); |
550 | |||
551 | tmp = freqs.new; | ||
552 | freqs.new = freqs.old; | ||
553 | freqs.old = tmp; | ||
554 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
555 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
556 | } | 499 | } |
557 | retval = 0; | 500 | retval = 0; |
558 | 501 | ||
@@ -561,20 +504,15 @@ out: | |||
561 | return retval; | 504 | return retval; |
562 | } | 505 | } |
563 | 506 | ||
564 | static struct freq_attr* centrino_attr[] = { | ||
565 | &cpufreq_freq_attr_scaling_available_freqs, | ||
566 | NULL, | ||
567 | }; | ||
568 | |||
569 | static struct cpufreq_driver centrino_driver = { | 507 | static struct cpufreq_driver centrino_driver = { |
570 | .name = "centrino", /* should be speedstep-centrino, | 508 | .name = "centrino", /* should be speedstep-centrino, |
571 | but there's a 16 char limit */ | 509 | but there's a 16 char limit */ |
572 | .init = centrino_cpu_init, | 510 | .init = centrino_cpu_init, |
573 | .exit = centrino_cpu_exit, | 511 | .exit = centrino_cpu_exit, |
574 | .verify = centrino_verify, | 512 | .verify = cpufreq_generic_frequency_table_verify, |
575 | .target = centrino_target, | 513 | .target_index = centrino_target, |
576 | .get = get_cur_freq, | 514 | .get = get_cur_freq, |
577 | .attr = centrino_attr, | 515 | .attr = cpufreq_generic_attr, |
578 | }; | 516 | }; |
579 | 517 | ||
580 | /* | 518 | /* |
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 5355abb69afc..7639b2be2a90 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu) | |||
251 | /** | 251 | /** |
252 | * speedstep_target - set a new CPUFreq policy | 252 | * speedstep_target - set a new CPUFreq policy |
253 | * @policy: new policy | 253 | * @policy: new policy |
254 | * @target_freq: the target frequency | 254 | * @index: index of target frequency |
255 | * @relation: how that frequency relates to achieved frequency | ||
256 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
257 | * | 255 | * |
258 | * Sets a new CPUFreq policy. | 256 | * Sets a new CPUFreq policy. |
259 | */ | 257 | */ |
260 | static int speedstep_target(struct cpufreq_policy *policy, | 258 | static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) |
261 | unsigned int target_freq, | ||
262 | unsigned int relation) | ||
263 | { | 259 | { |
264 | unsigned int newstate = 0, policy_cpu; | 260 | unsigned int policy_cpu; |
265 | struct cpufreq_freqs freqs; | ||
266 | |||
267 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], | ||
268 | target_freq, relation, &newstate)) | ||
269 | return -EINVAL; | ||
270 | 261 | ||
271 | policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); | 262 | policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); |
272 | freqs.old = speedstep_get(policy_cpu); | ||
273 | freqs.new = speedstep_freqs[newstate].frequency; | ||
274 | |||
275 | pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); | ||
276 | |||
277 | /* no transition necessary */ | ||
278 | if (freqs.old == freqs.new) | ||
279 | return 0; | ||
280 | 263 | ||
281 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 264 | smp_call_function_single(policy_cpu, _speedstep_set_state, &index, |
282 | |||
283 | smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, | ||
284 | true); | 265 | true); |
285 | 266 | ||
286 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
287 | |||
288 | return 0; | 267 | return 0; |
289 | } | 268 | } |
290 | 269 | ||
291 | 270 | ||
292 | /** | ||
293 | * speedstep_verify - verifies a new CPUFreq policy | ||
294 | * @policy: new policy | ||
295 | * | ||
296 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
297 | * at least one border included. | ||
298 | */ | ||
299 | static int speedstep_verify(struct cpufreq_policy *policy) | ||
300 | { | ||
301 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
302 | } | ||
303 | |||
304 | struct get_freqs { | 271 | struct get_freqs { |
305 | struct cpufreq_policy *policy; | 272 | struct cpufreq_policy *policy; |
306 | int ret; | 273 | int ret; |
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs) | |||
320 | 287 | ||
321 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | 288 | static int speedstep_cpu_init(struct cpufreq_policy *policy) |
322 | { | 289 | { |
323 | int result; | 290 | unsigned int policy_cpu; |
324 | unsigned int policy_cpu, speed; | ||
325 | struct get_freqs gf; | 291 | struct get_freqs gf; |
326 | 292 | ||
327 | /* only run on CPU to be set, or on its sibling */ | 293 | /* only run on CPU to be set, or on its sibling */ |
@@ -336,49 +302,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
336 | if (gf.ret) | 302 | if (gf.ret) |
337 | return gf.ret; | 303 | return gf.ret; |
338 | 304 | ||
339 | /* get current speed setting */ | 305 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); |
340 | speed = speedstep_get(policy_cpu); | ||
341 | if (!speed) | ||
342 | return -EIO; | ||
343 | |||
344 | pr_debug("currently at %s speed setting - %i MHz\n", | ||
345 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | ||
346 | ? "low" : "high", | ||
347 | (speed / 1000)); | ||
348 | |||
349 | /* cpuinfo and default policy values */ | ||
350 | policy->cur = speed; | ||
351 | |||
352 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
353 | if (result) | ||
354 | return result; | ||
355 | |||
356 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
357 | |||
358 | return 0; | ||
359 | } | 306 | } |
360 | 307 | ||
361 | 308 | ||
362 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
363 | { | ||
364 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static struct freq_attr *speedstep_attr[] = { | ||
369 | &cpufreq_freq_attr_scaling_available_freqs, | ||
370 | NULL, | ||
371 | }; | ||
372 | |||
373 | |||
374 | static struct cpufreq_driver speedstep_driver = { | 309 | static struct cpufreq_driver speedstep_driver = { |
375 | .name = "speedstep-ich", | 310 | .name = "speedstep-ich", |
376 | .verify = speedstep_verify, | 311 | .verify = cpufreq_generic_frequency_table_verify, |
377 | .target = speedstep_target, | 312 | .target_index = speedstep_target, |
378 | .init = speedstep_cpu_init, | 313 | .init = speedstep_cpu_init, |
379 | .exit = speedstep_cpu_exit, | 314 | .exit = cpufreq_generic_exit, |
380 | .get = speedstep_get, | 315 | .get = speedstep_get, |
381 | .attr = speedstep_attr, | 316 | .attr = cpufreq_generic_attr, |
382 | }; | 317 | }; |
383 | 318 | ||
384 | static const struct x86_cpu_id ss_smi_ids[] = { | 319 | static const struct x86_cpu_id ss_smi_ids[] = { |
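For the drivers whose init still has work of its own to do (speedstep-ich/-smi, sc520, the sparc drivers), the two-step cpufreq_frequency_table_cpuinfo() plus cpufreq_frequency_table_get_attr() sequence collapses into a single cpufreq_table_validate_and_show() call at the end of init. For instance, in the hypothetical driver:

static int example_validate_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	/* Validates the table against cpuinfo limits and publishes it for sysfs. */
	return cpufreq_table_validate_and_show(policy, example_freq_table);
}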
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index abfba4f731eb..0f5326d6f79f 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
@@ -235,52 +235,21 @@ static void speedstep_set_state(unsigned int state) | |||
235 | /** | 235 | /** |
236 | * speedstep_target - set a new CPUFreq policy | 236 | * speedstep_target - set a new CPUFreq policy |
237 | * @policy: new policy | 237 | * @policy: new policy |
238 | * @target_freq: new freq | 238 | * @index: index of new freq |
239 | * @relation: | ||
240 | * | 239 | * |
241 | * Sets a new CPUFreq policy/freq. | 240 | * Sets a new CPUFreq policy/freq. |
242 | */ | 241 | */ |
243 | static int speedstep_target(struct cpufreq_policy *policy, | 242 | static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) |
244 | unsigned int target_freq, unsigned int relation) | ||
245 | { | 243 | { |
246 | unsigned int newstate = 0; | 244 | speedstep_set_state(index); |
247 | struct cpufreq_freqs freqs; | ||
248 | |||
249 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], | ||
250 | target_freq, relation, &newstate)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | freqs.old = speedstep_freqs[speedstep_get_state()].frequency; | ||
254 | freqs.new = speedstep_freqs[newstate].frequency; | ||
255 | |||
256 | if (freqs.old == freqs.new) | ||
257 | return 0; | ||
258 | |||
259 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | ||
260 | speedstep_set_state(newstate); | ||
261 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
262 | 245 | ||
263 | return 0; | 246 | return 0; |
264 | } | 247 | } |
265 | 248 | ||
266 | 249 | ||
267 | /** | ||
268 | * speedstep_verify - verifies a new CPUFreq policy | ||
269 | * @policy: new policy | ||
270 | * | ||
271 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
272 | * at least one border included. | ||
273 | */ | ||
274 | static int speedstep_verify(struct cpufreq_policy *policy) | ||
275 | { | ||
276 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
277 | } | ||
278 | |||
279 | |||
280 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | 250 | static int speedstep_cpu_init(struct cpufreq_policy *policy) |
281 | { | 251 | { |
282 | int result; | 252 | int result; |
283 | unsigned int speed, state; | ||
284 | unsigned int *low, *high; | 253 | unsigned int *low, *high; |
285 | 254 | ||
286 | /* capability check */ | 255 | /* capability check */ |
@@ -316,32 +285,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
316 | pr_debug("workaround worked.\n"); | 285 | pr_debug("workaround worked.\n"); |
317 | } | 286 | } |
318 | 287 | ||
319 | /* get current speed setting */ | ||
320 | state = speedstep_get_state(); | ||
321 | speed = speedstep_freqs[state].frequency; | ||
322 | |||
323 | pr_debug("currently at %s speed setting - %i MHz\n", | ||
324 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | ||
325 | ? "low" : "high", | ||
326 | (speed / 1000)); | ||
327 | |||
328 | /* cpuinfo and default policy values */ | ||
329 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 288 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
330 | policy->cur = speed; | 289 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); |
331 | |||
332 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
333 | if (result) | ||
334 | return result; | ||
335 | |||
336 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
342 | { | ||
343 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
344 | return 0; | ||
345 | } | 290 | } |
346 | 291 | ||
347 | static unsigned int speedstep_get(unsigned int cpu) | 292 | static unsigned int speedstep_get(unsigned int cpu) |
@@ -362,20 +307,15 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
362 | return result; | 307 | return result; |
363 | } | 308 | } |
364 | 309 | ||
365 | static struct freq_attr *speedstep_attr[] = { | ||
366 | &cpufreq_freq_attr_scaling_available_freqs, | ||
367 | NULL, | ||
368 | }; | ||
369 | |||
370 | static struct cpufreq_driver speedstep_driver = { | 310 | static struct cpufreq_driver speedstep_driver = { |
371 | .name = "speedstep-smi", | 311 | .name = "speedstep-smi", |
372 | .verify = speedstep_verify, | 312 | .verify = cpufreq_generic_frequency_table_verify, |
373 | .target = speedstep_target, | 313 | .target_index = speedstep_target, |
374 | .init = speedstep_cpu_init, | 314 | .init = speedstep_cpu_init, |
375 | .exit = speedstep_cpu_exit, | 315 | .exit = cpufreq_generic_exit, |
376 | .get = speedstep_get, | 316 | .get = speedstep_get, |
377 | .resume = speedstep_resume, | 317 | .resume = speedstep_resume, |
378 | .attr = speedstep_attr, | 318 | .attr = cpufreq_generic_attr, |
379 | }; | 319 | }; |
380 | 320 | ||
381 | static const struct x86_cpu_id ss_smi_ids[] = { | 321 | static const struct x86_cpu_id ss_smi_ids[] = { |
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c index a7b876fdc1d8..f42df7ec03c5 100644 --- a/drivers/cpufreq/tegra-cpufreq.c +++ b/drivers/cpufreq/tegra-cpufreq.c | |||
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS]; | |||
51 | static DEFINE_MUTEX(tegra_cpu_lock); | 51 | static DEFINE_MUTEX(tegra_cpu_lock); |
52 | static bool is_suspended; | 52 | static bool is_suspended; |
53 | 53 | ||
54 | static int tegra_verify_speed(struct cpufreq_policy *policy) | ||
55 | { | ||
56 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
57 | } | ||
58 | |||
59 | static unsigned int tegra_getspeed(unsigned int cpu) | 54 | static unsigned int tegra_getspeed(unsigned int cpu) |
60 | { | 55 | { |
61 | unsigned long rate; | 56 | unsigned long rate; |
@@ -107,12 +102,8 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy, | |||
107 | unsigned long rate) | 102 | unsigned long rate) |
108 | { | 103 | { |
109 | int ret = 0; | 104 | int ret = 0; |
110 | struct cpufreq_freqs freqs; | ||
111 | 105 | ||
112 | freqs.old = tegra_getspeed(0); | 106 | if (tegra_getspeed(0) == rate) |
113 | freqs.new = rate; | ||
114 | |||
115 | if (freqs.old == freqs.new) | ||
116 | return ret; | 107 | return ret; |
117 | 108 | ||
118 | /* | 109 | /* |
@@ -126,21 +117,10 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy, | |||
126 | else | 117 | else |
127 | clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ | 118 | clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ |
128 | 119 | ||
129 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 120 | ret = tegra_cpu_clk_set_rate(rate * 1000); |
130 | 121 | if (ret) | |
131 | #ifdef CONFIG_CPU_FREQ_DEBUG | 122 | pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n", |
132 | printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", | 123 | rate); |
133 | freqs.old, freqs.new); | ||
134 | #endif | ||
135 | |||
136 | ret = tegra_cpu_clk_set_rate(freqs.new * 1000); | ||
137 | if (ret) { | ||
138 | pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", | ||
139 | freqs.new); | ||
140 | freqs.new = freqs.old; | ||
141 | } | ||
142 | |||
143 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | ||
144 | 124 | ||
145 | return ret; | 125 | return ret; |
146 | } | 126 | } |
@@ -155,11 +135,8 @@ static unsigned long tegra_cpu_highest_speed(void) | |||
155 | return rate; | 135 | return rate; |
156 | } | 136 | } |
157 | 137 | ||
158 | static int tegra_target(struct cpufreq_policy *policy, | 138 | static int tegra_target(struct cpufreq_policy *policy, unsigned int index) |
159 | unsigned int target_freq, | ||
160 | unsigned int relation) | ||
161 | { | 139 | { |
162 | unsigned int idx; | ||
163 | unsigned int freq; | 140 | unsigned int freq; |
164 | int ret = 0; | 141 | int ret = 0; |
165 | 142 | ||
@@ -170,10 +147,7 @@ static int tegra_target(struct cpufreq_policy *policy, | |||
170 | goto out; | 147 | goto out; |
171 | } | 148 | } |
172 | 149 | ||
173 | cpufreq_frequency_table_target(policy, freq_table, target_freq, | 150 | freq = freq_table[index].frequency; |
174 | relation, &idx); | ||
175 | |||
176 | freq = freq_table[idx].frequency; | ||
177 | 151 | ||
178 | target_cpu_speed[policy->cpu] = freq; | 152 | target_cpu_speed[policy->cpu] = freq; |
179 | 153 | ||
@@ -209,21 +183,23 @@ static struct notifier_block tegra_cpu_pm_notifier = { | |||
209 | 183 | ||
210 | static int tegra_cpu_init(struct cpufreq_policy *policy) | 184 | static int tegra_cpu_init(struct cpufreq_policy *policy) |
211 | { | 185 | { |
186 | int ret; | ||
187 | |||
212 | if (policy->cpu >= NUM_CPUS) | 188 | if (policy->cpu >= NUM_CPUS) |
213 | return -EINVAL; | 189 | return -EINVAL; |
214 | 190 | ||
215 | clk_prepare_enable(emc_clk); | 191 | clk_prepare_enable(emc_clk); |
216 | clk_prepare_enable(cpu_clk); | 192 | clk_prepare_enable(cpu_clk); |
217 | 193 | ||
218 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 194 | target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu); |
219 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
220 | policy->cur = tegra_getspeed(policy->cpu); | ||
221 | target_cpu_speed[policy->cpu] = policy->cur; | ||
222 | 195 | ||
223 | /* FIXME: what's the actual transition time? */ | 196 | /* FIXME: what's the actual transition time? */ |
224 | policy->cpuinfo.transition_latency = 300 * 1000; | 197 | ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); |
225 | 198 | if (ret) { | |
226 | cpumask_copy(policy->cpus, cpu_possible_mask); | 199 | clk_disable_unprepare(cpu_clk); |
200 | clk_disable_unprepare(emc_clk); | ||
201 | return ret; | ||
202 | } | ||
227 | 203 | ||
228 | if (policy->cpu == 0) | 204 | if (policy->cpu == 0) |
229 | register_pm_notifier(&tegra_cpu_pm_notifier); | 205 | register_pm_notifier(&tegra_cpu_pm_notifier); |
@@ -233,24 +209,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) | |||
233 | 209 | ||
234 | static int tegra_cpu_exit(struct cpufreq_policy *policy) | 210 | static int tegra_cpu_exit(struct cpufreq_policy *policy) |
235 | { | 211 | { |
236 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 212 | cpufreq_frequency_table_put_attr(policy->cpu); |
213 | clk_disable_unprepare(cpu_clk); | ||
237 | clk_disable_unprepare(emc_clk); | 214 | clk_disable_unprepare(emc_clk); |
238 | return 0; | 215 | return 0; |
239 | } | 216 | } |
240 | 217 | ||
241 | static struct freq_attr *tegra_cpufreq_attr[] = { | ||
242 | &cpufreq_freq_attr_scaling_available_freqs, | ||
243 | NULL, | ||
244 | }; | ||
245 | |||
246 | static struct cpufreq_driver tegra_cpufreq_driver = { | 218 | static struct cpufreq_driver tegra_cpufreq_driver = { |
247 | .verify = tegra_verify_speed, | 219 | .verify = cpufreq_generic_frequency_table_verify, |
248 | .target = tegra_target, | 220 | .target_index = tegra_target, |
249 | .get = tegra_getspeed, | 221 | .get = tegra_getspeed, |
250 | .init = tegra_cpu_init, | 222 | .init = tegra_cpu_init, |
251 | .exit = tegra_cpu_exit, | 223 | .exit = tegra_cpu_exit, |
252 | .name = "tegra", | 224 | .name = "tegra", |
253 | .attr = tegra_cpufreq_attr, | 225 | .attr = cpufreq_generic_attr, |
254 | }; | 226 | }; |
255 | 227 | ||
256 | static int __init tegra_cpufreq_init(void) | 228 | static int __init tegra_cpufreq_init(void) |
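In the tegra hunk, everything that used to translate (target_freq, relation) into a table row and post the CPUFREQ_PRECHANGE/POSTCHANGE notifications disappears: with ->target_index() the cpufreq core performs the table lookup and is expected to issue the notifications around the callback, leaving the driver with just the clock programming. A hedged sketch of what remains, assuming a cpu_clk and freq_table like the ones this driver already has:

#include <linux/clk.h>
#include <linux/cpufreq.h>

/* assumed to be defined elsewhere in the driver, as in tegra-cpufreq.c */
extern struct clk *cpu_clk;
extern struct cpufreq_frequency_table freq_table[];

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
        /* 'index' was validated by the core; no cpufreq_freqs bookkeeping
         * or notification calls are needed here any more */
        return clk_set_rate(cpu_clk, freq_table[index].frequency * 1000UL);
}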
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index b225f04d8ae5..653ae2955b55 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c | |||
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy) | |||
29 | if (policy->cpu) | 29 | if (policy->cpu) |
30 | return -EINVAL; | 30 | return -EINVAL; |
31 | 31 | ||
32 | cpufreq_verify_within_limits(policy, | 32 | cpufreq_verify_within_cpu_limits(policy); |
33 | policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); | ||
34 | |||
35 | return 0; | 33 | return 0; |
36 | } | 34 | } |
37 | 35 | ||
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy) | |||
68 | { | 66 | { |
69 | if (policy->cpu != 0) | 67 | if (policy->cpu != 0) |
70 | return -EINVAL; | 68 | return -EINVAL; |
71 | policy->cur = ucv2_getspeed(0); | ||
72 | policy->min = policy->cpuinfo.min_freq = 250000; | 69 | policy->min = policy->cpuinfo.min_freq = 250000; |
73 | policy->max = policy->cpuinfo.max_freq = 1000000; | 70 | policy->max = policy->cpuinfo.max_freq = 1000000; |
74 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 71 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c new file mode 100644 index 000000000000..7f7c9c01b44e --- /dev/null +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * Versatile Express SPC CPUFreq Interface driver | ||
3 | * | ||
4 | * It provides necessary ops to arm_big_little cpufreq driver. | ||
5 | * | ||
6 | * Copyright (C) 2013 ARM Ltd. | ||
7 | * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
14 | * kind, whether express or implied; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
21 | #include <linux/cpufreq.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/pm_opp.h> | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | #include "arm_big_little.h" | ||
28 | |||
29 | static int ve_spc_init_opp_table(struct device *cpu_dev) | ||
30 | { | ||
31 | /* | ||
32 | * platform specific SPC code must initialise the opp table | ||
33 | * so just check if the OPP count is non-zero | ||
34 | */ | ||
35 | return dev_pm_opp_get_opp_count(cpu_dev) <= 0; | ||
36 | } | ||
37 | |||
38 | static int ve_spc_get_transition_latency(struct device *cpu_dev) | ||
39 | { | ||
40 | return 1000000; /* 1 ms */ | ||
41 | } | ||
42 | |||
43 | static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = { | ||
44 | .name = "vexpress-spc", | ||
45 | .get_transition_latency = ve_spc_get_transition_latency, | ||
46 | .init_opp_table = ve_spc_init_opp_table, | ||
47 | }; | ||
48 | |||
49 | static int ve_spc_cpufreq_probe(struct platform_device *pdev) | ||
50 | { | ||
51 | return bL_cpufreq_register(&ve_spc_cpufreq_ops); | ||
52 | } | ||
53 | |||
54 | static int ve_spc_cpufreq_remove(struct platform_device *pdev) | ||
55 | { | ||
56 | bL_cpufreq_unregister(&ve_spc_cpufreq_ops); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static struct platform_driver ve_spc_cpufreq_platdrv = { | ||
61 | .driver = { | ||
62 | .name = "vexpress-spc-cpufreq", | ||
63 | .owner = THIS_MODULE, | ||
64 | }, | ||
65 | .probe = ve_spc_cpufreq_probe, | ||
66 | .remove = ve_spc_cpufreq_remove, | ||
67 | }; | ||
68 | module_platform_driver(ve_spc_cpufreq_platdrv); | ||
69 | |||
70 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index d6f57d5d9631..d988948a89a0 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -2,6 +2,17 @@ | |||
2 | # ARM CPU Idle drivers | 2 | # ARM CPU Idle drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | config ARM_BIG_LITTLE_CPUIDLE | ||
6 | bool "Support for ARM big.LITTLE processors" | ||
7 | depends on ARCH_VEXPRESS_TC2_PM | ||
8 | select ARM_CPU_SUSPEND | ||
9 | select CPU_IDLE_MULTIPLE_DRIVERS | ||
10 | help | ||
11 | Select this option to enable CPU idle driver for big.LITTLE based | ||
12 | ARM systems. The driver manages CPU coordination through MCPM and | ||
13 | defines different C-states for little and big cores through the | ||
14 | multiple CPU idle drivers infrastructure. | ||
15 | |||
5 | config ARM_HIGHBANK_CPUIDLE | 16 | config ARM_HIGHBANK_CPUIDLE |
6 | bool "CPU Idle Driver for Calxeda processors" | 17 | bool "CPU Idle Driver for Calxeda processors" |
7 | depends on ARM_PSCI | 18 | depends on ARM_PSCI |
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE | |||
27 | help | 38 | help |
28 | Select this to enable cpuidle for ST-E u8500 processors | 39 | Select this to enable cpuidle for ST-E u8500 processors |
29 | 40 | ||
30 | config CPU_IDLE_BIG_LITTLE | 41 | config ARM_AT91_CPUIDLE |
31 | bool "Support for ARM big.LITTLE processors" | 42 | bool "Cpu Idle Driver for the AT91 processors" |
32 | depends on ARCH_VEXPRESS_TC2_PM | 43 | default y |
33 | select ARM_CPU_SUSPEND | 44 | depends on ARCH_AT91 |
34 | select CPU_IDLE_MULTIPLE_DRIVERS | ||
35 | help | 45 | help |
36 | Select this option to enable CPU idle driver for big.LITTLE based | 46 | Select this to enable cpuidle for AT91 processors |
37 | ARM systems. Driver manages CPUs coordination through MCPM and | ||
38 | define different C-states for little and big cores through the | ||
39 | multiple CPU idle drivers infrastructure. | ||
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index cea5ef58876d..527be28e5c1e 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o | |||
7 | 7 | ||
8 | ################################################################################## | 8 | ################################################################################## |
9 | # ARM SoC drivers | 9 | # ARM SoC drivers |
10 | obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o | ||
10 | obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o | 11 | obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o |
11 | obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o | 12 | obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o |
12 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o | 13 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o |
13 | obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o | 14 | obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o |
14 | obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o | 15 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o |
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index f8a86364c6b6..e952936418d0 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c | |||
@@ -147,7 +147,7 @@ static cpumask_t cpuidle_coupled_poked; | |||
147 | * has returned from this function, the barrier is immediately available for | 147 | * has returned from this function, the barrier is immediately available for |
148 | * reuse. | 148 | * reuse. |
149 | * | 149 | * |
150 | * The atomic variable a must be initialized to 0 before any cpu calls | 150 | * The atomic variable must be initialized to 0 before any cpu calls |
151 | * this function, will be reset to 0 before any cpu returns from this function. | 151 | * this function, will be reset to 0 before any cpu returns from this function. |
152 | * | 152 | * |
153 | * Must only be called from within a coupled idle state handler | 153 | * Must only be called from within a coupled idle state handler |
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c new file mode 100644 index 000000000000..a0774370c6bc --- /dev/null +++ b/drivers/cpuidle/cpuidle-at91.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * based on arch/arm/mach-kirkwood/cpuidle.c | ||
3 | * | ||
4 | * CPU idle support for AT91 SoC | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | * | ||
10 | * The cpu idle uses wait-for-interrupt and RAM self refresh in order | ||
11 | * to implement two idle states - | ||
12 | * #1 wait-for-interrupt | ||
13 | * #2 wait-for-interrupt and RAM self refresh | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/cpuidle.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/export.h> | ||
22 | #include <asm/proc-fns.h> | ||
23 | #include <asm/cpuidle.h> | ||
24 | |||
25 | #define AT91_MAX_STATES 2 | ||
26 | |||
27 | static void (*at91_standby)(void); | ||
28 | |||
29 | /* Actual code that puts the SoC in different idle states */ | ||
30 | static int at91_enter_idle(struct cpuidle_device *dev, | ||
31 | struct cpuidle_driver *drv, | ||
32 | int index) | ||
33 | { | ||
34 | at91_standby(); | ||
35 | return index; | ||
36 | } | ||
37 | |||
38 | static struct cpuidle_driver at91_idle_driver = { | ||
39 | .name = "at91_idle", | ||
40 | .owner = THIS_MODULE, | ||
41 | .states[0] = ARM_CPUIDLE_WFI_STATE, | ||
42 | .states[1] = { | ||
43 | .enter = at91_enter_idle, | ||
44 | .exit_latency = 10, | ||
45 | .target_residency = 10000, | ||
46 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
47 | .name = "RAM_SR", | ||
48 | .desc = "WFI and DDR Self Refresh", | ||
49 | }, | ||
50 | .state_count = AT91_MAX_STATES, | ||
51 | }; | ||
52 | |||
53 | /* Initialize CPU idle by registering the idle states */ | ||
54 | static int at91_cpuidle_probe(struct platform_device *dev) | ||
55 | { | ||
56 | at91_standby = (void *)(dev->dev.platform_data); | ||
57 | |||
58 | return cpuidle_register(&at91_idle_driver, NULL); | ||
59 | } | ||
60 | |||
61 | static struct platform_driver at91_cpuidle_driver = { | ||
62 | .driver = { | ||
63 | .name = "cpuidle-at91", | ||
64 | .owner = THIS_MODULE, | ||
65 | }, | ||
66 | .probe = at91_cpuidle_probe, | ||
67 | }; | ||
68 | |||
69 | module_platform_driver(at91_cpuidle_driver); | ||
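cpuidle-at91.c gets its deep-idle routine through dev->dev.platform_data, so the AT91 machine code is expected to pass its SoC-specific standby function when it registers the "cpuidle-at91" device. An illustrative sketch of that hand-off (at91_pm_standby and the init function are assumptions; only the driver name comes from the file above):

#include <linux/init.h>
#include <linux/platform_device.h>

/* SoC-specific standby hook, assumed to be provided by the AT91 pm code */
extern void at91_pm_standby(void);

static struct platform_device at91_cpuidle_device = {
        .name = "cpuidle-at91",
        .id   = -1,
};

static int __init at91_cpuidle_device_init(void)
{
        /* the driver casts platform_data back to a void (*)(void) */
        at91_cpuidle_device.dev.platform_data = at91_pm_standby;
        return platform_device_register(&at91_cpuidle_device);
}
device_initcall(at91_cpuidle_device_init);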
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c index e0564652af35..5e35804b1a95 100644 --- a/drivers/cpuidle/cpuidle-ux500.c +++ b/drivers/cpuidle/cpuidle-ux500.c | |||
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = { | |||
111 | .state_count = 2, | 111 | .state_count = 2, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int __init dbx500_cpuidle_probe(struct platform_device *pdev) | 114 | static int dbx500_cpuidle_probe(struct platform_device *pdev) |
115 | { | 115 | { |
116 | /* Configure wake up reasons */ | 116 | /* Configure wake up reasons */ |
117 | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | | 117 | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | |
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c index 38e03a183591..aded75928028 100644 --- a/drivers/cpuidle/cpuidle-zynq.c +++ b/drivers/cpuidle/cpuidle-zynq.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/cpu_pm.h> | 29 | #include <linux/cpu_pm.h> |
30 | #include <linux/cpuidle.h> | 30 | #include <linux/cpuidle.h> |
31 | #include <linux/of.h> | 31 | #include <linux/platform_device.h> |
32 | #include <asm/proc-fns.h> | 32 | #include <asm/proc-fns.h> |
33 | #include <asm/cpuidle.h> | 33 | #include <asm/cpuidle.h> |
34 | 34 | ||
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = { | |||
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* Initialize CPU idle by registering the idle states */ | 72 | /* Initialize CPU idle by registering the idle states */ |
73 | static int __init zynq_cpuidle_init(void) | 73 | static int zynq_cpuidle_probe(struct platform_device *pdev) |
74 | { | 74 | { |
75 | if (!of_machine_is_compatible("xlnx,zynq-7000")) | ||
76 | return -ENODEV; | ||
77 | |||
78 | pr_info("Xilinx Zynq CpuIdle Driver started\n"); | 75 | pr_info("Xilinx Zynq CpuIdle Driver started\n"); |
79 | 76 | ||
80 | return cpuidle_register(&zynq_idle_driver, NULL); | 77 | return cpuidle_register(&zynq_idle_driver, NULL); |
81 | } | 78 | } |
82 | 79 | ||
83 | device_initcall(zynq_cpuidle_init); | 80 | static struct platform_driver zynq_cpuidle_driver = { |
81 | .driver = { | ||
82 | .name = "cpuidle-zynq", | ||
83 | .owner = THIS_MODULE, | ||
84 | }, | ||
85 | .probe = zynq_cpuidle_probe, | ||
86 | }; | ||
87 | |||
88 | module_platform_driver(zynq_cpuidle_driver); | ||
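The Zynq conversion is the same idea from the other direction: the of_machine_is_compatible() check leaves the driver, which now simply binds to a "cpuidle-zynq" platform device, so the machine test has to happen wherever that device is registered. A hedged sketch of that side (the init function is hypothetical; the compatible string and device name are the ones from the hunk above):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int __init zynq_cpuidle_device_init(void)
{
        struct platform_device *pdev;

        /* the check that used to sit in zynq_cpuidle_init() */
        if (!of_machine_is_compatible("xlnx,zynq-7000"))
                return -ENODEV;

        pdev = platform_device_register_simple("cpuidle-zynq", -1, NULL, 0);
        return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(zynq_cpuidle_device_init);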
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index d75040ddd2b3..2a991e468f78 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -118,11 +118,9 @@ int cpuidle_idle_call(void) | |||
118 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 118 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
119 | struct cpuidle_driver *drv; | 119 | struct cpuidle_driver *drv; |
120 | int next_state, entered_state; | 120 | int next_state, entered_state; |
121 | bool broadcast; | ||
121 | 122 | ||
122 | if (off) | 123 | if (off || !initialized) |
123 | return -ENODEV; | ||
124 | |||
125 | if (!initialized) | ||
126 | return -ENODEV; | 124 | return -ENODEV; |
127 | 125 | ||
128 | /* check if the device is ready */ | 126 | /* check if the device is ready */ |
@@ -144,9 +142,10 @@ int cpuidle_idle_call(void) | |||
144 | 142 | ||
145 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | 143 | trace_cpu_idle_rcuidle(next_state, dev->cpu); |
146 | 144 | ||
147 | if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP) | 145 | broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); |
148 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | 146 | |
149 | &dev->cpu); | 147 | if (broadcast) |
148 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); | ||
150 | 149 | ||
151 | if (cpuidle_state_is_coupled(dev, drv, next_state)) | 150 | if (cpuidle_state_is_coupled(dev, drv, next_state)) |
152 | entered_state = cpuidle_enter_state_coupled(dev, drv, | 151 | entered_state = cpuidle_enter_state_coupled(dev, drv, |
@@ -154,9 +153,8 @@ int cpuidle_idle_call(void) | |||
154 | else | 153 | else |
155 | entered_state = cpuidle_enter_state(dev, drv, next_state); | 154 | entered_state = cpuidle_enter_state(dev, drv, next_state); |
156 | 155 | ||
157 | if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP) | 156 | if (broadcast) |
158 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, | 157 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); |
159 | &dev->cpu); | ||
160 | 158 | ||
161 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | 159 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); |
162 | 160 | ||
@@ -228,45 +226,6 @@ void cpuidle_resume(void) | |||
228 | mutex_unlock(&cpuidle_lock); | 226 | mutex_unlock(&cpuidle_lock); |
229 | } | 227 | } |
230 | 228 | ||
231 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX | ||
232 | static int poll_idle(struct cpuidle_device *dev, | ||
233 | struct cpuidle_driver *drv, int index) | ||
234 | { | ||
235 | ktime_t t1, t2; | ||
236 | s64 diff; | ||
237 | |||
238 | t1 = ktime_get(); | ||
239 | local_irq_enable(); | ||
240 | while (!need_resched()) | ||
241 | cpu_relax(); | ||
242 | |||
243 | t2 = ktime_get(); | ||
244 | diff = ktime_to_us(ktime_sub(t2, t1)); | ||
245 | if (diff > INT_MAX) | ||
246 | diff = INT_MAX; | ||
247 | |||
248 | dev->last_residency = (int) diff; | ||
249 | |||
250 | return index; | ||
251 | } | ||
252 | |||
253 | static void poll_idle_init(struct cpuidle_driver *drv) | ||
254 | { | ||
255 | struct cpuidle_state *state = &drv->states[0]; | ||
256 | |||
257 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); | ||
258 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); | ||
259 | state->exit_latency = 0; | ||
260 | state->target_residency = 0; | ||
261 | state->power_usage = -1; | ||
262 | state->flags = 0; | ||
263 | state->enter = poll_idle; | ||
264 | state->disabled = false; | ||
265 | } | ||
266 | #else | ||
267 | static void poll_idle_init(struct cpuidle_driver *drv) {} | ||
268 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ | ||
269 | |||
270 | /** | 229 | /** |
271 | * cpuidle_enable_device - enables idle PM for a CPU | 230 | * cpuidle_enable_device - enables idle PM for a CPU |
272 | * @dev: the CPU | 231 | * @dev: the CPU |
@@ -296,8 +255,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
296 | if (!dev->state_count) | 255 | if (!dev->state_count) |
297 | dev->state_count = drv->state_count; | 256 | dev->state_count = drv->state_count; |
298 | 257 | ||
299 | poll_idle_init(drv); | ||
300 | |||
301 | ret = cpuidle_add_device_sysfs(dev); | 258 | ret = cpuidle_add_device_sysfs(dev); |
302 | if (ret) | 259 | if (ret) |
303 | return ret; | 260 | return ret; |
@@ -358,12 +315,10 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev) | |||
358 | module_put(drv->owner); | 315 | module_put(drv->owner); |
359 | } | 316 | } |
360 | 317 | ||
361 | static int __cpuidle_device_init(struct cpuidle_device *dev) | 318 | static void __cpuidle_device_init(struct cpuidle_device *dev) |
362 | { | 319 | { |
363 | memset(dev->states_usage, 0, sizeof(dev->states_usage)); | 320 | memset(dev->states_usage, 0, sizeof(dev->states_usage)); |
364 | dev->last_residency = 0; | 321 | dev->last_residency = 0; |
365 | |||
366 | return 0; | ||
367 | } | 322 | } |
368 | 323 | ||
369 | /** | 324 | /** |
@@ -385,13 +340,12 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
385 | list_add(&dev->device_list, &cpuidle_detected_devices); | 340 | list_add(&dev->device_list, &cpuidle_detected_devices); |
386 | 341 | ||
387 | ret = cpuidle_coupled_register_device(dev); | 342 | ret = cpuidle_coupled_register_device(dev); |
388 | if (ret) { | 343 | if (ret) |
389 | __cpuidle_unregister_device(dev); | 344 | __cpuidle_unregister_device(dev); |
390 | return ret; | 345 | else |
391 | } | 346 | dev->registered = 1; |
392 | 347 | ||
393 | dev->registered = 1; | 348 | return ret; |
394 | return 0; | ||
395 | } | 349 | } |
396 | 350 | ||
397 | /** | 351 | /** |
@@ -410,9 +364,7 @@ int cpuidle_register_device(struct cpuidle_device *dev) | |||
410 | if (dev->registered) | 364 | if (dev->registered) |
411 | goto out_unlock; | 365 | goto out_unlock; |
412 | 366 | ||
413 | ret = __cpuidle_device_init(dev); | 367 | __cpuidle_device_init(dev); |
414 | if (ret) | ||
415 | goto out_unlock; | ||
416 | 368 | ||
417 | ret = __cpuidle_register_device(dev); | 369 | ret = __cpuidle_register_device(dev); |
418 | if (ret) | 370 | if (ret) |
@@ -516,7 +468,7 @@ int cpuidle_register(struct cpuidle_driver *drv, | |||
516 | 468 | ||
517 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 469 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
518 | /* | 470 | /* |
519 | * On multiplatform for ARM, the coupled idle states could | 471 | * On multiplatform for ARM, the coupled idle states could be |
520 | * enabled in the kernel even if the cpuidle driver does not | 472 | * enabled in the kernel even if the cpuidle driver does not |
521 | * use it. Note, coupled_cpus is a struct copy. | 473 | * use it. Note, coupled_cpus is a struct copy. |
522 | */ | 474 | */ |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 6e11701f0fca..06dbe7c86199 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/mutex.h> | 11 | #include <linux/mutex.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/sched.h> | ||
13 | #include <linux/cpuidle.h> | 14 | #include <linux/cpuidle.h> |
14 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
15 | #include <linux/clockchips.h> | 16 | #include <linux/clockchips.h> |
@@ -56,7 +57,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) | |||
56 | } | 57 | } |
57 | 58 | ||
58 | /** | 59 | /** |
59 | * __cpuidle_set_driver - set per CPU driver variables the the given driver. | 60 | * __cpuidle_set_driver - set per CPU driver variables for the given driver. |
60 | * @drv: a valid pointer to a struct cpuidle_driver | 61 | * @drv: a valid pointer to a struct cpuidle_driver |
61 | * | 62 | * |
62 | * For each CPU in the driver's cpumask, unset the registered driver per CPU | 63 | * For each CPU in the driver's cpumask, unset the registered driver per CPU |
@@ -132,7 +133,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) | |||
132 | * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer | 133 | * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer |
133 | * @arg: a void pointer used to match the SMP cross call API | 134 | * @arg: a void pointer used to match the SMP cross call API |
134 | * | 135 | * |
135 | * @arg is used as a value of type 'long' with on of the two values: | 136 | * @arg is used as a value of type 'long' with one of the two values: |
136 | * - CLOCK_EVT_NOTIFY_BROADCAST_ON | 137 | * - CLOCK_EVT_NOTIFY_BROADCAST_ON |
137 | * - CLOCK_EVT_NOTIFY_BROADCAST_OFF | 138 | * - CLOCK_EVT_NOTIFY_BROADCAST_OFF |
138 | * | 139 | * |
@@ -149,10 +150,8 @@ static void cpuidle_setup_broadcast_timer(void *arg) | |||
149 | /** | 150 | /** |
150 | * __cpuidle_driver_init - initialize the driver's internal data | 151 | * __cpuidle_driver_init - initialize the driver's internal data |
151 | * @drv: a valid pointer to a struct cpuidle_driver | 152 | * @drv: a valid pointer to a struct cpuidle_driver |
152 | * | ||
153 | * Returns 0 on success, a negative error code otherwise. | ||
154 | */ | 153 | */ |
155 | static int __cpuidle_driver_init(struct cpuidle_driver *drv) | 154 | static void __cpuidle_driver_init(struct cpuidle_driver *drv) |
156 | { | 155 | { |
157 | int i; | 156 | int i; |
158 | 157 | ||
@@ -169,20 +168,55 @@ static int __cpuidle_driver_init(struct cpuidle_driver *drv) | |||
169 | /* | 168 | /* |
170 | * Look for the timer stop flag in the different states, so that we know | 169 | * Look for the timer stop flag in the different states, so that we know |
171 | * if the broadcast timer has to be set up. The loop is in the reverse | 170 | * if the broadcast timer has to be set up. The loop is in the reverse |
172 | * order, because usually on of the the deeper states has this flag set. | 171 | * order, because usually one of the deeper states has this flag set. 
173 | */ | 172 | */ |
174 | for (i = drv->state_count - 1; i >= 0 ; i--) { | 173 | for (i = drv->state_count - 1; i >= 0 ; i--) { |
174 | if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) { | ||
175 | drv->bctimer = 1; | ||
176 | break; | ||
177 | } | ||
178 | } | ||
179 | } | ||
175 | 180 | ||
176 | if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP)) | 181 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX |
177 | continue; | 182 | static int poll_idle(struct cpuidle_device *dev, |
183 | struct cpuidle_driver *drv, int index) | ||
184 | { | ||
185 | ktime_t t1, t2; | ||
186 | s64 diff; | ||
178 | 187 | ||
179 | drv->bctimer = 1; | 188 | t1 = ktime_get(); |
180 | break; | 189 | local_irq_enable(); |
181 | } | 190 | while (!need_resched()) |
191 | cpu_relax(); | ||
182 | 192 | ||
183 | return 0; | 193 | t2 = ktime_get(); |
194 | diff = ktime_to_us(ktime_sub(t2, t1)); | ||
195 | if (diff > INT_MAX) | ||
196 | diff = INT_MAX; | ||
197 | |||
198 | dev->last_residency = (int) diff; | ||
199 | |||
200 | return index; | ||
184 | } | 201 | } |
185 | 202 | ||
203 | static void poll_idle_init(struct cpuidle_driver *drv) | ||
204 | { | ||
205 | struct cpuidle_state *state = &drv->states[0]; | ||
206 | |||
207 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); | ||
208 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); | ||
209 | state->exit_latency = 0; | ||
210 | state->target_residency = 0; | ||
211 | state->power_usage = -1; | ||
212 | state->flags = 0; | ||
213 | state->enter = poll_idle; | ||
214 | state->disabled = false; | ||
215 | } | ||
216 | #else | ||
217 | static void poll_idle_init(struct cpuidle_driver *drv) {} | ||
218 | #endif /* !CONFIG_ARCH_HAS_CPU_RELAX */ | ||
219 | |||
186 | /** | 220 | /** |
187 | * __cpuidle_register_driver: register the driver | 221 | * __cpuidle_register_driver: register the driver |
188 | * @drv: a valid pointer to a struct cpuidle_driver | 222 | * @drv: a valid pointer to a struct cpuidle_driver |
@@ -206,9 +240,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) | |||
206 | if (cpuidle_disabled()) | 240 | if (cpuidle_disabled()) |
207 | return -ENODEV; | 241 | return -ENODEV; |
208 | 242 | ||
209 | ret = __cpuidle_driver_init(drv); | 243 | __cpuidle_driver_init(drv); |
210 | if (ret) | ||
211 | return ret; | ||
212 | 244 | ||
213 | ret = __cpuidle_set_driver(drv); | 245 | ret = __cpuidle_set_driver(drv); |
214 | if (ret) | 246 | if (ret) |
@@ -218,6 +250,8 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) | |||
218 | on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, | 250 | on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, |
219 | (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1); | 251 | (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1); |
220 | 252 | ||
253 | poll_idle_init(drv); | ||
254 | |||
221 | return 0; | 255 | return 0; |
222 | } | 256 | } |
223 | 257 | ||
@@ -346,10 +380,11 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
346 | */ | 380 | */ |
347 | void cpuidle_driver_unref(void) | 381 | void cpuidle_driver_unref(void) |
348 | { | 382 | { |
349 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 383 | struct cpuidle_driver *drv; |
350 | 384 | ||
351 | spin_lock(&cpuidle_driver_lock); | 385 | spin_lock(&cpuidle_driver_lock); |
352 | 386 | ||
387 | drv = cpuidle_get_driver(); | ||
353 | if (drv && !WARN_ON(drv->refcnt <= 0)) | 388 | if (drv && !WARN_ON(drv->refcnt <= 0)) |
354 | drv->refcnt--; | 389 | drv->refcnt--; |
355 | 390 | ||
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index ea2f8e7aa24a..ca89412f5122 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c | |||
@@ -96,46 +96,3 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
96 | 96 | ||
97 | return ret; | 97 | return ret; |
98 | } | 98 | } |
99 | |||
100 | /** | ||
101 | * cpuidle_replace_governor - find a replacement governor | ||
102 | * @exclude_rating: the rating that will be skipped while looking for | ||
103 | * new governor. | ||
104 | */ | ||
105 | static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating) | ||
106 | { | ||
107 | struct cpuidle_governor *gov; | ||
108 | struct cpuidle_governor *ret_gov = NULL; | ||
109 | unsigned int max_rating = 0; | ||
110 | |||
111 | list_for_each_entry(gov, &cpuidle_governors, governor_list) { | ||
112 | if (gov->rating == exclude_rating) | ||
113 | continue; | ||
114 | if (gov->rating > max_rating) { | ||
115 | max_rating = gov->rating; | ||
116 | ret_gov = gov; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | return ret_gov; | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * cpuidle_unregister_governor - unregisters a governor | ||
125 | * @gov: the governor | ||
126 | */ | ||
127 | void cpuidle_unregister_governor(struct cpuidle_governor *gov) | ||
128 | { | ||
129 | if (!gov) | ||
130 | return; | ||
131 | |||
132 | mutex_lock(&cpuidle_lock); | ||
133 | if (gov == cpuidle_curr_governor) { | ||
134 | struct cpuidle_governor *new_gov; | ||
135 | new_gov = cpuidle_replace_governor(gov->rating); | ||
136 | cpuidle_switch_governor(new_gov); | ||
137 | } | ||
138 | list_del(&gov->governor_list); | ||
139 | mutex_unlock(&cpuidle_lock); | ||
140 | } | ||
141 | |||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 8739cc05228c..e918b6d0caf7 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -52,11 +52,12 @@ static ssize_t show_current_driver(struct device *dev, | |||
52 | char *buf) | 52 | char *buf) |
53 | { | 53 | { |
54 | ssize_t ret; | 54 | ssize_t ret; |
55 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | 55 | struct cpuidle_driver *drv; |
56 | 56 | ||
57 | spin_lock(&cpuidle_driver_lock); | 57 | spin_lock(&cpuidle_driver_lock); |
58 | if (cpuidle_driver) | 58 | drv = cpuidle_get_driver(); |
59 | ret = sprintf(buf, "%s\n", cpuidle_driver->name); | 59 | if (drv) |
60 | ret = sprintf(buf, "%s\n", drv->name); | ||
60 | else | 61 | else |
61 | ret = sprintf(buf, "none\n"); | 62 | ret = sprintf(buf, "none\n"); |
62 | spin_unlock(&cpuidle_driver_lock); | 63 | spin_unlock(&cpuidle_driver_lock); |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index c99c00d35d34..a0b2f7e0eedb 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/stat.h> | 20 | #include <linux/stat.h> |
21 | #include <linux/opp.h> | 21 | #include <linux/pm_opp.h> |
22 | #include <linux/devfreq.h> | 22 | #include <linux/devfreq.h> |
23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d, | |||
902 | { | 902 | { |
903 | struct devfreq *df = to_devfreq(d); | 903 | struct devfreq *df = to_devfreq(d); |
904 | struct device *dev = df->dev.parent; | 904 | struct device *dev = df->dev.parent; |
905 | struct opp *opp; | 905 | struct dev_pm_opp *opp; |
906 | ssize_t count = 0; | 906 | ssize_t count = 0; |
907 | unsigned long freq = 0; | 907 | unsigned long freq = 0; |
908 | 908 | ||
909 | rcu_read_lock(); | 909 | rcu_read_lock(); |
910 | do { | 910 | do { |
911 | opp = opp_find_freq_ceil(dev, &freq); | 911 | opp = dev_pm_opp_find_freq_ceil(dev, &freq); |
912 | if (IS_ERR(opp)) | 912 | if (IS_ERR(opp)) |
913 | break; | 913 | break; |
914 | 914 | ||
@@ -993,10 +993,10 @@ static int __init devfreq_init(void) | |||
993 | } | 993 | } |
994 | 994 | ||
995 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | 995 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); |
996 | if (IS_ERR(devfreq_wq)) { | 996 | if (!devfreq_wq) { |
997 | class_destroy(devfreq_class); | 997 | class_destroy(devfreq_class); |
998 | pr_err("%s: couldn't create workqueue\n", __FILE__); | 998 | pr_err("%s: couldn't create workqueue\n", __FILE__); |
999 | return PTR_ERR(devfreq_wq); | 999 | return -ENOMEM; |
1000 | } | 1000 | } |
1001 | devfreq_class->dev_groups = devfreq_groups; | 1001 | devfreq_class->dev_groups = devfreq_groups; |
1002 | 1002 | ||
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit); | |||
1029 | * under the locked area. The pointer returned must be used prior to unlocking | 1029 | * under the locked area. The pointer returned must be used prior to unlocking |
1030 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 1030 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
1031 | */ | 1031 | */ |
1032 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | 1032 | struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, |
1033 | u32 flags) | 1033 | unsigned long *freq, |
1034 | u32 flags) | ||
1034 | { | 1035 | { |
1035 | struct opp *opp; | 1036 | struct dev_pm_opp *opp; |
1036 | 1037 | ||
1037 | if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { | 1038 | if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { |
1038 | /* The freq is an upper bound. opp should be lower */ | 1039 | /* The freq is an upper bound. opp should be lower */ |
1039 | opp = opp_find_freq_floor(dev, freq); | 1040 | opp = dev_pm_opp_find_freq_floor(dev, freq); |
1040 | 1041 | ||
1041 | /* If not available, use the closest opp */ | 1042 | /* If not available, use the closest opp */ |
1042 | if (opp == ERR_PTR(-ERANGE)) | 1043 | if (opp == ERR_PTR(-ERANGE)) |
1043 | opp = opp_find_freq_ceil(dev, freq); | 1044 | opp = dev_pm_opp_find_freq_ceil(dev, freq); |
1044 | } else { | 1045 | } else { |
1045 | /* The freq is an lower bound. opp should be higher */ | 1046 | /* The freq is an lower bound. opp should be higher */ |
1046 | opp = opp_find_freq_ceil(dev, freq); | 1047 | opp = dev_pm_opp_find_freq_ceil(dev, freq); |
1047 | 1048 | ||
1048 | /* If not available, use the closest opp */ | 1049 | /* If not available, use the closest opp */ |
1049 | if (opp == ERR_PTR(-ERANGE)) | 1050 | if (opp == ERR_PTR(-ERANGE)) |
1050 | opp = opp_find_freq_floor(dev, freq); | 1051 | opp = dev_pm_opp_find_freq_floor(dev, freq); |
1051 | } | 1052 | } |
1052 | 1053 | ||
1053 | return opp; | 1054 | return opp; |
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) | |||
1066 | int ret = 0; | 1067 | int ret = 0; |
1067 | 1068 | ||
1068 | rcu_read_lock(); | 1069 | rcu_read_lock(); |
1069 | nh = opp_get_notifier(dev); | 1070 | nh = dev_pm_opp_get_notifier(dev); |
1070 | if (IS_ERR(nh)) | 1071 | if (IS_ERR(nh)) |
1071 | ret = PTR_ERR(nh); | 1072 | ret = PTR_ERR(nh); |
1072 | rcu_read_unlock(); | 1073 | rcu_read_unlock(); |
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) | |||
1092 | int ret = 0; | 1093 | int ret = 0; |
1093 | 1094 | ||
1094 | rcu_read_lock(); | 1095 | rcu_read_lock(); |
1095 | nh = opp_get_notifier(dev); | 1096 | nh = dev_pm_opp_get_notifier(dev); |
1096 | if (IS_ERR(nh)) | 1097 | if (IS_ERR(nh)) |
1097 | ret = PTR_ERR(nh); | 1098 | ret = PTR_ERR(nh); |
1098 | rcu_read_unlock(); | 1099 | rcu_read_unlock(); |
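The devfreq hunks above and the exynos bus hunks that follow are a rename of the OPP API: struct opp becomes struct dev_pm_opp, opp_find_freq_ceil/floor become dev_pm_opp_find_freq_ceil/floor, and so on, with the calling convention (RCU read lock held across the lookup and the accessors) unchanged. A small consolidated sketch of the new spelling, with a placeholder device and helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* find the OPP at or above *freq and read back its exact rate and voltage */
static int my_get_opp(struct device *dev, unsigned long *freq,
                      unsigned long *volt)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *freq = dev_pm_opp_get_freq(opp);
        *volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return 0;
}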
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c index c5f86d8caca3..cede6f71cd63 100644 --- a/drivers/devfreq/exynos/exynos4_bus.c +++ b/drivers/devfreq/exynos/exynos4_bus.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
22 | #include <linux/opp.h> | 22 | #include <linux/pm_opp.h> |
23 | #include <linux/devfreq.h> | 23 | #include <linux/devfreq.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/regulator/consumer.h> | 25 | #include <linux/regulator/consumer.h> |
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq, | |||
639 | struct platform_device *pdev = container_of(dev, struct platform_device, | 639 | struct platform_device *pdev = container_of(dev, struct platform_device, |
640 | dev); | 640 | dev); |
641 | struct busfreq_data *data = platform_get_drvdata(pdev); | 641 | struct busfreq_data *data = platform_get_drvdata(pdev); |
642 | struct opp *opp; | 642 | struct dev_pm_opp *opp; |
643 | unsigned long freq; | 643 | unsigned long freq; |
644 | unsigned long old_freq = data->curr_oppinfo.rate; | 644 | unsigned long old_freq = data->curr_oppinfo.rate; |
645 | struct busfreq_opp_info new_oppinfo; | 645 | struct busfreq_opp_info new_oppinfo; |
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq, | |||
650 | rcu_read_unlock(); | 650 | rcu_read_unlock(); |
651 | return PTR_ERR(opp); | 651 | return PTR_ERR(opp); |
652 | } | 652 | } |
653 | new_oppinfo.rate = opp_get_freq(opp); | 653 | new_oppinfo.rate = dev_pm_opp_get_freq(opp); |
654 | new_oppinfo.volt = opp_get_voltage(opp); | 654 | new_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
655 | rcu_read_unlock(); | 655 | rcu_read_unlock(); |
656 | freq = new_oppinfo.rate; | 656 | freq = new_oppinfo.rate; |
657 | 657 | ||
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data) | |||
873 | exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i]; | 873 | exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i]; |
874 | 874 | ||
875 | for (i = LV_0; i < EX4210_LV_NUM; i++) { | 875 | for (i = LV_0; i < EX4210_LV_NUM; i++) { |
876 | err = opp_add(data->dev, exynos4210_busclk_table[i].clk, | 876 | err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk, |
877 | exynos4210_busclk_table[i].volt); | 877 | exynos4210_busclk_table[i].volt); |
878 | if (err) { | 878 | if (err) { |
879 | dev_err(data->dev, "Cannot add opp entries.\n"); | 879 | dev_err(data->dev, "Cannot add opp entries.\n"); |
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data) | |||
940 | } | 940 | } |
941 | 941 | ||
942 | for (i = 0; i < EX4x12_LV_NUM; i++) { | 942 | for (i = 0; i < EX4x12_LV_NUM; i++) { |
943 | ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk, | 943 | ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk, |
944 | exynos4x12_mifclk_table[i].volt); | 944 | exynos4x12_mifclk_table[i].volt); |
945 | if (ret) { | 945 | if (ret) { |
946 | dev_err(data->dev, "Fail to add opp entries.\n"); | 946 | dev_err(data->dev, "Fail to add opp entries.\n"); |
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
956 | { | 956 | { |
957 | struct busfreq_data *data = container_of(this, struct busfreq_data, | 957 | struct busfreq_data *data = container_of(this, struct busfreq_data, |
958 | pm_notifier); | 958 | pm_notifier); |
959 | struct opp *opp; | 959 | struct dev_pm_opp *opp; |
960 | struct busfreq_opp_info new_oppinfo; | 960 | struct busfreq_opp_info new_oppinfo; |
961 | unsigned long maxfreq = ULONG_MAX; | 961 | unsigned long maxfreq = ULONG_MAX; |
962 | int err = 0; | 962 | int err = 0; |
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
969 | data->disabled = true; | 969 | data->disabled = true; |
970 | 970 | ||
971 | rcu_read_lock(); | 971 | rcu_read_lock(); |
972 | opp = opp_find_freq_floor(data->dev, &maxfreq); | 972 | opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq); |
973 | if (IS_ERR(opp)) { | 973 | if (IS_ERR(opp)) { |
974 | rcu_read_unlock(); | 974 | rcu_read_unlock(); |
975 | dev_err(data->dev, "%s: unable to find a min freq\n", | 975 | dev_err(data->dev, "%s: unable to find a min freq\n", |
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
977 | mutex_unlock(&data->lock); | 977 | mutex_unlock(&data->lock); |
978 | return PTR_ERR(opp); | 978 | return PTR_ERR(opp); |
979 | } | 979 | } |
980 | new_oppinfo.rate = opp_get_freq(opp); | 980 | new_oppinfo.rate = dev_pm_opp_get_freq(opp); |
981 | new_oppinfo.volt = opp_get_voltage(opp); | 981 | new_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
982 | rcu_read_unlock(); | 982 | rcu_read_unlock(); |
983 | 983 | ||
984 | err = exynos4_bus_setvolt(data, &new_oppinfo, | 984 | err = exynos4_bus_setvolt(data, &new_oppinfo, |
@@ -1020,7 +1020,7 @@ unlock: | |||
1020 | static int exynos4_busfreq_probe(struct platform_device *pdev) | 1020 | static int exynos4_busfreq_probe(struct platform_device *pdev) |
1021 | { | 1021 | { |
1022 | struct busfreq_data *data; | 1022 | struct busfreq_data *data; |
1023 | struct opp *opp; | 1023 | struct dev_pm_opp *opp; |
1024 | struct device *dev = &pdev->dev; | 1024 | struct device *dev = &pdev->dev; |
1025 | int err = 0; | 1025 | int err = 0; |
1026 | 1026 | ||
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | rcu_read_lock(); | 1067 | rcu_read_lock(); |
1068 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); | 1068 | opp = dev_pm_opp_find_freq_floor(dev, |
1069 | &exynos4_devfreq_profile.initial_freq); | ||
1069 | if (IS_ERR(opp)) { | 1070 | if (IS_ERR(opp)) { |
1070 | rcu_read_unlock(); | 1071 | rcu_read_unlock(); |
1071 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", | 1072 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", |
1072 | exynos4_devfreq_profile.initial_freq); | 1073 | exynos4_devfreq_profile.initial_freq); |
1073 | return PTR_ERR(opp); | 1074 | return PTR_ERR(opp); |
1074 | } | 1075 | } |
1075 | data->curr_oppinfo.rate = opp_get_freq(opp); | 1076 | data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp); |
1076 | data->curr_oppinfo.volt = opp_get_voltage(opp); | 1077 | data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
1077 | rcu_read_unlock(); | 1078 | rcu_read_unlock(); |
1078 | 1079 | ||
1079 | platform_set_drvdata(pdev, data); | 1080 | platform_set_drvdata(pdev, data); |
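The table-population side of the same rename is dev_pm_opp_add(), which the exynos code above calls in a loop over its frequency/voltage arrays. A minimal hedged sketch, with placeholder values rather than the exynos tables:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int my_init_opp_table(struct device *dev)
{
        /* hypothetical frequency (Hz) / voltage (uV) pairs */
        static const struct { unsigned long hz, uv; } tbl[] = {
                { 400000000, 1050000 },
                { 200000000,  950000 },
        };
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(tbl); i++) {
                ret = dev_pm_opp_add(dev, tbl[i].hz, tbl[i].uv);
                if (ret)
                        return ret;
        }
        return 0;
}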
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c index 574b16b59be5..a60da3c1c48e 100644 --- a/drivers/devfreq/exynos/exynos5_bus.c +++ b/drivers/devfreq/exynos/exynos5_bus.c | |||
@@ -15,10 +15,9 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/devfreq.h> | 16 | #include <linux/devfreq.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/opp.h> | 18 | #include <linux/pm_opp.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
21 | #include <linux/opp.h> | ||
22 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
23 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
24 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq, | |||
132 | struct platform_device *pdev = container_of(dev, struct platform_device, | 131 | struct platform_device *pdev = container_of(dev, struct platform_device, |
133 | dev); | 132 | dev); |
134 | struct busfreq_data_int *data = platform_get_drvdata(pdev); | 133 | struct busfreq_data_int *data = platform_get_drvdata(pdev); |
135 | struct opp *opp; | 134 | struct dev_pm_opp *opp; |
136 | unsigned long old_freq, freq; | 135 | unsigned long old_freq, freq; |
137 | unsigned long volt; | 136 | unsigned long volt; |
138 | 137 | ||
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq, | |||
144 | return PTR_ERR(opp); | 143 | return PTR_ERR(opp); |
145 | } | 144 | } |
146 | 145 | ||
147 | freq = opp_get_freq(opp); | 146 | freq = dev_pm_opp_get_freq(opp); |
148 | volt = opp_get_voltage(opp); | 147 | volt = dev_pm_opp_get_voltage(opp); |
149 | rcu_read_unlock(); | 148 | rcu_read_unlock(); |
150 | 149 | ||
151 | old_freq = data->curr_freq; | 150 | old_freq = data->curr_freq; |
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data) | |||
246 | int i, err = 0; | 245 | int i, err = 0; |
247 | 246 | ||
248 | for (i = LV_0; i < _LV_END; i++) { | 247 | for (i = LV_0; i < _LV_END; i++) { |
249 | err = opp_add(data->dev, exynos5_int_opp_table[i].clk, | 248 | err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk, |
250 | exynos5_int_opp_table[i].volt); | 249 | exynos5_int_opp_table[i].volt); |
251 | if (err) { | 250 | if (err) { |
252 | dev_err(data->dev, "Cannot add opp entries.\n"); | 251 | dev_err(data->dev, "Cannot add opp entries.\n"); |
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this, | |||
262 | { | 261 | { |
263 | struct busfreq_data_int *data = container_of(this, | 262 | struct busfreq_data_int *data = container_of(this, |
264 | struct busfreq_data_int, pm_notifier); | 263 | struct busfreq_data_int, pm_notifier); |
265 | struct opp *opp; | 264 | struct dev_pm_opp *opp; |
266 | unsigned long maxfreq = ULONG_MAX; | 265 | unsigned long maxfreq = ULONG_MAX; |
267 | unsigned long freq; | 266 | unsigned long freq; |
268 | unsigned long volt; | 267 | unsigned long volt; |
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this, | |||
276 | data->disabled = true; | 275 | data->disabled = true; |
277 | 276 | ||
278 | rcu_read_lock(); | 277 | rcu_read_lock(); |
279 | opp = opp_find_freq_floor(data->dev, &maxfreq); | 278 | opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq); |
280 | if (IS_ERR(opp)) { | 279 | if (IS_ERR(opp)) { |
281 | rcu_read_unlock(); | 280 | rcu_read_unlock(); |
282 | err = PTR_ERR(opp); | 281 | err = PTR_ERR(opp); |
283 | goto unlock; | 282 | goto unlock; |
284 | } | 283 | } |
285 | freq = opp_get_freq(opp); | 284 | freq = dev_pm_opp_get_freq(opp); |
286 | volt = opp_get_voltage(opp); | 285 | volt = dev_pm_opp_get_voltage(opp); |
287 | rcu_read_unlock(); | 286 | rcu_read_unlock(); |
288 | 287 | ||
289 | err = exynos5_int_setvolt(data, volt); | 288 | err = exynos5_int_setvolt(data, volt); |
@@ -316,7 +315,7 @@ unlock: | |||
316 | static int exynos5_busfreq_int_probe(struct platform_device *pdev) | 315 | static int exynos5_busfreq_int_probe(struct platform_device *pdev) |
317 | { | 316 | { |
318 | struct busfreq_data_int *data; | 317 | struct busfreq_data_int *data; |
319 | struct opp *opp; | 318 | struct dev_pm_opp *opp; |
320 | struct device *dev = &pdev->dev; | 319 | struct device *dev = &pdev->dev; |
321 | struct device_node *np; | 320 | struct device_node *np; |
322 | unsigned long initial_freq; | 321 | unsigned long initial_freq; |
@@ -351,46 +350,43 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev) | |||
351 | 350 | ||
352 | err = exynos5250_init_int_tables(data); | 351 | err = exynos5250_init_int_tables(data); |
353 | if (err) | 352 | if (err) |
354 | goto err_regulator; | 353 | return err; |
355 | 354 | ||
356 | data->vdd_int = regulator_get(dev, "vdd_int"); | 355 | data->vdd_int = devm_regulator_get(dev, "vdd_int"); |
357 | if (IS_ERR(data->vdd_int)) { | 356 | if (IS_ERR(data->vdd_int)) { |
358 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); | 357 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); |
359 | err = PTR_ERR(data->vdd_int); | 358 | return PTR_ERR(data->vdd_int); |
360 | goto err_regulator; | ||
361 | } | 359 | } |
362 | 360 | ||
363 | data->int_clk = clk_get(dev, "int_clk"); | 361 | data->int_clk = devm_clk_get(dev, "int_clk"); |
364 | if (IS_ERR(data->int_clk)) { | 362 | if (IS_ERR(data->int_clk)) { |
365 | dev_err(dev, "Cannot get clock \"int_clk\"\n"); | 363 | dev_err(dev, "Cannot get clock \"int_clk\"\n"); |
366 | err = PTR_ERR(data->int_clk); | 364 | return PTR_ERR(data->int_clk); |
367 | goto err_clock; | ||
368 | } | 365 | } |
369 | 366 | ||
370 | rcu_read_lock(); | 367 | rcu_read_lock(); |
371 | opp = opp_find_freq_floor(dev, | 368 | opp = dev_pm_opp_find_freq_floor(dev, |
372 | &exynos5_devfreq_int_profile.initial_freq); | 369 | &exynos5_devfreq_int_profile.initial_freq); |
373 | if (IS_ERR(opp)) { | 370 | if (IS_ERR(opp)) { |
374 | rcu_read_unlock(); | 371 | rcu_read_unlock(); |
375 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", | 372 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", |
376 | exynos5_devfreq_int_profile.initial_freq); | 373 | exynos5_devfreq_int_profile.initial_freq); |
377 | err = PTR_ERR(opp); | 374 | return PTR_ERR(opp); |
378 | goto err_opp_add; | ||
379 | } | 375 | } |
380 | initial_freq = opp_get_freq(opp); | 376 | initial_freq = dev_pm_opp_get_freq(opp); |
381 | initial_volt = opp_get_voltage(opp); | 377 | initial_volt = dev_pm_opp_get_voltage(opp); |
382 | rcu_read_unlock(); | 378 | rcu_read_unlock(); |
383 | data->curr_freq = initial_freq; | 379 | data->curr_freq = initial_freq; |
384 | 380 | ||
385 | err = clk_set_rate(data->int_clk, initial_freq * 1000); | 381 | err = clk_set_rate(data->int_clk, initial_freq * 1000); |
386 | if (err) { | 382 | if (err) { |
387 | dev_err(dev, "Failed to set initial frequency\n"); | 383 | dev_err(dev, "Failed to set initial frequency\n"); |
388 | goto err_opp_add; | 384 | return err; |
389 | } | 385 | } |
390 | 386 | ||
391 | err = exynos5_int_setvolt(data, initial_volt); | 387 | err = exynos5_int_setvolt(data, initial_volt); |
392 | if (err) | 388 | if (err) |
393 | goto err_opp_add; | 389 | return err; |
394 | 390 | ||
395 | platform_set_drvdata(pdev, data); | 391 | platform_set_drvdata(pdev, data); |
396 | 392 | ||
@@ -419,12 +415,6 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev) | |||
419 | 415 | ||
420 | err_devfreq_add: | 416 | err_devfreq_add: |
421 | devfreq_remove_device(data->devfreq); | 417 | devfreq_remove_device(data->devfreq); |
422 | platform_set_drvdata(pdev, NULL); | ||
423 | err_opp_add: | ||
424 | clk_put(data->int_clk); | ||
425 | err_clock: | ||
426 | regulator_put(data->vdd_int); | ||
427 | err_regulator: | ||
428 | return err; | 418 | return err; |
429 | } | 419 | } |
430 | 420 | ||
@@ -435,9 +425,6 @@ static int exynos5_busfreq_int_remove(struct platform_device *pdev) | |||
435 | pm_qos_remove_request(&data->int_req); | 425 | pm_qos_remove_request(&data->int_req); |
436 | unregister_pm_notifier(&data->pm_notifier); | 426 | unregister_pm_notifier(&data->pm_notifier); |
437 | devfreq_remove_device(data->devfreq); | 427 | devfreq_remove_device(data->devfreq); |
438 | regulator_put(data->vdd_int); | ||
439 | clk_put(data->int_clk); | ||
440 | platform_set_drvdata(pdev, NULL); | ||
441 | 428 | ||
442 | return 0; | 429 | return 0; |
443 | } | 430 | } |
@@ -479,7 +466,7 @@ static int __init exynos5_busfreq_int_init(void) | |||
479 | 466 | ||
480 | exynos5_devfreq_pdev = | 467 | exynos5_devfreq_pdev = |
481 | platform_device_register_simple("exynos5-bus-int", -1, NULL, 0); | 468 | platform_device_register_simple("exynos5-bus-int", -1, NULL, 0); |
482 | if (IS_ERR_OR_NULL(exynos5_devfreq_pdev)) { | 469 | if (IS_ERR(exynos5_devfreq_pdev)) { |
483 | ret = PTR_ERR(exynos5_devfreq_pdev); | 470 | ret = PTR_ERR(exynos5_devfreq_pdev); |
484 | goto out1; | 471 | goto out1; |
485 | } | 472 | } |
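Besides the OPP rename, exynos5_bus.c switches to managed resources: devm_regulator_get() and devm_clk_get() tie the regulator and clock to the device's lifetime, which is what lets the err_clock/err_regulator unwind labels and the explicit regulator_put()/clk_put() calls in the remove path be dropped. A minimal sketch of the managed pattern (hypothetical probe function, supply and clock names as in the driver):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int my_probe(struct platform_device *pdev)
{
        struct regulator *vdd;
        struct clk *clk;

        /* both are released automatically if probe fails later on or
         * when the device is unbound, so no goto ladder is needed */
        vdd = devm_regulator_get(&pdev->dev, "vdd_int");
        if (IS_ERR(vdd))
                return PTR_ERR(vdd);

        clk = devm_clk_get(&pdev->dev, "int_clk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return 0;
}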
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 57fe1ae32a0d..43959edd4291 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -193,16 +193,14 @@ out: | |||
193 | 193 | ||
194 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | 194 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) |
195 | { | 195 | { |
196 | acpi_handle dhandle, intel_handle; | 196 | acpi_handle dhandle; |
197 | acpi_status status; | ||
198 | int ret; | 197 | int ret; |
199 | 198 | ||
200 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 199 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); |
201 | if (!dhandle) | 200 | if (!dhandle) |
202 | return false; | 201 | return false; |
203 | 202 | ||
204 | status = acpi_get_handle(dhandle, "_DSM", &intel_handle); | 203 | if (!acpi_has_method(dhandle, "_DSM")) { |
205 | if (ACPI_FAILURE(status)) { | ||
206 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); | 204 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); |
207 | return false; | 205 | return false; |
208 | } | 206 | } |
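Here (and in the nouveau, PCI hotplug, PCI-ACPI and fujitsu hunks further down) the "call acpi_get_handle() and throw the handle away" existence test is replaced by acpi_has_method(). A minimal before/after sketch of that idiom; the two helpers are hypothetical, the ACPI calls are the point.

#include <linux/acpi.h>

/* Old idiom: prove a method exists by fetching its handle into a
 * temporary that is never used again. */
static bool method_present_old(acpi_handle handle)
{
	acpi_handle tmp;

	return ACPI_SUCCESS(acpi_get_handle(handle, "_DSM", &tmp));
}

/* New idiom: acpi_has_method() wraps exactly that pattern. */
static bool method_present_new(acpi_handle handle)
{
	return acpi_has_method(handle, "_DSM");
}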
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index dd7d2e182719..cfbeee607b3a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = { | |||
253 | 253 | ||
254 | static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | 254 | static int nouveau_dsm_pci_probe(struct pci_dev *pdev) |
255 | { | 255 | { |
256 | acpi_handle dhandle, nvidia_handle; | 256 | acpi_handle dhandle; |
257 | acpi_status status; | ||
258 | int retval = 0; | 257 | int retval = 0; |
259 | 258 | ||
260 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 259 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); |
261 | if (!dhandle) | 260 | if (!dhandle) |
262 | return false; | 261 | return false; |
263 | 262 | ||
264 | status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); | 263 | if (!acpi_has_method(dhandle, "_DSM")) |
265 | if (ACPI_FAILURE(status)) { | ||
266 | return false; | 264 | return false; |
267 | } | ||
268 | 265 | ||
269 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) | 266 | if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) |
270 | retval |= NOUVEAU_DSM_HAS_MUX; | 267 | retval |= NOUVEAU_DSM_HAS_MUX; |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index c1336193b04b..fd7ce374f812 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -854,10 +854,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
854 | 0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45, | 854 | 0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45, |
855 | 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE, | 855 | 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE, |
856 | }; | 856 | }; |
857 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; | 857 | union acpi_object params[4]; |
858 | union acpi_object params[4], *obj; | ||
859 | struct acpi_object_list input; | 858 | struct acpi_object_list input; |
860 | struct acpi_device *adev; | 859 | struct acpi_device *adev; |
860 | unsigned long long value; | ||
861 | acpi_handle handle; | 861 | acpi_handle handle; |
862 | 862 | ||
863 | handle = ACPI_HANDLE(&client->dev); | 863 | handle = ACPI_HANDLE(&client->dev); |
@@ -878,22 +878,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
878 | params[3].package.count = 0; | 878 | params[3].package.count = 0; |
879 | params[3].package.elements = NULL; | 879 | params[3].package.elements = NULL; |
880 | 880 | ||
881 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) { | 881 | if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input, |
882 | &value))) { | ||
882 | dev_err(&client->dev, "device _DSM execution failed\n"); | 883 | dev_err(&client->dev, "device _DSM execution failed\n"); |
883 | return -ENODEV; | 884 | return -ENODEV; |
884 | } | 885 | } |
885 | 886 | ||
886 | obj = (union acpi_object *)buf.pointer; | 887 | pdata->hid_descriptor_address = value; |
887 | if (obj->type != ACPI_TYPE_INTEGER) { | ||
888 | dev_err(&client->dev, "device _DSM returned invalid type: %d\n", | ||
889 | obj->type); | ||
890 | kfree(buf.pointer); | ||
891 | return -EINVAL; | ||
892 | } | ||
893 | |||
894 | pdata->hid_descriptor_address = obj->integer.value; | ||
895 | 888 | ||
896 | kfree(buf.pointer); | ||
897 | return 0; | 889 | return 0; |
898 | } | 890 | } |
899 | 891 | ||
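The i2c-hid hunk drops the manual acpi_buffer / union acpi_object plumbing, the type check and the kfree() in favour of acpi_evaluate_integer(), which evaluates the method, verifies the result is an integer and releases the output buffer internally. A minimal sketch with a hypothetical caller follows; _STA is used here only because it is a method that returns a plain integer, whereas the hunk above evaluates _DSM with a populated struct acpi_object_list instead of the NULL argument list.

#include <linux/acpi.h>
#include <linux/errno.h>

/* Hypothetical helper: evaluate an integer-returning control method
 * with no arguments and hand the value back to the caller. */
static int example_read_integer(acpi_handle handle, unsigned long long *out)
{
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_STA", NULL, out);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}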
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 3be58f89ac77..75ba8608383e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -254,10 +254,12 @@ static int i2c_device_probe(struct device *dev) | |||
254 | client->flags & I2C_CLIENT_WAKE); | 254 | client->flags & I2C_CLIENT_WAKE); |
255 | dev_dbg(dev, "probe\n"); | 255 | dev_dbg(dev, "probe\n"); |
256 | 256 | ||
257 | acpi_dev_pm_attach(&client->dev, true); | ||
257 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); | 258 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); |
258 | if (status) { | 259 | if (status) { |
259 | client->driver = NULL; | 260 | client->driver = NULL; |
260 | i2c_set_clientdata(client, NULL); | 261 | i2c_set_clientdata(client, NULL); |
262 | acpi_dev_pm_detach(&client->dev, true); | ||
261 | } | 263 | } |
262 | return status; | 264 | return status; |
263 | } | 265 | } |
@@ -283,6 +285,7 @@ static int i2c_device_remove(struct device *dev) | |||
283 | client->driver = NULL; | 285 | client->driver = NULL; |
284 | i2c_set_clientdata(client, NULL); | 286 | i2c_set_clientdata(client, NULL); |
285 | } | 287 | } |
288 | acpi_dev_pm_detach(&client->dev, true); | ||
286 | return status; | 289 | return status; |
287 | } | 290 | } |
288 | 291 | ||
@@ -1111,8 +1114,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | |||
1111 | if (ret < 0 || !info.addr) | 1114 | if (ret < 0 || !info.addr) |
1112 | return AE_OK; | 1115 | return AE_OK; |
1113 | 1116 | ||
1117 | adev->power.flags.ignore_parent = true; | ||
1114 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | 1118 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); |
1115 | if (!i2c_new_device(adapter, &info)) { | 1119 | if (!i2c_new_device(adapter, &info)) { |
1120 | adev->power.flags.ignore_parent = false; | ||
1116 | dev_err(&adapter->dev, | 1121 | dev_err(&adapter->dev, |
1117 | "failed to add I2C device %s from ACPI\n", | 1122 | "failed to add I2C device %s from ACPI\n", |
1118 | dev_name(&adev->dev)); | 1123 | dev_name(&adev->dev)); |
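The i2c-core hunks bracket the client driver's life cycle with ACPI power management: acpi_dev_pm_attach() before probe, acpi_dev_pm_detach() on probe failure and on remove, plus the ignore_parent flag so the ACPI core does not power the child through its parent while the adapter handles that itself. A minimal sketch of the attach/detach symmetry for a hypothetical bus; example_bus_probe()/example_bus_remove() and the drv_probe callback are illustrative only.

#include <linux/acpi.h>
#include <linux/device.h>

/* Attach ACPI PM before handing the device to its driver; detach (and
 * power down) again if the driver rejects it or when it is removed. */
static int example_bus_probe(struct device *dev,
			     int (*drv_probe)(struct device *))
{
	int ret;

	acpi_dev_pm_attach(dev, true);	/* power the device up via ACPI */
	ret = drv_probe(dev);
	if (ret)
		acpi_dev_pm_detach(dev, true);	/* power it back down */
	return ret;
}

static void example_bus_remove(struct device *dev)
{
	acpi_dev_pm_detach(dev, true);
}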
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f116d664b473..3226ce98fb18 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table; | |||
123 | * which is also the index into the MWAIT hint array. | 123 | * which is also the index into the MWAIT hint array. |
124 | * Thus C0 is a dummy. | 124 | * Thus C0 is a dummy. |
125 | */ | 125 | */ |
126 | static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = { | 126 | static struct cpuidle_state nehalem_cstates[] __initdata = { |
127 | { | 127 | { |
128 | .name = "C1-NHM", | 128 | .name = "C1-NHM", |
129 | .desc = "MWAIT 0x00", | 129 | .desc = "MWAIT 0x00", |
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = { | |||
156 | .enter = NULL } | 156 | .enter = NULL } |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = { | 159 | static struct cpuidle_state snb_cstates[] __initdata = { |
160 | { | 160 | { |
161 | .name = "C1-SNB", | 161 | .name = "C1-SNB", |
162 | .desc = "MWAIT 0x00", | 162 | .desc = "MWAIT 0x00", |
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = { | |||
196 | .enter = NULL } | 196 | .enter = NULL } |
197 | }; | 197 | }; |
198 | 198 | ||
199 | static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = { | 199 | static struct cpuidle_state ivb_cstates[] __initdata = { |
200 | { | 200 | { |
201 | .name = "C1-IVB", | 201 | .name = "C1-IVB", |
202 | .desc = "MWAIT 0x00", | 202 | .desc = "MWAIT 0x00", |
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = { | |||
236 | .enter = NULL } | 236 | .enter = NULL } |
237 | }; | 237 | }; |
238 | 238 | ||
239 | static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { | 239 | static struct cpuidle_state hsw_cstates[] __initdata = { |
240 | { | 240 | { |
241 | .name = "C1-HSW", | 241 | .name = "C1-HSW", |
242 | .desc = "MWAIT 0x00", | 242 | .desc = "MWAIT 0x00", |
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { | |||
297 | .enter = NULL } | 297 | .enter = NULL } |
298 | }; | 298 | }; |
299 | 299 | ||
300 | static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = { | 300 | static struct cpuidle_state atom_cstates[] __initdata = { |
301 | { | 301 | { |
302 | .name = "C1E-ATM", | 302 | .name = "C1E-ATM", |
303 | .desc = "MWAIT 0x00", | 303 | .desc = "MWAIT 0x00", |
@@ -390,7 +390,7 @@ static int cpu_hotplug_notify(struct notifier_block *n, | |||
390 | int hotcpu = (unsigned long)hcpu; | 390 | int hotcpu = (unsigned long)hcpu; |
391 | struct cpuidle_device *dev; | 391 | struct cpuidle_device *dev; |
392 | 392 | ||
393 | switch (action & 0xf) { | 393 | switch (action & ~CPU_TASKS_FROZEN) { |
394 | case CPU_ONLINE: | 394 | case CPU_ONLINE: |
395 | 395 | ||
396 | if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) | 396 | if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) |
@@ -490,7 +490,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); | |||
490 | /* | 490 | /* |
491 | * intel_idle_probe() | 491 | * intel_idle_probe() |
492 | */ | 492 | */ |
493 | static int intel_idle_probe(void) | 493 | static int __init intel_idle_probe(void) |
494 | { | 494 | { |
495 | unsigned int eax, ebx, ecx; | 495 | unsigned int eax, ebx, ecx; |
496 | const struct x86_cpu_id *id; | 496 | const struct x86_cpu_id *id; |
@@ -558,7 +558,7 @@ static void intel_idle_cpuidle_devices_uninit(void) | |||
558 | * intel_idle_cpuidle_driver_init() | 558 | * intel_idle_cpuidle_driver_init() |
559 | * allocate, initialize cpuidle_states | 559 | * allocate, initialize cpuidle_states |
560 | */ | 560 | */ |
561 | static int intel_idle_cpuidle_driver_init(void) | 561 | static int __init intel_idle_cpuidle_driver_init(void) |
562 | { | 562 | { |
563 | int cstate; | 563 | int cstate; |
564 | struct cpuidle_driver *drv = &intel_idle_driver; | 564 | struct cpuidle_driver *drv = &intel_idle_driver; |
@@ -628,7 +628,7 @@ static int intel_idle_cpu_init(int cpu) | |||
628 | int num_substates, mwait_hint, mwait_cstate, mwait_substate; | 628 | int num_substates, mwait_hint, mwait_cstate, mwait_substate; |
629 | 629 | ||
630 | if (cpuidle_state_table[cstate].enter == NULL) | 630 | if (cpuidle_state_table[cstate].enter == NULL) |
631 | continue; | 631 | break; |
632 | 632 | ||
633 | if (cstate + 1 > max_cstate) { | 633 | if (cstate + 1 > max_cstate) { |
634 | printk(PREFIX "max_cstate %d reached\n", max_cstate); | 634 | printk(PREFIX "max_cstate %d reached\n", max_cstate); |
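The switch statement above stops masking the hotplug action with the magic constant 0xf and masks out CPU_TASKS_FROZEN instead, so the *_FROZEN variants generated on the suspend/resume path fall into the same cases as the normal events. A minimal sketch of a notifier using that idiom; example_cpu_notify() is hypothetical.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Stripping CPU_TASKS_FROZEN makes CPU_ONLINE_FROZEN take the same
 * branch as CPU_ONLINE, and likewise for the other hotplug events. */
static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("cpu %lu online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("cpu %lu dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}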
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 785675a56a10..900946950230 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -88,7 +88,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, | |||
88 | pr_warn("Device scope bus [%d] not found\n", scope->bus); | 88 | pr_warn("Device scope bus [%d] not found\n", scope->bus); |
89 | break; | 89 | break; |
90 | } | 90 | } |
91 | pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn)); | 91 | pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function)); |
92 | if (!pdev) { | 92 | if (!pdev) { |
93 | /* warning will be printed below */ | 93 | /* warning will be printed below */ |
94 | break; | 94 | break; |
@@ -99,7 +99,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, | |||
99 | } | 99 | } |
100 | if (!pdev) { | 100 | if (!pdev) { |
101 | pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", | 101 | pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", |
102 | segment, scope->bus, path->dev, path->fn); | 102 | segment, scope->bus, path->device, path->function); |
103 | *dev = NULL; | 103 | *dev = NULL; |
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index f71673dbb23d..ab86902fd9ff 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -686,12 +686,12 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, | |||
686 | * Access PCI directly due to the PCI | 686 | * Access PCI directly due to the PCI |
687 | * subsystem isn't initialized yet. | 687 | * subsystem isn't initialized yet. |
688 | */ | 688 | */ |
689 | bus = read_pci_config_byte(bus, path->dev, path->fn, | 689 | bus = read_pci_config_byte(bus, path->device, path->function, |
690 | PCI_SECONDARY_BUS); | 690 | PCI_SECONDARY_BUS); |
691 | path++; | 691 | path++; |
692 | } | 692 | } |
693 | ir_hpet[ir_hpet_num].bus = bus; | 693 | ir_hpet[ir_hpet_num].bus = bus; |
694 | ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn); | 694 | ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); |
695 | ir_hpet[ir_hpet_num].iommu = iommu; | 695 | ir_hpet[ir_hpet_num].iommu = iommu; |
696 | ir_hpet[ir_hpet_num].id = scope->enumeration_id; | 696 | ir_hpet[ir_hpet_num].id = scope->enumeration_id; |
697 | ir_hpet_num++; | 697 | ir_hpet_num++; |
@@ -714,13 +714,13 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | |||
714 | * Access PCI directly due to the PCI | 714 | * Access PCI directly due to the PCI |
715 | * subsystem isn't initialized yet. | 715 | * subsystem isn't initialized yet. |
716 | */ | 716 | */ |
717 | bus = read_pci_config_byte(bus, path->dev, path->fn, | 717 | bus = read_pci_config_byte(bus, path->device, path->function, |
718 | PCI_SECONDARY_BUS); | 718 | PCI_SECONDARY_BUS); |
719 | path++; | 719 | path++; |
720 | } | 720 | } |
721 | 721 | ||
722 | ir_ioapic[ir_ioapic_num].bus = bus; | 722 | ir_ioapic[ir_ioapic_num].bus = bus; |
723 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn); | 723 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); |
724 | ir_ioapic[ir_ioapic_num].iommu = iommu; | 724 | ir_ioapic[ir_ioapic_num].iommu = iommu; |
725 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | 725 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; |
726 | ir_ioapic_num++; | 726 | ir_ioapic_num++; |
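The dmar.c and intel_irq_remapping.c hunks follow an ACPICA rename: the DMAR device-scope path entries now use the members 'device' and 'function' rather than 'dev' and 'fn'. A minimal sketch of the walk those loops perform; example_scope_to_devfn() is a hypothetical helper that, like the code above, uses the early read_pci_config_byte() accessor because the PCI core is not initialised at that point.

#include <linux/acpi.h>		/* ACPICA DMAR structures */
#include <linux/pci.h>
#include <asm/pci-direct.h>	/* read_pci_config_byte(), x86 early PCI */

/* Walk a DMAR device-scope path of 'count' entries across any bridges,
 * updating *bus as we descend, and return the devfn of the last entry. */
static u8 example_scope_to_devfn(struct acpi_dmar_pci_path *path,
				 int count, u8 *bus)
{
	while (--count > 0) {
		*bus = read_pci_config_byte(*bus, path->device,
					    path->function,
					    PCI_SECONDARY_BUS);
		path++;
	}
	return PCI_DEVFN(path->device, path->function);
}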
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 2a47e82821da..5440131cd4ee 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); | |||
411 | static int pcihp_is_ejectable(acpi_handle handle) | 411 | static int pcihp_is_ejectable(acpi_handle handle) |
412 | { | 412 | { |
413 | acpi_status status; | 413 | acpi_status status; |
414 | acpi_handle tmp; | ||
415 | unsigned long long removable; | 414 | unsigned long long removable; |
416 | status = acpi_get_handle(handle, "_ADR", &tmp); | 415 | if (!acpi_has_method(handle, "_ADR")) |
417 | if (ACPI_FAILURE(status)) | ||
418 | return 0; | 416 | return 0; |
419 | status = acpi_get_handle(handle, "_EJ0", &tmp); | 417 | if (acpi_has_method(handle, "_EJ0")) |
420 | if (ACPI_SUCCESS(status)) | ||
421 | return 1; | 418 | return 1; |
422 | status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); | 419 | status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); |
423 | if (ACPI_SUCCESS(status) && removable) | 420 | if (ACPI_SUCCESS(status) && removable) |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 1ea75236a15f..4a0a9ac7a1e5 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -871,21 +871,17 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data) | |||
871 | put_bridge(bridge); | 871 | put_bridge(bridge); |
872 | } | 872 | } |
873 | 873 | ||
874 | static void hotplug_event_work(struct work_struct *work) | 874 | static void hotplug_event_work(void *data, u32 type) |
875 | { | 875 | { |
876 | struct acpiphp_context *context; | 876 | struct acpiphp_context *context = data; |
877 | struct acpi_hp_work *hp_work; | 877 | acpi_handle handle = context->handle; |
878 | 878 | ||
879 | hp_work = container_of(work, struct acpi_hp_work, work); | ||
880 | context = hp_work->context; | ||
881 | acpi_scan_lock_acquire(); | 879 | acpi_scan_lock_acquire(); |
882 | 880 | ||
883 | hotplug_event(hp_work->handle, hp_work->type, context); | 881 | hotplug_event(handle, type, context); |
884 | 882 | ||
885 | acpi_scan_lock_release(); | 883 | acpi_scan_lock_release(); |
886 | acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type, | 884 | acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL); |
887 | ACPI_OST_SC_SUCCESS, NULL); | ||
888 | kfree(hp_work); /* allocated in handle_hotplug_event() */ | ||
889 | put_bridge(context->func.parent); | 885 | put_bridge(context->func.parent); |
890 | } | 886 | } |
891 | 887 | ||
@@ -936,10 +932,10 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data) | |||
936 | 932 | ||
937 | mutex_lock(&acpiphp_context_lock); | 933 | mutex_lock(&acpiphp_context_lock); |
938 | context = acpiphp_get_context(handle); | 934 | context = acpiphp_get_context(handle); |
939 | if (context) { | 935 | if (context && !WARN_ON(context->handle != handle)) { |
940 | get_bridge(context->func.parent); | 936 | get_bridge(context->func.parent); |
941 | acpiphp_put_context(context); | 937 | acpiphp_put_context(context); |
942 | alloc_acpi_hp_work(handle, type, context, hotplug_event_work); | 938 | acpi_hotplug_execute(hotplug_event_work, context, type); |
943 | mutex_unlock(&acpiphp_context_lock); | 939 | mutex_unlock(&acpiphp_context_lock); |
944 | return; | 940 | return; |
945 | } | 941 | } |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index b0299e6d9a3f..dfd1f59de729 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -181,7 +181,6 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev) | |||
181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | 181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
182 | { | 182 | { |
183 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 183 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); |
184 | acpi_handle tmp; | ||
185 | static const u8 state_conv[] = { | 184 | static const u8 state_conv[] = { |
186 | [PCI_D0] = ACPI_STATE_D0, | 185 | [PCI_D0] = ACPI_STATE_D0, |
187 | [PCI_D1] = ACPI_STATE_D1, | 186 | [PCI_D1] = ACPI_STATE_D1, |
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
192 | int error = -EINVAL; | 191 | int error = -EINVAL; |
193 | 192 | ||
194 | /* If the ACPI device has _EJ0, ignore the device */ | 193 | /* If the ACPI device has _EJ0, ignore the device */ |
195 | if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) | 194 | if (!handle || acpi_has_method(handle, "_EJ0")) |
196 | return -ENODEV; | 195 | return -ENODEV; |
197 | 196 | ||
198 | switch (state) { | 197 | switch (state) { |
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index a6afd4108beb..aefcc32e5634 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -190,16 +190,10 @@ struct eeepc_laptop { | |||
190 | */ | 190 | */ |
191 | static int write_acpi_int(acpi_handle handle, const char *method, int val) | 191 | static int write_acpi_int(acpi_handle handle, const char *method, int val) |
192 | { | 192 | { |
193 | struct acpi_object_list params; | ||
194 | union acpi_object in_obj; | ||
195 | acpi_status status; | 193 | acpi_status status; |
196 | 194 | ||
197 | params.count = 1; | 195 | status = acpi_execute_simple_method(handle, (char *)method, val); |
198 | params.pointer = &in_obj; | ||
199 | in_obj.type = ACPI_TYPE_INTEGER; | ||
200 | in_obj.integer.value = val; | ||
201 | 196 | ||
202 | status = acpi_evaluate_object(handle, (char *)method, ¶ms, NULL); | ||
203 | return (status == AE_OK ? 0 : -1); | 197 | return (status == AE_OK ? 0 : -1); |
204 | } | 198 | } |
205 | 199 | ||
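write_acpi_int() now delegates to acpi_execute_simple_method(), which covers the "one integer argument, result ignored" case without building an acpi_object_list by hand (the fujitsu set_lcd_level() changes below use it the same way). A minimal sketch; the method name "SLVL" and the helper are made up for illustration.

#include <linux/acpi.h>
#include <linux/errno.h>

/* Call an ACPI method that takes a single integer argument and whose
 * return value we do not care about. */
static int example_write_int(acpi_handle handle, int val)
{
	acpi_status status;

	status = acpi_execute_simple_method(handle, "SLVL", val);

	return ACPI_SUCCESS(status) ? 0 : -EIO;
}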
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 52b8a97efde1..9d30d69aa78f 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) | |||
219 | { .type = ACPI_TYPE_INTEGER } | 219 | { .type = ACPI_TYPE_INTEGER } |
220 | }; | 220 | }; |
221 | struct acpi_object_list arg_list = { 4, ¶ms[0] }; | 221 | struct acpi_object_list arg_list = { 4, ¶ms[0] }; |
222 | struct acpi_buffer output; | 222 | unsigned long long value; |
223 | union acpi_object out_obj; | ||
224 | acpi_handle handle = NULL; | 223 | acpi_handle handle = NULL; |
225 | 224 | ||
226 | status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); | 225 | status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); |
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) | |||
235 | params[2].integer.value = arg1; | 234 | params[2].integer.value = arg1; |
236 | params[3].integer.value = arg2; | 235 | params[3].integer.value = arg2; |
237 | 236 | ||
238 | output.length = sizeof(out_obj); | 237 | status = acpi_evaluate_integer(handle, NULL, &arg_list, &value); |
239 | output.pointer = &out_obj; | ||
240 | |||
241 | status = acpi_evaluate_object(handle, NULL, &arg_list, &output); | ||
242 | if (ACPI_FAILURE(status)) { | 238 | if (ACPI_FAILURE(status)) { |
243 | vdbg_printk(FUJLAPTOP_DBG_WARN, | 239 | vdbg_printk(FUJLAPTOP_DBG_WARN, |
244 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n", | 240 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n", |
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) | |||
246 | return -ENODEV; | 242 | return -ENODEV; |
247 | } | 243 | } |
248 | 244 | ||
249 | if (out_obj.type != ACPI_TYPE_INTEGER) { | ||
250 | vdbg_printk(FUJLAPTOP_DBG_WARN, | ||
251 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not " | ||
252 | "return an integer\n", | ||
253 | cmd, arg0, arg1, arg2); | ||
254 | return -ENODEV; | ||
255 | } | ||
256 | |||
257 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | 245 | vdbg_printk(FUJLAPTOP_DBG_TRACE, |
258 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n", | 246 | "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n", |
259 | cmd, arg0, arg1, arg2, (int)out_obj.integer.value); | 247 | cmd, arg0, arg1, arg2, (int)value); |
260 | return out_obj.integer.value; | 248 | return value; |
261 | } | 249 | } |
262 | 250 | ||
263 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 251 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) |
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev) | |||
317 | static int set_lcd_level(int level) | 305 | static int set_lcd_level(int level) |
318 | { | 306 | { |
319 | acpi_status status = AE_OK; | 307 | acpi_status status = AE_OK; |
320 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; | ||
321 | struct acpi_object_list arg_list = { 1, &arg0 }; | ||
322 | acpi_handle handle = NULL; | 308 | acpi_handle handle = NULL; |
323 | 309 | ||
324 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", | 310 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", |
@@ -333,9 +319,8 @@ static int set_lcd_level(int level) | |||
333 | return -ENODEV; | 319 | return -ENODEV; |
334 | } | 320 | } |
335 | 321 | ||
336 | arg0.integer.value = level; | ||
337 | 322 | ||
338 | status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); | 323 | status = acpi_execute_simple_method(handle, NULL, level); |
339 | if (ACPI_FAILURE(status)) | 324 | if (ACPI_FAILURE(status)) |
340 | return -ENODEV; | 325 | return -ENODEV; |
341 | 326 | ||
@@ -345,8 +330,6 @@ static int set_lcd_level(int level) | |||
345 | static int set_lcd_level_alt(int level) | 330 | static int set_lcd_level_alt(int level) |
346 | { | 331 | { |
347 | acpi_status status = AE_OK; | 332 | acpi_status status = AE_OK; |
348 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; | ||
349 | struct acpi_object_list arg_list = { 1, &arg0 }; | ||
350 | acpi_handle handle = NULL; | 333 | acpi_handle handle = NULL; |
351 | 334 | ||
352 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", | 335 | vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", |
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level) | |||
361 | return -ENODEV; | 344 | return -ENODEV; |
362 | } | 345 | } |
363 | 346 | ||
364 | arg0.integer.value = level; | 347 | status = acpi_execute_simple_method(handle, NULL, level); |
365 | |||
366 | status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); | ||
367 | if (ACPI_FAILURE(status)) | 348 | if (ACPI_FAILURE(status)) |
368 | return -ENODEV; | 349 | return -ENODEV; |
369 | 350 | ||
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = { | |||
586 | 567 | ||
587 | static void dmi_check_cb_common(const struct dmi_system_id *id) | 568 | static void dmi_check_cb_common(const struct dmi_system_id *id) |
588 | { | 569 | { |
589 | acpi_handle handle; | ||
590 | pr_info("Identified laptop model '%s'\n", id->ident); | 570 | pr_info("Identified laptop model '%s'\n", id->ident); |
591 | if (use_alt_lcd_levels == -1) { | 571 | if (use_alt_lcd_levels == -1) { |
592 | if (ACPI_SUCCESS(acpi_get_handle(NULL, | 572 | if (acpi_has_method(NULL, |
593 | "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle))) | 573 | "\\_SB.PCI0.LPCB.FJEX.SBL2")) |
594 | use_alt_lcd_levels = 1; | 574 | use_alt_lcd_levels = 1; |
595 | else | 575 | else |
596 | use_alt_lcd_levels = 0; | 576 | use_alt_lcd_levels = 0; |
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = { | |||
653 | 633 | ||
654 | static int acpi_fujitsu_add(struct acpi_device *device) | 634 | static int acpi_fujitsu_add(struct acpi_device *device) |
655 | { | 635 | { |
656 | acpi_handle handle; | ||
657 | int result = 0; | 636 | int result = 0; |
658 | int state = 0; | 637 | int state = 0; |
659 | struct input_dev *input; | 638 | struct input_dev *input; |
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) | |||
702 | 681 | ||
703 | fujitsu->dev = device; | 682 | fujitsu->dev = device; |
704 | 683 | ||
705 | if (ACPI_SUCCESS | 684 | if (acpi_has_method(device->handle, METHOD_NAME__INI)) { |
706 | (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { | ||
707 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); | 685 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); |
708 | if (ACPI_FAILURE | 686 | if (ACPI_FAILURE |
709 | (acpi_evaluate_object | 687 | (acpi_evaluate_object |
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) | |||
803 | 781 | ||
804 | static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | 782 | static int acpi_fujitsu_hotkey_add(struct acpi_device *device) |
805 | { | 783 | { |
806 | acpi_handle handle; | ||
807 | int result = 0; | 784 | int result = 0; |
808 | int state = 0; | 785 | int state = 0; |
809 | struct input_dev *input; | 786 | struct input_dev *input; |
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
866 | 843 | ||
867 | fujitsu_hotkey->dev = device; | 844 | fujitsu_hotkey->dev = device; |
868 | 845 | ||
869 | if (ACPI_SUCCESS | 846 | if (acpi_has_method(device->handle, METHOD_NAME__INI)) { |
870 | (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { | ||
871 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); | 847 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); |
872 | if (ACPI_FAILURE | 848 | if (ACPI_FAILURE |
873 | (acpi_evaluate_object | 849 | (acpi_evaluate_object |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 89c4519d48ac..6788acc22ab9 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -72,8 +72,15 @@ enum { | |||
72 | VPCCMD_W_BL_POWER = 0x33, | 72 | VPCCMD_W_BL_POWER = 0x33, |
73 | }; | 73 | }; |
74 | 74 | ||
75 | struct ideapad_rfk_priv { | ||
76 | int dev; | ||
77 | struct ideapad_private *priv; | ||
78 | }; | ||
79 | |||
75 | struct ideapad_private { | 80 | struct ideapad_private { |
81 | struct acpi_device *adev; | ||
76 | struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; | 82 | struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; |
83 | struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM]; | ||
77 | struct platform_device *platform_device; | 84 | struct platform_device *platform_device; |
78 | struct input_dev *inputdev; | 85 | struct input_dev *inputdev; |
79 | struct backlight_device *blightdev; | 86 | struct backlight_device *blightdev; |
@@ -81,8 +88,6 @@ struct ideapad_private { | |||
81 | unsigned long cfg; | 88 | unsigned long cfg; |
82 | }; | 89 | }; |
83 | 90 | ||
84 | static acpi_handle ideapad_handle; | ||
85 | static struct ideapad_private *ideapad_priv; | ||
86 | static bool no_bt_rfkill; | 91 | static bool no_bt_rfkill; |
87 | module_param(no_bt_rfkill, bool, 0444); | 92 | module_param(no_bt_rfkill, bool, 0444); |
88 | MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); | 93 | MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); |
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data) | |||
200 | */ | 205 | */ |
201 | static int debugfs_status_show(struct seq_file *s, void *data) | 206 | static int debugfs_status_show(struct seq_file *s, void *data) |
202 | { | 207 | { |
208 | struct ideapad_private *priv = s->private; | ||
203 | unsigned long value; | 209 | unsigned long value; |
204 | 210 | ||
205 | if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value)) | 211 | if (!priv) |
212 | return -EINVAL; | ||
213 | |||
214 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) | ||
206 | seq_printf(s, "Backlight max:\t%lu\n", value); | 215 | seq_printf(s, "Backlight max:\t%lu\n", value); |
207 | if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value)) | 216 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value)) |
208 | seq_printf(s, "Backlight now:\t%lu\n", value); | 217 | seq_printf(s, "Backlight now:\t%lu\n", value); |
209 | if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value)) | 218 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value)) |
210 | seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off"); | 219 | seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off"); |
211 | seq_printf(s, "=====================\n"); | 220 | seq_printf(s, "=====================\n"); |
212 | 221 | ||
213 | if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value)) | 222 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value)) |
214 | seq_printf(s, "Radio status:\t%s(%lu)\n", | 223 | seq_printf(s, "Radio status:\t%s(%lu)\n", |
215 | value ? "On" : "Off", value); | 224 | value ? "On" : "Off", value); |
216 | if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value)) | 225 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value)) |
217 | seq_printf(s, "Wifi status:\t%s(%lu)\n", | 226 | seq_printf(s, "Wifi status:\t%s(%lu)\n", |
218 | value ? "On" : "Off", value); | 227 | value ? "On" : "Off", value); |
219 | if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value)) | 228 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value)) |
220 | seq_printf(s, "BT status:\t%s(%lu)\n", | 229 | seq_printf(s, "BT status:\t%s(%lu)\n", |
221 | value ? "On" : "Off", value); | 230 | value ? "On" : "Off", value); |
222 | if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value)) | 231 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value)) |
223 | seq_printf(s, "3G status:\t%s(%lu)\n", | 232 | seq_printf(s, "3G status:\t%s(%lu)\n", |
224 | value ? "On" : "Off", value); | 233 | value ? "On" : "Off", value); |
225 | seq_printf(s, "=====================\n"); | 234 | seq_printf(s, "=====================\n"); |
226 | 235 | ||
227 | if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value)) | 236 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) |
228 | seq_printf(s, "Touchpad status:%s(%lu)\n", | 237 | seq_printf(s, "Touchpad status:%s(%lu)\n", |
229 | value ? "On" : "Off", value); | 238 | value ? "On" : "Off", value); |
230 | if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value)) | 239 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value)) |
231 | seq_printf(s, "Camera status:\t%s(%lu)\n", | 240 | seq_printf(s, "Camera status:\t%s(%lu)\n", |
232 | value ? "On" : "Off", value); | 241 | value ? "On" : "Off", value); |
233 | 242 | ||
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data) | |||
236 | 245 | ||
237 | static int debugfs_status_open(struct inode *inode, struct file *file) | 246 | static int debugfs_status_open(struct inode *inode, struct file *file) |
238 | { | 247 | { |
239 | return single_open(file, debugfs_status_show, NULL); | 248 | return single_open(file, debugfs_status_show, inode->i_private); |
240 | } | 249 | } |
241 | 250 | ||
242 | static const struct file_operations debugfs_status_fops = { | 251 | static const struct file_operations debugfs_status_fops = { |
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = { | |||
249 | 258 | ||
250 | static int debugfs_cfg_show(struct seq_file *s, void *data) | 259 | static int debugfs_cfg_show(struct seq_file *s, void *data) |
251 | { | 260 | { |
252 | if (!ideapad_priv) { | 261 | struct ideapad_private *priv = s->private; |
262 | |||
263 | if (!priv) { | ||
253 | seq_printf(s, "cfg: N/A\n"); | 264 | seq_printf(s, "cfg: N/A\n"); |
254 | } else { | 265 | } else { |
255 | seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ", | 266 | seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ", |
256 | ideapad_priv->cfg); | 267 | priv->cfg); |
257 | if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg)) | 268 | if (test_bit(CFG_BT_BIT, &priv->cfg)) |
258 | seq_printf(s, "Bluetooth "); | 269 | seq_printf(s, "Bluetooth "); |
259 | if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg)) | 270 | if (test_bit(CFG_3G_BIT, &priv->cfg)) |
260 | seq_printf(s, "3G "); | 271 | seq_printf(s, "3G "); |
261 | if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg)) | 272 | if (test_bit(CFG_WIFI_BIT, &priv->cfg)) |
262 | seq_printf(s, "Wireless "); | 273 | seq_printf(s, "Wireless "); |
263 | if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg)) | 274 | if (test_bit(CFG_CAMERA_BIT, &priv->cfg)) |
264 | seq_printf(s, "Camera "); | 275 | seq_printf(s, "Camera "); |
265 | seq_printf(s, "\nGraphic: "); | 276 | seq_printf(s, "\nGraphic: "); |
266 | switch ((ideapad_priv->cfg)&0x700) { | 277 | switch ((priv->cfg)&0x700) { |
267 | case 0x100: | 278 | case 0x100: |
268 | seq_printf(s, "Intel"); | 279 | seq_printf(s, "Intel"); |
269 | break; | 280 | break; |
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data) | |||
287 | 298 | ||
288 | static int debugfs_cfg_open(struct inode *inode, struct file *file) | 299 | static int debugfs_cfg_open(struct inode *inode, struct file *file) |
289 | { | 300 | { |
290 | return single_open(file, debugfs_cfg_show, NULL); | 301 | return single_open(file, debugfs_cfg_show, inode->i_private); |
291 | } | 302 | } |
292 | 303 | ||
293 | static const struct file_operations debugfs_cfg_fops = { | 304 | static const struct file_operations debugfs_cfg_fops = { |
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv) | |||
308 | goto errout; | 319 | goto errout; |
309 | } | 320 | } |
310 | 321 | ||
311 | node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL, | 322 | node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv, |
312 | &debugfs_cfg_fops); | 323 | &debugfs_cfg_fops); |
313 | if (!node) { | 324 | if (!node) { |
314 | pr_err("failed to create cfg in debugfs"); | 325 | pr_err("failed to create cfg in debugfs"); |
315 | goto errout; | 326 | goto errout; |
316 | } | 327 | } |
317 | 328 | ||
318 | node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL, | 329 | node = debugfs_create_file("status", S_IRUGO, priv->debug, priv, |
319 | &debugfs_status_fops); | 330 | &debugfs_status_fops); |
320 | if (!node) { | 331 | if (!node) { |
321 | pr_err("failed to create status in debugfs"); | 332 | pr_err("failed to create status in debugfs"); |
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev, | |||
342 | char *buf) | 353 | char *buf) |
343 | { | 354 | { |
344 | unsigned long result; | 355 | unsigned long result; |
356 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
345 | 357 | ||
346 | if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result)) | 358 | if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result)) |
347 | return sprintf(buf, "-1\n"); | 359 | return sprintf(buf, "-1\n"); |
348 | return sprintf(buf, "%lu\n", result); | 360 | return sprintf(buf, "%lu\n", result); |
349 | } | 361 | } |
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev, | |||
353 | const char *buf, size_t count) | 365 | const char *buf, size_t count) |
354 | { | 366 | { |
355 | int ret, state; | 367 | int ret, state; |
368 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
356 | 369 | ||
357 | if (!count) | 370 | if (!count) |
358 | return 0; | 371 | return 0; |
359 | if (sscanf(buf, "%i", &state) != 1) | 372 | if (sscanf(buf, "%i", &state) != 1) |
360 | return -EINVAL; | 373 | return -EINVAL; |
361 | ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state); | 374 | ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); |
362 | if (ret < 0) | 375 | if (ret < 0) |
363 | return -EIO; | 376 | return -EIO; |
364 | return count; | 377 | return count; |
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev, | |||
371 | char *buf) | 384 | char *buf) |
372 | { | 385 | { |
373 | unsigned long result; | 386 | unsigned long result; |
387 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
374 | 388 | ||
375 | if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result)) | 389 | if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result)) |
376 | return sprintf(buf, "-1\n"); | 390 | return sprintf(buf, "-1\n"); |
377 | return sprintf(buf, "%lu\n", result); | 391 | return sprintf(buf, "%lu\n", result); |
378 | } | 392 | } |
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev, | |||
382 | const char *buf, size_t count) | 396 | const char *buf, size_t count) |
383 | { | 397 | { |
384 | int ret, state; | 398 | int ret, state; |
399 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
385 | 400 | ||
386 | if (!count) | 401 | if (!count) |
387 | return 0; | 402 | return 0; |
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev, | |||
389 | return -EINVAL; | 404 | return -EINVAL; |
390 | if (state < 0 || state > 4 || state == 3) | 405 | if (state < 0 || state > 4 || state == 3) |
391 | return -EINVAL; | 406 | return -EINVAL; |
392 | ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state); | 407 | ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); |
393 | if (ret < 0) | 408 | if (ret < 0) |
394 | return -EIO; | 409 | return -EIO; |
395 | return count; | 410 | return count; |
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj, | |||
415 | supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); | 430 | supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); |
416 | else if (attr == &dev_attr_fan_mode.attr) { | 431 | else if (attr == &dev_attr_fan_mode.attr) { |
417 | unsigned long value; | 432 | unsigned long value; |
418 | supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value); | 433 | supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN, |
434 | &value); | ||
419 | } else | 435 | } else |
420 | supported = true; | 436 | supported = true; |
421 | 437 | ||
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = { | |||
445 | 461 | ||
446 | static int ideapad_rfk_set(void *data, bool blocked) | 462 | static int ideapad_rfk_set(void *data, bool blocked) |
447 | { | 463 | { |
448 | unsigned long opcode = (unsigned long)data; | 464 | struct ideapad_rfk_priv *priv = data; |
449 | 465 | ||
450 | return write_ec_cmd(ideapad_handle, opcode, !blocked); | 466 | return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked); |
451 | } | 467 | } |
452 | 468 | ||
453 | static struct rfkill_ops ideapad_rfk_ops = { | 469 | static struct rfkill_ops ideapad_rfk_ops = { |
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv) | |||
459 | unsigned long hw_blocked; | 475 | unsigned long hw_blocked; |
460 | int i; | 476 | int i; |
461 | 477 | ||
462 | if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked)) | 478 | if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked)) |
463 | return; | 479 | return; |
464 | hw_blocked = !hw_blocked; | 480 | hw_blocked = !hw_blocked; |
465 | 481 | ||
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv) | |||
468 | rfkill_set_hw_state(priv->rfk[i], hw_blocked); | 484 | rfkill_set_hw_state(priv->rfk[i], hw_blocked); |
469 | } | 485 | } |
470 | 486 | ||
471 | static int ideapad_register_rfkill(struct acpi_device *adevice, int dev) | 487 | static int ideapad_register_rfkill(struct ideapad_private *priv, int dev) |
472 | { | 488 | { |
473 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | ||
474 | int ret; | 489 | int ret; |
475 | unsigned long sw_blocked; | 490 | unsigned long sw_blocked; |
476 | 491 | ||
477 | if (no_bt_rfkill && | 492 | if (no_bt_rfkill && |
478 | (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) { | 493 | (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) { |
479 | /* Force to enable bluetooth when no_bt_rfkill=1 */ | 494 | /* Force to enable bluetooth when no_bt_rfkill=1 */ |
480 | write_ec_cmd(ideapad_handle, | 495 | write_ec_cmd(priv->adev->handle, |
481 | ideapad_rfk_data[dev].opcode, 1); | 496 | ideapad_rfk_data[dev].opcode, 1); |
482 | return 0; | 497 | return 0; |
483 | } | 498 | } |
484 | 499 | priv->rfk_priv[dev].dev = dev; | |
485 | priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev, | 500 | priv->rfk_priv[dev].priv = priv; |
486 | ideapad_rfk_data[dev].type, &ideapad_rfk_ops, | 501 | |
487 | (void *)(long)dev); | 502 | priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, |
503 | &priv->platform_device->dev, | ||
504 | ideapad_rfk_data[dev].type, | ||
505 | &ideapad_rfk_ops, | ||
506 | &priv->rfk_priv[dev]); | ||
488 | if (!priv->rfk[dev]) | 507 | if (!priv->rfk[dev]) |
489 | return -ENOMEM; | 508 | return -ENOMEM; |
490 | 509 | ||
491 | if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1, | 510 | if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1, |
492 | &sw_blocked)) { | 511 | &sw_blocked)) { |
493 | rfkill_init_sw_state(priv->rfk[dev], 0); | 512 | rfkill_init_sw_state(priv->rfk[dev], 0); |
494 | } else { | 513 | } else { |
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev) | |||
504 | return 0; | 523 | return 0; |
505 | } | 524 | } |
506 | 525 | ||
507 | static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev) | 526 | static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev) |
508 | { | 527 | { |
509 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | ||
510 | |||
511 | if (!priv->rfk[dev]) | 528 | if (!priv->rfk[dev]) |
512 | return; | 529 | return; |
513 | 530 | ||
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev) | |||
518 | /* | 535 | /* |
519 | * Platform device | 536 | * Platform device |
520 | */ | 537 | */ |
521 | static int ideapad_platform_init(struct ideapad_private *priv) | 538 | static int ideapad_sysfs_init(struct ideapad_private *priv) |
522 | { | 539 | { |
523 | int result; | 540 | return sysfs_create_group(&priv->platform_device->dev.kobj, |
524 | |||
525 | priv->platform_device = platform_device_alloc("ideapad", -1); | ||
526 | if (!priv->platform_device) | ||
527 | return -ENOMEM; | ||
528 | platform_set_drvdata(priv->platform_device, priv); | ||
529 | |||
530 | result = platform_device_add(priv->platform_device); | ||
531 | if (result) | ||
532 | goto fail_platform_device; | ||
533 | |||
534 | result = sysfs_create_group(&priv->platform_device->dev.kobj, | ||
535 | &ideapad_attribute_group); | 541 | &ideapad_attribute_group); |
536 | if (result) | ||
537 | goto fail_sysfs; | ||
538 | return 0; | ||
539 | |||
540 | fail_sysfs: | ||
541 | platform_device_del(priv->platform_device); | ||
542 | fail_platform_device: | ||
543 | platform_device_put(priv->platform_device); | ||
544 | return result; | ||
545 | } | 542 | } |
546 | 543 | ||
547 | static void ideapad_platform_exit(struct ideapad_private *priv) | 544 | static void ideapad_sysfs_exit(struct ideapad_private *priv) |
548 | { | 545 | { |
549 | sysfs_remove_group(&priv->platform_device->dev.kobj, | 546 | sysfs_remove_group(&priv->platform_device->dev.kobj, |
550 | &ideapad_attribute_group); | 547 | &ideapad_attribute_group); |
551 | platform_device_unregister(priv->platform_device); | ||
552 | } | 548 | } |
553 | 549 | ||
554 | /* | 550 | /* |
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv) | |||
623 | { | 619 | { |
624 | unsigned long long_pressed; | 620 | unsigned long long_pressed; |
625 | 621 | ||
626 | if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed)) | 622 | if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) |
627 | return; | 623 | return; |
628 | if (long_pressed) | 624 | if (long_pressed) |
629 | ideapad_input_report(priv, 17); | 625 | ideapad_input_report(priv, 17); |
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) | |||
635 | { | 631 | { |
636 | unsigned long bit, value; | 632 | unsigned long bit, value; |
637 | 633 | ||
638 | read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value); | 634 | read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value); |
639 | 635 | ||
640 | for (bit = 0; bit < 16; bit++) { | 636 | for (bit = 0; bit < 16; bit++) { |
641 | if (test_bit(bit, &value)) { | 637 | if (test_bit(bit, &value)) { |
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) | |||
662 | */ | 658 | */ |
663 | static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) | 659 | static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) |
664 | { | 660 | { |
661 | struct ideapad_private *priv = bl_get_data(blightdev); | ||
665 | unsigned long now; | 662 | unsigned long now; |
666 | 663 | ||
667 | if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now)) | 664 | if (!priv) |
665 | return -EINVAL; | ||
666 | |||
667 | if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now)) | ||
668 | return -EIO; | 668 | return -EIO; |
669 | return now; | 669 | return now; |
670 | } | 670 | } |
671 | 671 | ||
672 | static int ideapad_backlight_update_status(struct backlight_device *blightdev) | 672 | static int ideapad_backlight_update_status(struct backlight_device *blightdev) |
673 | { | 673 | { |
674 | if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL, | 674 | struct ideapad_private *priv = bl_get_data(blightdev); |
675 | |||
676 | if (!priv) | ||
677 | return -EINVAL; | ||
678 | |||
679 | if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL, | ||
675 | blightdev->props.brightness)) | 680 | blightdev->props.brightness)) |
676 | return -EIO; | 681 | return -EIO; |
677 | if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER, | 682 | if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER, |
678 | blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1)) | 683 | blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1)) |
679 | return -EIO; | 684 | return -EIO; |
680 | 685 | ||
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv) | |||
692 | struct backlight_properties props; | 697 | struct backlight_properties props; |
693 | unsigned long max, now, power; | 698 | unsigned long max, now, power; |
694 | 699 | ||
695 | if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max)) | 700 | if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max)) |
696 | return -EIO; | 701 | return -EIO; |
697 | if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now)) | 702 | if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now)) |
698 | return -EIO; | 703 | return -EIO; |
699 | if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power)) | 704 | if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) |
700 | return -EIO; | 705 | return -EIO; |
701 | 706 | ||
702 | memset(&props, 0, sizeof(struct backlight_properties)); | 707 | memset(&props, 0, sizeof(struct backlight_properties)); |
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv) | |||
734 | 739 | ||
735 | if (!blightdev) | 740 | if (!blightdev) |
736 | return; | 741 | return; |
737 | if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power)) | 742 | if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) |
738 | return; | 743 | return; |
739 | blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; | 744 | blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; |
740 | } | 745 | } |
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) | |||
745 | 750 | ||
746 | /* if we control brightness via acpi video driver */ | 751 | /* if we control brightness via acpi video driver */ |
747 | if (priv->blightdev == NULL) { | 752 | if (priv->blightdev == NULL) { |
748 | read_ec_data(ideapad_handle, VPCCMD_R_BL, &now); | 753 | read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); |
749 | return; | 754 | return; |
750 | } | 755 | } |
751 | 756 | ||
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) | |||
755 | /* | 760 | /* |
756 | * module init/exit | 761 | * module init/exit |
757 | */ | 762 | */ |
758 | static const struct acpi_device_id ideapad_device_ids[] = { | 763 | static void ideapad_sync_touchpad_state(struct ideapad_private *priv) |
759 | { "VPC2004", 0}, | ||
760 | { "", 0}, | ||
761 | }; | ||
762 | MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); | ||
763 | |||
764 | static void ideapad_sync_touchpad_state(struct acpi_device *adevice) | ||
765 | { | 764 | { |
766 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | ||
767 | unsigned long value; | 765 | unsigned long value; |
768 | 766 | ||
769 | /* Without reading from EC touchpad LED doesn't switch state */ | 767 | /* Without reading from EC touchpad LED doesn't switch state */ |
770 | if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) { | 768 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { |
771 | /* Some IdeaPads don't really turn off touchpad - they only | 769 | /* Some IdeaPads don't really turn off touchpad - they only |
772 | * switch the LED state. We (de)activate KBC AUX port to turn | 770 | * switch the LED state. We (de)activate KBC AUX port to turn |
773 | * touchpad off and on. We send KEY_TOUCHPAD_OFF and | 771 | * touchpad off and on. We send KEY_TOUCHPAD_OFF and |
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice) | |||
779 | } | 777 | } |
780 | } | 778 | } |
781 | 779 | ||
782 | static int ideapad_acpi_add(struct acpi_device *adevice) | 780 | static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) |
781 | { | ||
782 | struct ideapad_private *priv = data; | ||
783 | unsigned long vpc1, vpc2, vpc_bit; | ||
784 | |||
785 | if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) | ||
786 | return; | ||
787 | if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) | ||
788 | return; | ||
789 | |||
790 | vpc1 = (vpc2 << 8) | vpc1; | ||
791 | for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { | ||
792 | if (test_bit(vpc_bit, &vpc1)) { | ||
793 | switch (vpc_bit) { | ||
794 | case 9: | ||
795 | ideapad_sync_rfk_state(priv); | ||
796 | break; | ||
797 | case 13: | ||
798 | case 11: | ||
799 | case 7: | ||
800 | case 6: | ||
801 | ideapad_input_report(priv, vpc_bit); | ||
802 | break; | ||
803 | case 5: | ||
804 | ideapad_sync_touchpad_state(priv); | ||
805 | break; | ||
806 | case 4: | ||
807 | ideapad_backlight_notify_brightness(priv); | ||
808 | break; | ||
809 | case 3: | ||
810 | ideapad_input_novokey(priv); | ||
811 | break; | ||
812 | case 2: | ||
813 | ideapad_backlight_notify_power(priv); | ||
814 | break; | ||
815 | case 0: | ||
816 | ideapad_check_special_buttons(priv); | ||
817 | break; | ||
818 | default: | ||
819 | pr_info("Unknown event: %lu\n", vpc_bit); | ||
820 | } | ||
821 | } | ||
822 | } | ||
823 | } | ||
824 | |||
825 | static int ideapad_acpi_add(struct platform_device *pdev) | ||
783 | { | 826 | { |
784 | int ret, i; | 827 | int ret, i; |
785 | int cfg; | 828 | int cfg; |
786 | struct ideapad_private *priv; | 829 | struct ideapad_private *priv; |
830 | struct acpi_device *adev; | ||
831 | |||
832 | ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); | ||
833 | if (ret) | ||
834 | return -ENODEV; | ||
787 | 835 | ||
788 | if (read_method_int(adevice->handle, "_CFG", &cfg)) | 836 | if (read_method_int(adev->handle, "_CFG", &cfg)) |
789 | return -ENODEV; | 837 | return -ENODEV; |
790 | 838 | ||
791 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 839 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
792 | if (!priv) | 840 | if (!priv) |
793 | return -ENOMEM; | 841 | return -ENOMEM; |
794 | dev_set_drvdata(&adevice->dev, priv); | 842 | |
795 | ideapad_priv = priv; | 843 | dev_set_drvdata(&pdev->dev, priv); |
796 | ideapad_handle = adevice->handle; | ||
797 | priv->cfg = cfg; | 844 | priv->cfg = cfg; |
845 | priv->adev = adev; | ||
846 | priv->platform_device = pdev; | ||
798 | 847 | ||
799 | ret = ideapad_platform_init(priv); | 848 | ret = ideapad_sysfs_init(priv); |
800 | if (ret) | 849 | if (ret) |
801 | goto platform_failed; | 850 | goto sysfs_failed; |
802 | 851 | ||
803 | ret = ideapad_debugfs_init(priv); | 852 | ret = ideapad_debugfs_init(priv); |
804 | if (ret) | 853 | if (ret) |
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice) | |||
810 | 859 | ||
811 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { | 860 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { |
812 | if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) | 861 | if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) |
813 | ideapad_register_rfkill(adevice, i); | 862 | ideapad_register_rfkill(priv, i); |
814 | else | 863 | else |
815 | priv->rfk[i] = NULL; | 864 | priv->rfk[i] = NULL; |
816 | } | 865 | } |
817 | ideapad_sync_rfk_state(priv); | 866 | ideapad_sync_rfk_state(priv); |
818 | ideapad_sync_touchpad_state(adevice); | 867 | ideapad_sync_touchpad_state(priv); |
819 | 868 | ||
820 | if (!acpi_video_backlight_support()) { | 869 | if (!acpi_video_backlight_support()) { |
821 | ret = ideapad_backlight_init(priv); | 870 | ret = ideapad_backlight_init(priv); |
822 | if (ret && ret != -ENODEV) | 871 | if (ret && ret != -ENODEV) |
823 | goto backlight_failed; | 872 | goto backlight_failed; |
824 | } | 873 | } |
874 | ret = acpi_install_notify_handler(adev->handle, | ||
875 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); | ||
876 | if (ret) | ||
877 | goto notification_failed; | ||
825 | 878 | ||
826 | return 0; | 879 | return 0; |
827 | 880 | notification_failed: | |
881 | ideapad_backlight_exit(priv); | ||
828 | backlight_failed: | 882 | backlight_failed: |
829 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) | 883 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) |
830 | ideapad_unregister_rfkill(adevice, i); | 884 | ideapad_unregister_rfkill(priv, i); |
831 | ideapad_input_exit(priv); | 885 | ideapad_input_exit(priv); |
832 | input_failed: | 886 | input_failed: |
833 | ideapad_debugfs_exit(priv); | 887 | ideapad_debugfs_exit(priv); |
834 | debugfs_failed: | 888 | debugfs_failed: |
835 | ideapad_platform_exit(priv); | 889 | ideapad_sysfs_exit(priv); |
836 | platform_failed: | 890 | sysfs_failed: |
837 | kfree(priv); | 891 | kfree(priv); |
838 | return ret; | 892 | return ret; |
839 | } | 893 | } |
840 | 894 | ||
841 | static int ideapad_acpi_remove(struct acpi_device *adevice) | 895 | static int ideapad_acpi_remove(struct platform_device *pdev) |
842 | { | 896 | { |
843 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | 897 | struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); |
844 | int i; | 898 | int i; |
845 | 899 | ||
900 | acpi_remove_notify_handler(priv->adev->handle, | ||
901 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); | ||
846 | ideapad_backlight_exit(priv); | 902 | ideapad_backlight_exit(priv); |
847 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) | 903 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) |
848 | ideapad_unregister_rfkill(adevice, i); | 904 | ideapad_unregister_rfkill(priv, i); |
849 | ideapad_input_exit(priv); | 905 | ideapad_input_exit(priv); |
850 | ideapad_debugfs_exit(priv); | 906 | ideapad_debugfs_exit(priv); |
851 | ideapad_platform_exit(priv); | 907 | ideapad_sysfs_exit(priv); |
852 | dev_set_drvdata(&adevice->dev, NULL); | 908 | dev_set_drvdata(&pdev->dev, NULL); |
853 | kfree(priv); | 909 | kfree(priv); |
854 | 910 | ||
855 | return 0; | 911 | return 0; |
856 | } | 912 | } |
857 | 913 | ||
858 | static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) | 914 | #ifdef CONFIG_PM_SLEEP |
915 | static int ideapad_acpi_resume(struct device *device) | ||
859 | { | 916 | { |
860 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | 917 | struct ideapad_private *priv; |
861 | acpi_handle handle = adevice->handle; | ||
862 | unsigned long vpc1, vpc2, vpc_bit; | ||
863 | |||
864 | if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) | ||
865 | return; | ||
866 | if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) | ||
867 | return; | ||
868 | 918 | ||
869 | vpc1 = (vpc2 << 8) | vpc1; | 919 | if (!device) |
870 | for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { | 920 | return -EINVAL; |
871 | if (test_bit(vpc_bit, &vpc1)) { | 921 | priv = dev_get_drvdata(device); |
872 | switch (vpc_bit) { | ||
873 | case 9: | ||
874 | ideapad_sync_rfk_state(priv); | ||
875 | break; | ||
876 | case 13: | ||
877 | case 11: | ||
878 | case 7: | ||
879 | case 6: | ||
880 | ideapad_input_report(priv, vpc_bit); | ||
881 | break; | ||
882 | case 5: | ||
883 | ideapad_sync_touchpad_state(adevice); | ||
884 | break; | ||
885 | case 4: | ||
886 | ideapad_backlight_notify_brightness(priv); | ||
887 | break; | ||
888 | case 3: | ||
889 | ideapad_input_novokey(priv); | ||
890 | break; | ||
891 | case 2: | ||
892 | ideapad_backlight_notify_power(priv); | ||
893 | break; | ||
894 | case 0: | ||
895 | ideapad_check_special_buttons(priv); | ||
896 | break; | ||
897 | default: | ||
898 | pr_info("Unknown event: %lu\n", vpc_bit); | ||
899 | } | ||
900 | } | ||
901 | } | ||
902 | } | ||
903 | 922 | ||
904 | static int ideapad_acpi_resume(struct device *device) | 923 | ideapad_sync_rfk_state(priv); |
905 | { | 924 | ideapad_sync_touchpad_state(priv); |
906 | ideapad_sync_rfk_state(ideapad_priv); | ||
907 | ideapad_sync_touchpad_state(to_acpi_device(device)); | ||
908 | return 0; | 925 | return 0; |
909 | } | 926 | } |
910 | 927 | #endif | |
911 | static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); | 928 | static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); |
912 | 929 | ||
913 | static struct acpi_driver ideapad_acpi_driver = { | 930 | static const struct acpi_device_id ideapad_device_ids[] = { |
914 | .name = "ideapad_acpi", | 931 | { "VPC2004", 0}, |
915 | .class = "IdeaPad", | 932 | { "", 0}, |
916 | .ids = ideapad_device_ids, | ||
917 | .ops.add = ideapad_acpi_add, | ||
918 | .ops.remove = ideapad_acpi_remove, | ||
919 | .ops.notify = ideapad_acpi_notify, | ||
920 | .drv.pm = &ideapad_pm, | ||
921 | .owner = THIS_MODULE, | ||
922 | }; | 933 | }; |
923 | module_acpi_driver(ideapad_acpi_driver); | 934 | MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); |
935 | |||
936 | static struct platform_driver ideapad_acpi_driver = { | ||
937 | .probe = ideapad_acpi_add, | ||
938 | .remove = ideapad_acpi_remove, | ||
939 | .driver = { | ||
940 | .name = "ideapad_acpi", | ||
941 | .owner = THIS_MODULE, | ||
942 | .pm = &ideapad_pm, | ||
943 | .acpi_match_table = ACPI_PTR(ideapad_device_ids), | ||
944 | }, | ||
945 | }; | ||
946 | |||
947 | module_platform_driver(ideapad_acpi_driver); | ||
924 | 948 | ||
925 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); | 949 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); |
926 | MODULE_DESCRIPTION("IdeaPad ACPI Extras"); | 950 | MODULE_DESCRIPTION("IdeaPad ACPI Extras"); |
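The ideapad-laptop hunk above converts the driver from a struct acpi_driver into a platform driver bound through an ACPI ID table, which in turn lets it use the generic dev_pm_ops path (SIMPLE_DEV_PM_OPS) instead of the ACPI-specific .ops callbacks. A minimal sketch of that binding pattern follows; all example_* names are illustrative and not part of the real driver.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct acpi_device_id example_ids[] = {
        { "VPC2004", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, example_ids);

static int example_probe(struct platform_device *pdev)
{
        /* the matching ACPI device's handle is reachable via ACPI_HANDLE(&pdev->dev) */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe = example_probe,
        .remove = example_remove,
        .driver = {
                .name = "example_acpi",
                .owner = THIS_MODULE,
                .acpi_match_table = ACPI_PTR(example_ids),
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");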
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c index 41b740cb28bc..a2083a9e5662 100644 --- a/drivers/platform/x86/intel-rst.c +++ b/drivers/platform/x86/intel-rst.c | |||
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev, | |||
29 | char *buf) | 29 | char *buf) |
30 | { | 30 | { |
31 | struct acpi_device *acpi; | 31 | struct acpi_device *acpi; |
32 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 32 | unsigned long long value; |
33 | union acpi_object *result; | ||
34 | acpi_status status; | 33 | acpi_status status; |
35 | 34 | ||
36 | acpi = to_acpi_device(dev); | 35 | acpi = to_acpi_device(dev); |
37 | 36 | ||
38 | status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output); | 37 | status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value); |
39 | if (!ACPI_SUCCESS(status)) | 38 | if (!ACPI_SUCCESS(status)) |
40 | return -EINVAL; | 39 | return -EINVAL; |
41 | 40 | ||
42 | result = output.pointer; | 41 | return sprintf(buf, "%lld\n", value); |
43 | |||
44 | if (result->type != ACPI_TYPE_INTEGER) { | ||
45 | kfree(result); | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | |||
49 | return sprintf(buf, "%lld\n", result->integer.value); | ||
50 | } | 42 | } |
51 | 43 | ||
52 | static ssize_t irst_store_wakeup_events(struct device *dev, | 44 | static ssize_t irst_store_wakeup_events(struct device *dev, |
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev, | |||
54 | const char *buf, size_t count) | 46 | const char *buf, size_t count) |
55 | { | 47 | { |
56 | struct acpi_device *acpi; | 48 | struct acpi_device *acpi; |
57 | struct acpi_object_list input; | ||
58 | union acpi_object param; | ||
59 | acpi_status status; | 49 | acpi_status status; |
60 | unsigned long value; | 50 | unsigned long value; |
61 | int error; | 51 | int error; |
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev, | |||
67 | if (error) | 57 | if (error) |
68 | return error; | 58 | return error; |
69 | 59 | ||
70 | param.type = ACPI_TYPE_INTEGER; | 60 | status = acpi_execute_simple_method(acpi->handle, "SFFS", value); |
71 | param.integer.value = value; | ||
72 | |||
73 | input.count = 1; | ||
74 | input.pointer = ¶m; | ||
75 | |||
76 | status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL); | ||
77 | 61 | ||
78 | if (!ACPI_SUCCESS(status)) | 62 | if (!ACPI_SUCCESS(status)) |
79 | return -EINVAL; | 63 | return -EINVAL; |
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev, | |||
91 | struct device_attribute *attr, char *buf) | 75 | struct device_attribute *attr, char *buf) |
92 | { | 76 | { |
93 | struct acpi_device *acpi; | 77 | struct acpi_device *acpi; |
94 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 78 | unsigned long long value; |
95 | union acpi_object *result; | ||
96 | acpi_status status; | 79 | acpi_status status; |
97 | 80 | ||
98 | acpi = to_acpi_device(dev); | 81 | acpi = to_acpi_device(dev); |
99 | 82 | ||
100 | status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output); | 83 | status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value); |
101 | if (!ACPI_SUCCESS(status)) | 84 | if (!ACPI_SUCCESS(status)) |
102 | return -EINVAL; | 85 | return -EINVAL; |
103 | 86 | ||
104 | result = output.pointer; | 87 | return sprintf(buf, "%lld\n", value); |
105 | |||
106 | if (result->type != ACPI_TYPE_INTEGER) { | ||
107 | kfree(result); | ||
108 | return -EINVAL; | ||
109 | } | ||
110 | |||
111 | return sprintf(buf, "%lld\n", result->integer.value); | ||
112 | } | 88 | } |
113 | 89 | ||
114 | static ssize_t irst_store_wakeup_time(struct device *dev, | 90 | static ssize_t irst_store_wakeup_time(struct device *dev, |
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev, | |||
116 | const char *buf, size_t count) | 92 | const char *buf, size_t count) |
117 | { | 93 | { |
118 | struct acpi_device *acpi; | 94 | struct acpi_device *acpi; |
119 | struct acpi_object_list input; | ||
120 | union acpi_object param; | ||
121 | acpi_status status; | 95 | acpi_status status; |
122 | unsigned long value; | 96 | unsigned long value; |
123 | int error; | 97 | int error; |
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev, | |||
129 | if (error) | 103 | if (error) |
130 | return error; | 104 | return error; |
131 | 105 | ||
132 | param.type = ACPI_TYPE_INTEGER; | 106 | status = acpi_execute_simple_method(acpi->handle, "SFTV", value); |
133 | param.integer.value = value; | ||
134 | |||
135 | input.count = 1; | ||
136 | input.pointer = ¶m; | ||
137 | |||
138 | status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL); | ||
139 | 107 | ||
140 | if (!ACPI_SUCCESS(status)) | 108 | if (!ACPI_SUCCESS(status)) |
141 | return -EINVAL; | 109 | return -EINVAL; |
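The intel-rst hunks are representative of most of this series: open-coded acpi_evaluate_object() calls with acpi_buffer/acpi_object_list plumbing give way to the acpi_evaluate_integer() and acpi_execute_simple_method() helpers. A condensed sketch of the resulting read/write pattern (the method names are taken from the hunk above; the wrapper functions themselves are illustrative only):

#include <linux/acpi.h>

/* read a control method that returns a single integer ("GFFS" above) */
static int example_read_int(acpi_handle handle, unsigned long long *value)
{
        acpi_status status;

        status = acpi_evaluate_integer(handle, "GFFS", NULL, value);
        if (ACPI_FAILURE(status))
                return -EINVAL;

        return 0;
}

/* call a control method that takes a single integer argument ("SFFS" above) */
static int example_write_int(acpi_handle handle, u64 value)
{
        acpi_status status;

        status = acpi_execute_simple_method(handle, "SFFS", value);
        if (ACPI_FAILURE(status))
                return -EINVAL;

        return 0;
}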
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c index 52259dcabecb..1838400dc036 100644 --- a/drivers/platform/x86/intel-smartconnect.c +++ b/drivers/platform/x86/intel-smartconnect.c | |||
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL"); | |||
25 | 25 | ||
26 | static int smartconnect_acpi_init(struct acpi_device *acpi) | 26 | static int smartconnect_acpi_init(struct acpi_device *acpi) |
27 | { | 27 | { |
28 | struct acpi_object_list input; | 28 | unsigned long long value; |
29 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
30 | union acpi_object *result; | ||
31 | union acpi_object param; | ||
32 | acpi_status status; | 29 | acpi_status status; |
33 | 30 | ||
34 | status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output); | 31 | status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value); |
35 | if (!ACPI_SUCCESS(status)) | 32 | if (!ACPI_SUCCESS(status)) |
36 | return -EINVAL; | 33 | return -EINVAL; |
37 | 34 | ||
38 | result = output.pointer; | 35 | if (value & 0x1) { |
39 | |||
40 | if (result->type != ACPI_TYPE_INTEGER) { | ||
41 | kfree(result); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | |||
45 | if (result->integer.value & 0x1) { | ||
46 | param.type = ACPI_TYPE_INTEGER; | ||
47 | param.integer.value = 0; | ||
48 | |||
49 | input.count = 1; | ||
50 | input.pointer = ¶m; | ||
51 | |||
52 | dev_info(&acpi->dev, "Disabling Intel Smart Connect\n"); | 36 | dev_info(&acpi->dev, "Disabling Intel Smart Connect\n"); |
53 | status = acpi_evaluate_object(acpi->handle, "SAOS", &input, | 37 | status = acpi_execute_simple_method(acpi->handle, "SAOS", 0); |
54 | NULL); | ||
55 | } | 38 | } |
56 | 39 | ||
57 | kfree(result); | ||
58 | |||
59 | return 0; | 40 | return 0; |
60 | } | 41 | } |
61 | 42 | ||
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index d6cfc1558c2f..11244f8703c4 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c | |||
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = { | |||
156 | static int intel_menlow_memory_add(struct acpi_device *device) | 156 | static int intel_menlow_memory_add(struct acpi_device *device) |
157 | { | 157 | { |
158 | int result = -ENODEV; | 158 | int result = -ENODEV; |
159 | acpi_status status = AE_OK; | ||
160 | acpi_handle dummy; | ||
161 | struct thermal_cooling_device *cdev; | 159 | struct thermal_cooling_device *cdev; |
162 | 160 | ||
163 | if (!device) | 161 | if (!device) |
164 | return -EINVAL; | 162 | return -EINVAL; |
165 | 163 | ||
166 | status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy); | 164 | if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH)) |
167 | if (ACPI_FAILURE(status)) | ||
168 | goto end; | 165 | goto end; |
169 | 166 | ||
170 | status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy); | 167 | if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH)) |
171 | if (ACPI_FAILURE(status)) | ||
172 | goto end; | 168 | goto end; |
173 | 169 | ||
174 | cdev = thermal_cooling_device_register("Memory controller", device, | 170 | cdev = thermal_cooling_device_register("Memory controller", device, |
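Likewise, the acpi_get_handle(handle, method, &dummy) idiom that was used only to test whether a method exists is replaced by acpi_has_method() here and in the hunks that follow. A small sketch of the check; "GET0" and "SET0" are placeholder method names, not the intel_menlow macros:

#include <linux/acpi.h>

/* refuse to bind unless both control methods we depend on are present */
static int example_check_methods(acpi_handle handle)
{
        if (!acpi_has_method(handle, "GET0") ||
            !acpi_has_method(handle, "SET0"))
                return -ENODEV;

        return 0;
}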
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 13ec195f0ca6..47caab0ea7a1 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -1508,7 +1508,6 @@ static void sony_nc_function_resume(void) | |||
1508 | static int sony_nc_resume(struct device *dev) | 1508 | static int sony_nc_resume(struct device *dev) |
1509 | { | 1509 | { |
1510 | struct sony_nc_value *item; | 1510 | struct sony_nc_value *item; |
1511 | acpi_handle handle; | ||
1512 | 1511 | ||
1513 | for (item = sony_nc_values; item->name; item++) { | 1512 | for (item = sony_nc_values; item->name; item++) { |
1514 | int ret; | 1513 | int ret; |
@@ -1523,15 +1522,13 @@ static int sony_nc_resume(struct device *dev) | |||
1523 | } | 1522 | } |
1524 | } | 1523 | } |
1525 | 1524 | ||
1526 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", | 1525 | if (acpi_has_method(sony_nc_acpi_handle, "ECON")) { |
1527 | &handle))) { | ||
1528 | int arg = 1; | 1526 | int arg = 1; |
1529 | if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) | 1527 | if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) |
1530 | dprintk("ECON Method failed\n"); | 1528 | dprintk("ECON Method failed\n"); |
1531 | } | 1529 | } |
1532 | 1530 | ||
1533 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", | 1531 | if (acpi_has_method(sony_nc_acpi_handle, "SN00")) |
1534 | &handle))) | ||
1535 | sony_nc_function_resume(); | 1532 | sony_nc_function_resume(); |
1536 | 1533 | ||
1537 | return 0; | 1534 | return 0; |
@@ -2682,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle, | |||
2682 | 2679 | ||
2683 | static void sony_nc_backlight_setup(void) | 2680 | static void sony_nc_backlight_setup(void) |
2684 | { | 2681 | { |
2685 | acpi_handle unused; | ||
2686 | int max_brightness = 0; | 2682 | int max_brightness = 0; |
2687 | const struct backlight_ops *ops = NULL; | 2683 | const struct backlight_ops *ops = NULL; |
2688 | struct backlight_properties props; | 2684 | struct backlight_properties props; |
@@ -2717,8 +2713,7 @@ static void sony_nc_backlight_setup(void) | |||
2717 | sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props); | 2713 | sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props); |
2718 | max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; | 2714 | max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; |
2719 | 2715 | ||
2720 | } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", | 2716 | } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) { |
2721 | &unused))) { | ||
2722 | ops = &sony_backlight_ops; | 2717 | ops = &sony_backlight_ops; |
2723 | max_brightness = SONY_MAX_BRIGHTNESS - 1; | 2718 | max_brightness = SONY_MAX_BRIGHTNESS - 1; |
2724 | 2719 | ||
@@ -2750,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device) | |||
2750 | { | 2745 | { |
2751 | acpi_status status; | 2746 | acpi_status status; |
2752 | int result = 0; | 2747 | int result = 0; |
2753 | acpi_handle handle; | ||
2754 | struct sony_nc_value *item; | 2748 | struct sony_nc_value *item; |
2755 | 2749 | ||
2756 | pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); | 2750 | pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); |
@@ -2790,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device) | |||
2790 | goto outplatform; | 2784 | goto outplatform; |
2791 | } | 2785 | } |
2792 | 2786 | ||
2793 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", | 2787 | if (acpi_has_method(sony_nc_acpi_handle, "ECON")) { |
2794 | &handle))) { | ||
2795 | int arg = 1; | 2788 | int arg = 1; |
2796 | if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) | 2789 | if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) |
2797 | dprintk("ECON Method failed\n"); | 2790 | dprintk("ECON Method failed\n"); |
2798 | } | 2791 | } |
2799 | 2792 | ||
2800 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", | 2793 | if (acpi_has_method(sony_nc_acpi_handle, "SN00")) { |
2801 | &handle))) { | ||
2802 | dprintk("Doing SNC setup\n"); | 2794 | dprintk("Doing SNC setup\n"); |
2803 | /* retrieve the available handles */ | 2795 | /* retrieve the available handles */ |
2804 | result = sony_nc_handles_setup(sony_pf_device); | 2796 | result = sony_nc_handles_setup(sony_pf_device); |
@@ -2821,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device) | |||
2821 | 2813 | ||
2822 | /* find the available acpiget as described in the DSDT */ | 2814 | /* find the available acpiget as described in the DSDT */ |
2823 | for (; item->acpiget && *item->acpiget; ++item->acpiget) { | 2815 | for (; item->acpiget && *item->acpiget; ++item->acpiget) { |
2824 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, | 2816 | if (acpi_has_method(sony_nc_acpi_handle, |
2825 | *item->acpiget, | 2817 | *item->acpiget)) { |
2826 | &handle))) { | ||
2827 | dprintk("Found %s getter: %s\n", | 2818 | dprintk("Found %s getter: %s\n", |
2828 | item->name, *item->acpiget); | 2819 | item->name, *item->acpiget); |
2829 | item->devattr.attr.mode |= S_IRUGO; | 2820 | item->devattr.attr.mode |= S_IRUGO; |
@@ -2833,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device) | |||
2833 | 2824 | ||
2834 | /* find the available acpiset as described in the DSDT */ | 2825 | /* find the available acpiset as described in the DSDT */ |
2835 | for (; item->acpiset && *item->acpiset; ++item->acpiset) { | 2826 | for (; item->acpiset && *item->acpiset; ++item->acpiset) { |
2836 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, | 2827 | if (acpi_has_method(sony_nc_acpi_handle, |
2837 | *item->acpiset, | 2828 | *item->acpiset)) { |
2838 | &handle))) { | ||
2839 | dprintk("Found %s setter: %s\n", | 2829 | dprintk("Found %s setter: %s\n", |
2840 | item->name, *item->acpiset); | 2830 | item->name, *item->acpiset); |
2841 | item->devattr.attr.mode |= S_IWUSR; | 2831 | item->devattr.attr.mode |= S_IWUSR; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 0b7efb269cf1..05e046aa5e31 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -701,6 +701,14 @@ static void __init drv_acpi_handle_init(const char *name, | |||
701 | static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle, | 701 | static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle, |
702 | u32 level, void *context, void **return_value) | 702 | u32 level, void *context, void **return_value) |
703 | { | 703 | { |
704 | struct acpi_device *dev; | ||
705 | if (!strcmp(context, "video")) { | ||
706 | if (acpi_bus_get_device(handle, &dev)) | ||
707 | return AE_OK; | ||
708 | if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev))) | ||
709 | return AE_OK; | ||
710 | } | ||
711 | |||
704 | *(acpi_handle *)return_value = handle; | 712 | *(acpi_handle *)return_value = handle; |
705 | 713 | ||
706 | return AE_CTRL_TERMINATE; | 714 | return AE_CTRL_TERMINATE; |
@@ -713,10 +721,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name, | |||
713 | acpi_status status; | 721 | acpi_status status; |
714 | acpi_handle device_found; | 722 | acpi_handle device_found; |
715 | 723 | ||
716 | BUG_ON(!name || !hid || !handle); | 724 | BUG_ON(!name || !handle); |
717 | vdbg_printk(TPACPI_DBG_INIT, | 725 | vdbg_printk(TPACPI_DBG_INIT, |
718 | "trying to locate ACPI handle for %s, using HID %s\n", | 726 | "trying to locate ACPI handle for %s, using HID %s\n", |
719 | name, hid); | 727 | name, hid ? hid : "NULL"); |
720 | 728 | ||
721 | memset(&device_found, 0, sizeof(device_found)); | 729 | memset(&device_found, 0, sizeof(device_found)); |
722 | status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback, | 730 | status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback, |
@@ -6091,19 +6099,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) | |||
6091 | { | 6099 | { |
6092 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 6100 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
6093 | union acpi_object *obj; | 6101 | union acpi_object *obj; |
6102 | struct acpi_device *device, *child; | ||
6094 | int rc; | 6103 | int rc; |
6095 | 6104 | ||
6096 | if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) { | 6105 | if (acpi_bus_get_device(handle, &device)) |
6106 | return 0; | ||
6107 | |||
6108 | rc = 0; | ||
6109 | list_for_each_entry(child, &device->children, node) { | ||
6110 | acpi_status status = acpi_evaluate_object(child->handle, "_BCL", | ||
6111 | NULL, &buffer); | ||
6112 | if (ACPI_FAILURE(status)) | ||
6113 | continue; | ||
6114 | |||
6097 | obj = (union acpi_object *)buffer.pointer; | 6115 | obj = (union acpi_object *)buffer.pointer; |
6098 | if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { | 6116 | if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { |
6099 | pr_err("Unknown _BCL data, please report this to %s\n", | 6117 | pr_err("Unknown _BCL data, please report this to %s\n", |
6100 | TPACPI_MAIL); | 6118 | TPACPI_MAIL); |
6101 | rc = 0; | 6119 | rc = 0; |
6102 | } else { | 6120 | } else { |
6103 | rc = obj->package.count; | 6121 | rc = obj->package.count; |
6104 | } | 6122 | } |
6105 | } else { | 6123 | break; |
6106 | return 0; | ||
6107 | } | 6124 | } |
6108 | 6125 | ||
6109 | kfree(buffer.pointer); | 6126 | kfree(buffer.pointer); |
@@ -6119,7 +6136,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void) | |||
6119 | acpi_handle video_device; | 6136 | acpi_handle video_device; |
6120 | int bcl_levels = 0; | 6137 | int bcl_levels = 0; |
6121 | 6138 | ||
6122 | tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device); | 6139 | tpacpi_acpi_handle_locate("video", NULL, &video_device); |
6123 | if (video_device) | 6140 | if (video_device) |
6124 | bcl_levels = tpacpi_query_bcl_levels(video_device); | 6141 | bcl_levels = tpacpi_query_bcl_levels(video_device); |
6125 | 6142 | ||
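The thinkpad_acpi change above allows tpacpi_acpi_handle_locate() to be called with a NULL HID, in which case the callback filters for ACPI_VIDEO_HID itself. A stripped-down sketch of the underlying acpi_get_devices() walk it relies on (example_* names are illustrative):

#include <linux/acpi.h>

/* record the first handle the walk reports, then stop */
static acpi_status example_locate_cb(acpi_handle handle, u32 level,
                                     void *context, void **return_value)
{
        *(acpi_handle *)return_value = handle;
        return AE_CTRL_TERMINATE;
}

/* hid may be NULL, in which case every device in the namespace is visited */
static acpi_handle example_locate(char *hid)
{
        acpi_handle found = NULL;

        acpi_get_devices(hid, example_locate_cb, NULL, (void **)&found);
        return found;
}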
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c index 4ab618c63b45..67897c8740ba 100644 --- a/drivers/platform/x86/topstar-laptop.c +++ b/drivers/platform/x86/topstar-laptop.c | |||
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event) | |||
80 | static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state) | 80 | static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state) |
81 | { | 81 | { |
82 | acpi_status status; | 82 | acpi_status status; |
83 | union acpi_object fncx_params[1] = { | ||
84 | { .type = ACPI_TYPE_INTEGER } | ||
85 | }; | ||
86 | struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] }; | ||
87 | 83 | ||
88 | fncx_params[0].integer.value = state ? 0x86 : 0x87; | 84 | status = acpi_execute_simple_method(device->handle, "FNCX", |
89 | status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL); | 85 | state ? 0x86 : 0x87); |
90 | if (ACPI_FAILURE(status)) { | 86 | if (ACPI_FAILURE(status)) { |
91 | pr_err("Unable to switch FNCX notifications\n"); | 87 | pr_err("Unable to switch FNCX notifications\n"); |
92 | return -ENODEV; | 88 | return -ENODEV; |
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index eb3467ea6d86..0cfadb65f7c6 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value) | |||
191 | 191 | ||
192 | static int write_acpi_int(const char *methodName, int val) | 192 | static int write_acpi_int(const char *methodName, int val) |
193 | { | 193 | { |
194 | struct acpi_object_list params; | ||
195 | union acpi_object in_objs[1]; | ||
196 | acpi_status status; | 194 | acpi_status status; |
197 | 195 | ||
198 | params.count = ARRAY_SIZE(in_objs); | 196 | status = acpi_execute_simple_method(NULL, (char *)methodName, val); |
199 | params.pointer = in_objs; | ||
200 | in_objs[0].type = ACPI_TYPE_INTEGER; | ||
201 | in_objs[0].integer.value = val; | ||
202 | |||
203 | status = acpi_evaluate_object(NULL, (char *)methodName, ¶ms, NULL); | ||
204 | return (status == AE_OK) ? 0 : -EIO; | 197 | return (status == AE_OK) ? 0 : -EIO; |
205 | } | 198 | } |
206 | 199 | ||
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work) | |||
947 | */ | 940 | */ |
948 | static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev) | 941 | static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev) |
949 | { | 942 | { |
950 | struct acpi_buffer buf; | 943 | unsigned long long value; |
951 | union acpi_object out_obj; | ||
952 | acpi_status status; | 944 | acpi_status status; |
953 | 945 | ||
954 | buf.pointer = &out_obj; | 946 | status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO", |
955 | buf.length = sizeof(out_obj); | 947 | NULL, &value); |
956 | 948 | if (ACPI_FAILURE(status)) { | |
957 | status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO", | ||
958 | NULL, &buf); | ||
959 | if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) { | ||
960 | pr_err("ACPI INFO method execution failed\n"); | 949 | pr_err("ACPI INFO method execution failed\n"); |
961 | return -EIO; | 950 | return -EIO; |
962 | } | 951 | } |
963 | 952 | ||
964 | return out_obj.integer.value; | 953 | return value; |
965 | } | 954 | } |
966 | 955 | ||
967 | static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev, | 956 | static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev, |
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev, | |||
981 | static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) | 970 | static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) |
982 | { | 971 | { |
983 | acpi_status status; | 972 | acpi_status status; |
984 | acpi_handle ec_handle, handle; | 973 | acpi_handle ec_handle; |
985 | int error; | 974 | int error; |
986 | u32 hci_result; | 975 | u32 hci_result; |
987 | 976 | ||
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) | |||
1008 | */ | 997 | */ |
1009 | status = AE_ERROR; | 998 | status = AE_ERROR; |
1010 | ec_handle = ec_get_handle(); | 999 | ec_handle = ec_get_handle(); |
1011 | if (ec_handle) | 1000 | if (ec_handle && acpi_has_method(ec_handle, "NTFY")) { |
1012 | status = acpi_get_handle(ec_handle, "NTFY", &handle); | ||
1013 | |||
1014 | if (ACPI_SUCCESS(status)) { | ||
1015 | INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work); | 1001 | INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work); |
1016 | 1002 | ||
1017 | error = i8042_install_filter(toshiba_acpi_i8042_filter); | 1003 | error = i8042_install_filter(toshiba_acpi_i8042_filter); |
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) | |||
1027 | * Determine hotkey query interface. Prefer using the INFO | 1013 | * Determine hotkey query interface. Prefer using the INFO |
1028 | * method when it is available. | 1014 | * method when it is available. |
1029 | */ | 1015 | */ |
1030 | status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle); | 1016 | if (acpi_has_method(dev->acpi_dev->handle, "INFO")) |
1031 | if (ACPI_SUCCESS(status)) { | ||
1032 | dev->info_supported = 1; | 1017 | dev->info_supported = 1; |
1033 | } else { | 1018 | else { |
1034 | hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); | 1019 | hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); |
1035 | if (hci_result == HCI_SUCCESS) | 1020 | if (hci_result == HCI_SUCCESS) |
1036 | dev->system_event_supported = 1; | 1021 | dev->system_event_supported = 1; |
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev) | |||
1155 | 1140 | ||
1156 | static const char *find_hci_method(acpi_handle handle) | 1141 | static const char *find_hci_method(acpi_handle handle) |
1157 | { | 1142 | { |
1158 | acpi_status status; | 1143 | if (acpi_has_method(handle, "GHCI")) |
1159 | acpi_handle hci_handle; | ||
1160 | |||
1161 | status = acpi_get_handle(handle, "GHCI", &hci_handle); | ||
1162 | if (ACPI_SUCCESS(status)) | ||
1163 | return "GHCI"; | 1144 | return "GHCI"; |
1164 | 1145 | ||
1165 | status = acpi_get_handle(handle, "SPFC", &hci_handle); | 1146 | if (acpi_has_method(handle, "SPFC")) |
1166 | if (ACPI_SUCCESS(status)) | ||
1167 | return "SPFC"; | 1147 | return "SPFC"; |
1168 | 1148 | ||
1169 | return NULL; | 1149 | return NULL; |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 601ea9512242..62e8c221d01e 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) | |||
252 | { | 252 | { |
253 | struct guid_block *block = NULL; | 253 | struct guid_block *block = NULL; |
254 | char method[5]; | 254 | char method[5]; |
255 | struct acpi_object_list input; | ||
256 | union acpi_object params[1]; | ||
257 | acpi_status status; | 255 | acpi_status status; |
258 | acpi_handle handle; | 256 | acpi_handle handle; |
259 | 257 | ||
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) | |||
263 | if (!block) | 261 | if (!block) |
264 | return AE_NOT_EXIST; | 262 | return AE_NOT_EXIST; |
265 | 263 | ||
266 | input.count = 1; | ||
267 | input.pointer = params; | ||
268 | params[0].type = ACPI_TYPE_INTEGER; | ||
269 | params[0].integer.value = enable; | ||
270 | 264 | ||
271 | snprintf(method, 5, "WE%02X", block->notify_id); | 265 | snprintf(method, 5, "WE%02X", block->notify_id); |
272 | status = acpi_evaluate_object(handle, method, &input, NULL); | 266 | status = acpi_execute_simple_method(handle, method, enable); |
273 | 267 | ||
274 | if (status != AE_OK && status != AE_NOT_FOUND) | 268 | if (status != AE_OK && status != AE_NOT_FOUND) |
275 | return status; | 269 | return status; |
@@ -353,10 +347,10 @@ struct acpi_buffer *out) | |||
353 | { | 347 | { |
354 | struct guid_block *block = NULL; | 348 | struct guid_block *block = NULL; |
355 | struct wmi_block *wblock = NULL; | 349 | struct wmi_block *wblock = NULL; |
356 | acpi_handle handle, wc_handle; | 350 | acpi_handle handle; |
357 | acpi_status status, wc_status = AE_ERROR; | 351 | acpi_status status, wc_status = AE_ERROR; |
358 | struct acpi_object_list input, wc_input; | 352 | struct acpi_object_list input; |
359 | union acpi_object wc_params[1], wq_params[1]; | 353 | union acpi_object wq_params[1]; |
360 | char method[5]; | 354 | char method[5]; |
361 | char wc_method[5] = "WC"; | 355 | char wc_method[5] = "WC"; |
362 | 356 | ||
@@ -386,11 +380,6 @@ struct acpi_buffer *out) | |||
386 | * enable collection. | 380 | * enable collection. |
387 | */ | 381 | */ |
388 | if (block->flags & ACPI_WMI_EXPENSIVE) { | 382 | if (block->flags & ACPI_WMI_EXPENSIVE) { |
389 | wc_input.count = 1; | ||
390 | wc_input.pointer = wc_params; | ||
391 | wc_params[0].type = ACPI_TYPE_INTEGER; | ||
392 | wc_params[0].integer.value = 1; | ||
393 | |||
394 | strncat(wc_method, block->object_id, 2); | 383 | strncat(wc_method, block->object_id, 2); |
395 | 384 | ||
396 | /* | 385 | /* |
@@ -398,10 +387,9 @@ struct acpi_buffer *out) | |||
398 | * expensive, but have no corresponding WCxx method. So we | 387 | * expensive, but have no corresponding WCxx method. So we |
399 | * should not fail if this happens. | 388 | * should not fail if this happens. |
400 | */ | 389 | */ |
401 | wc_status = acpi_get_handle(handle, wc_method, &wc_handle); | 390 | if (acpi_has_method(handle, wc_method)) |
402 | if (ACPI_SUCCESS(wc_status)) | 391 | wc_status = acpi_execute_simple_method(handle, |
403 | wc_status = acpi_evaluate_object(handle, wc_method, | 392 | wc_method, 1); |
404 | &wc_input, NULL); | ||
405 | } | 393 | } |
406 | 394 | ||
407 | strcpy(method, "WQ"); | 395 | strcpy(method, "WQ"); |
@@ -414,9 +402,7 @@ struct acpi_buffer *out) | |||
414 | * the WQxx method failed - we should disable collection anyway. | 402 | * the WQxx method failed - we should disable collection anyway. |
415 | */ | 403 | */ |
416 | if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { | 404 | if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { |
417 | wc_params[0].integer.value = 0; | 405 | status = acpi_execute_simple_method(handle, wc_method, 0); |
418 | status = acpi_evaluate_object(handle, | ||
419 | wc_method, &wc_input, NULL); | ||
420 | } | 406 | } |
421 | 407 | ||
422 | return status; | 408 | return status; |
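In the wmi.c hunk above, the net effect is that the WCxx enable/disable calls wrapped around an expensive WQxx query collapse into acpi_execute_simple_method(). A simplified sketch of that sequence, using placeholder method names ("WCAA"/"WQAA") rather than the dynamically built ones in wmi.c, and omitting the block-instance argument the real query passes:

#include <linux/acpi.h>

static acpi_status example_query_expensive(acpi_handle handle,
                                           struct acpi_buffer *out)
{
        acpi_status status, wc_status = AE_ERROR;

        /* turn collection on only if the WCxx method actually exists */
        if (acpi_has_method(handle, "WCAA"))
                wc_status = acpi_execute_simple_method(handle, "WCAA", 1);

        status = acpi_evaluate_object(handle, "WQAA", NULL, out);

        /* always try to turn collection back off if we turned it on */
        if (ACPI_SUCCESS(wc_status))
                acpi_execute_simple_method(handle, "WCAA", 0);

        return status;
}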
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 34049b0b4c73..747826d99059 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device) | |||
239 | 239 | ||
240 | static int __init pnpacpi_add_device(struct acpi_device *device) | 240 | static int __init pnpacpi_add_device(struct acpi_device *device) |
241 | { | 241 | { |
242 | acpi_handle temp = NULL; | ||
243 | acpi_status status; | ||
244 | struct pnp_dev *dev; | 242 | struct pnp_dev *dev; |
245 | char *pnpid; | 243 | char *pnpid; |
246 | struct acpi_hardware_id *id; | 244 | struct acpi_hardware_id *id; |
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device) | |||
253 | * If a PnPacpi device is not present , the device | 251 | * If a PnPacpi device is not present , the device |
254 | * driver should not be loaded. | 252 | * driver should not be loaded. |
255 | */ | 253 | */ |
256 | status = acpi_get_handle(device->handle, "_CRS", &temp); | 254 | if (!acpi_has_method(device->handle, "_CRS")) |
257 | if (ACPI_FAILURE(status)) | ||
258 | return 0; | 255 | return 0; |
259 | 256 | ||
260 | pnpid = pnpacpi_get_id(device); | 257 | pnpid = pnpacpi_get_id(device); |
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device) | |||
271 | dev->data = device; | 268 | dev->data = device; |
272 | /* .enabled means the device can decode the resources */ | 269 | /* .enabled means the device can decode the resources */ |
273 | dev->active = device->status.enabled; | 270 | dev->active = device->status.enabled; |
274 | status = acpi_get_handle(device->handle, "_SRS", &temp); | 271 | if (acpi_has_method(device->handle, "_SRS")) |
275 | if (ACPI_SUCCESS(status)) | ||
276 | dev->capabilities |= PNP_CONFIGURABLE; | 272 | dev->capabilities |= PNP_CONFIGURABLE; |
277 | dev->capabilities |= PNP_READ; | 273 | dev->capabilities |= PNP_READ; |
278 | if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) | 274 | if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) |
279 | dev->capabilities |= PNP_WRITE; | 275 | dev->capabilities |= PNP_WRITE; |
280 | if (device->flags.removable) | 276 | if (device->flags.removable) |
281 | dev->capabilities |= PNP_REMOVABLE; | 277 | dev->capabilities |= PNP_REMOVABLE; |
282 | status = acpi_get_handle(device->handle, "_DIS", &temp); | 278 | if (acpi_has_method(device->handle, "_DIS")) |
283 | if (ACPI_SUCCESS(status)) | ||
284 | dev->capabilities |= PNP_DISABLE; | 279 | dev->capabilities |= PNP_DISABLE; |
285 | 280 | ||
286 | if (strlen(acpi_device_name(device))) | 281 | if (strlen(acpi_device_name(device))) |
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig new file mode 100644 index 000000000000..a7c81b53d88a --- /dev/null +++ b/drivers/powercap/Kconfig | |||
@@ -0,0 +1,32 @@ | |||
1 | # | ||
2 | # Generic power capping sysfs interface configuration | ||
3 | # | ||
4 | |||
5 | menuconfig POWERCAP | ||
6 | bool "Generic powercap sysfs driver" | ||
7 | help | ||
8 | The power capping sysfs interface allows kernel subsystems to expose power | ||
9 | capping settings to user space in a consistent way. Usually, it consists | ||
10 | of multiple control types that determine which settings may be exposed and | ||
11 | power zones representing parts of the system that can be subject to power | ||
12 | capping. | ||
13 | |||
14 | If you want this code to be compiled in, say Y here. | ||
15 | |||
16 | if POWERCAP | ||
17 | # Client driver configurations go here. | ||
18 | config INTEL_RAPL | ||
19 | tristate "Intel RAPL Support" | ||
20 | depends on X86 | ||
21 | default n | ||
22 | ---help--- | ||
23 | This enables support for the Intel Running Average Power Limit (RAPL) | ||
24 | technology which allows power limits to be enforced and monitored on | ||
25 | modern Intel processors (Sandy Bridge and later). | ||
26 | |||
27 | In RAPL, the platform-level settings are divided into domains for | ||
28 | fine-grained control. These domains include processor package, DRAM | ||
29 | controller, CPU core (Power Plane 0), graphics uncore (Power Plane | ||
30 | 1), etc. | ||
31 | |||
32 | endif | ||
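The new POWERCAP core gives client drivers such as intel_rapl (added below) a common sysfs ABI under /sys/class/powercap/: a driver first registers a control type and then hangs power zones with constraints off it. A minimal sketch of the first step, assuming the powercap_register_control_type() entry point exported by powercap_sys.c (see that file for the authoritative signature and ops); the "example-rapl" name is illustrative:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/powercap.h>

static struct powercap_control_type *example_control_type;

static int __init example_powercap_init(void)
{
        /* NULL, NULL: let the core allocate the control type, no custom ops */
        example_control_type =
                powercap_register_control_type(NULL, "example-rapl", NULL);
        if (IS_ERR(example_control_type))
                return PTR_ERR(example_control_type);

        return 0;
}
module_init(example_powercap_init);

static void __exit example_powercap_exit(void)
{
        powercap_unregister_control_type(example_control_type);
}
module_exit(example_powercap_exit);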
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile new file mode 100644 index 000000000000..0a21ef31372b --- /dev/null +++ b/drivers/powercap/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_POWERCAP) += powercap_sys.o | ||
2 | obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o | ||
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c new file mode 100644 index 000000000000..2a786c504460 --- /dev/null +++ b/drivers/powercap/intel_rapl.c | |||
@@ -0,0 +1,1395 @@ | |||
1 | /* | ||
2 | * Intel Running Average Power Limit (RAPL) Driver | ||
3 | * Copyright (c) 2013, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc. | ||
16 | * | ||
17 | */ | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/device.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/log2.h> | ||
27 | #include <linux/bitmap.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/sysfs.h> | ||
30 | #include <linux/cpu.h> | ||
31 | #include <linux/powercap.h> | ||
32 | |||
33 | #include <asm/processor.h> | ||
34 | #include <asm/cpu_device_id.h> | ||
35 | |||
36 | /* bitmasks for RAPL MSRs, used by primitive access functions */ | ||
37 | #define ENERGY_STATUS_MASK 0xffffffff | ||
38 | |||
39 | #define POWER_LIMIT1_MASK 0x7FFF | ||
40 | #define POWER_LIMIT1_ENABLE BIT(15) | ||
41 | #define POWER_LIMIT1_CLAMP BIT(16) | ||
42 | |||
43 | #define POWER_LIMIT2_MASK (0x7FFFULL<<32) | ||
44 | #define POWER_LIMIT2_ENABLE BIT_ULL(47) | ||
45 | #define POWER_LIMIT2_CLAMP BIT_ULL(48) | ||
46 | #define POWER_PACKAGE_LOCK BIT_ULL(63) | ||
47 | #define POWER_PP_LOCK BIT(31) | ||
48 | |||
49 | #define TIME_WINDOW1_MASK (0x7FULL<<17) | ||
50 | #define TIME_WINDOW2_MASK (0x7FULL<<49) | ||
51 | |||
52 | #define POWER_UNIT_OFFSET 0 | ||
53 | #define POWER_UNIT_MASK 0x0F | ||
54 | |||
55 | #define ENERGY_UNIT_OFFSET 0x08 | ||
56 | #define ENERGY_UNIT_MASK 0x1F00 | ||
57 | |||
58 | #define TIME_UNIT_OFFSET 0x10 | ||
59 | #define TIME_UNIT_MASK 0xF0000 | ||
60 | |||
61 | #define POWER_INFO_MAX_MASK (0x7fffULL<<32) | ||
62 | #define POWER_INFO_MIN_MASK (0x7fffULL<<16) | ||
63 | #define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48) | ||
64 | #define POWER_INFO_THERMAL_SPEC_MASK 0x7fff | ||
65 | |||
66 | #define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff | ||
67 | #define PP_POLICY_MASK 0x1F | ||
68 | |||
69 | /* Non HW constants */ | ||
70 | #define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ | ||
71 | #define RAPL_PRIMITIVE_DUMMY BIT(2) | ||
72 | |||
73 | /* scale RAPL units to avoid floating point math inside kernel */ | ||
74 | #define POWER_UNIT_SCALE (1000000) | ||
75 | #define ENERGY_UNIT_SCALE (1000000) | ||
76 | #define TIME_UNIT_SCALE (1000000) | ||
77 | |||
78 | #define TIME_WINDOW_MAX_MSEC 40000 | ||
79 | #define TIME_WINDOW_MIN_MSEC 250 | ||
80 | |||
81 | enum unit_type { | ||
82 | ARBITRARY_UNIT, /* no translation */ | ||
83 | POWER_UNIT, | ||
84 | ENERGY_UNIT, | ||
85 | TIME_UNIT, | ||
86 | }; | ||
87 | |||
88 | enum rapl_domain_type { | ||
89 | RAPL_DOMAIN_PACKAGE, /* entire package/socket */ | ||
90 | RAPL_DOMAIN_PP0, /* core power plane */ | ||
91 | RAPL_DOMAIN_PP1, /* graphics uncore */ | ||
92 | RAPL_DOMAIN_DRAM,/* DRAM control_type */ | ||
93 | RAPL_DOMAIN_MAX, | ||
94 | }; | ||
95 | |||
96 | enum rapl_domain_msr_id { | ||
97 | RAPL_DOMAIN_MSR_LIMIT, | ||
98 | RAPL_DOMAIN_MSR_STATUS, | ||
99 | RAPL_DOMAIN_MSR_PERF, | ||
100 | RAPL_DOMAIN_MSR_POLICY, | ||
101 | RAPL_DOMAIN_MSR_INFO, | ||
102 | RAPL_DOMAIN_MSR_MAX, | ||
103 | }; | ||
104 | |||
105 | /* per domain data, some are optional */ | ||
106 | enum rapl_primitives { | ||
107 | ENERGY_COUNTER, | ||
108 | POWER_LIMIT1, | ||
109 | POWER_LIMIT2, | ||
110 | FW_LOCK, | ||
111 | |||
112 | PL1_ENABLE, /* power limit 1, aka long term */ | ||
113 | PL1_CLAMP, /* allow frequency to go below OS request */ | ||
114 | PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ | ||
115 | PL2_CLAMP, | ||
116 | |||
117 | TIME_WINDOW1, /* long term */ | ||
118 | TIME_WINDOW2, /* short term */ | ||
119 | THERMAL_SPEC_POWER, | ||
120 | MAX_POWER, | ||
121 | |||
122 | MIN_POWER, | ||
123 | MAX_TIME_WINDOW, | ||
124 | THROTTLED_TIME, | ||
125 | PRIORITY_LEVEL, | ||
126 | |||
127 | /* below are not raw primitive data */ | ||
128 | AVERAGE_POWER, | ||
129 | NR_RAPL_PRIMITIVES, | ||
130 | }; | ||
131 | |||
132 | #define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2) | ||
133 | |||
134 | /* Can be expanded to include events, etc.*/ | ||
135 | struct rapl_domain_data { | ||
136 | u64 primitives[NR_RAPL_PRIMITIVES]; | ||
137 | unsigned long timestamp; | ||
138 | }; | ||
139 | |||
140 | |||
141 | #define DOMAIN_STATE_INACTIVE BIT(0) | ||
142 | #define DOMAIN_STATE_POWER_LIMIT_SET BIT(1) | ||
143 | #define DOMAIN_STATE_BIOS_LOCKED BIT(2) | ||
144 | |||
145 | #define NR_POWER_LIMITS (2) | ||
146 | struct rapl_power_limit { | ||
147 | struct powercap_zone_constraint *constraint; | ||
148 | int prim_id; /* primitive ID used to enable */ | ||
149 | struct rapl_domain *domain; | ||
150 | const char *name; | ||
151 | }; | ||
152 | |||
153 | static const char pl1_name[] = "long_term"; | ||
154 | static const char pl2_name[] = "short_term"; | ||
155 | |||
156 | struct rapl_domain { | ||
157 | const char *name; | ||
158 | enum rapl_domain_type id; | ||
159 | int msrs[RAPL_DOMAIN_MSR_MAX]; | ||
160 | struct powercap_zone power_zone; | ||
161 | struct rapl_domain_data rdd; | ||
162 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; | ||
163 | u64 attr_map; /* track capabilities */ | ||
164 | unsigned int state; | ||
165 | int package_id; | ||
166 | }; | ||
167 | #define power_zone_to_rapl_domain(_zone) \ | ||
168 | container_of(_zone, struct rapl_domain, power_zone) | ||
169 | |||
170 | |||
171 | /* Each physical package contains multiple domains; these are the common | ||
172 | * data across RAPL domains within a package. | ||
173 | */ | ||
174 | struct rapl_package { | ||
175 | unsigned int id; /* physical package/socket id */ | ||
176 | unsigned int nr_domains; | ||
177 | unsigned long domain_map; /* bit map of active domains */ | ||
178 | unsigned int power_unit_divisor; | ||
179 | unsigned int energy_unit_divisor; | ||
180 | unsigned int time_unit_divisor; | ||
181 | struct rapl_domain *domains; /* array of domains, sized at runtime */ | ||
182 | struct powercap_zone *power_zone; /* keep track of parent zone */ | ||
183 | int nr_cpus; /* active cpus on the package, topology info is lost during | ||
184 | * cpu hotplug, so we have to track it ourselves. | ||
185 | */ | ||
186 | unsigned long power_limit_irq; /* keep track of package power limit | ||
187 | * notify interrupt enable status. | ||
188 | */ | ||
189 | struct list_head plist; | ||
190 | }; | ||
191 | #define PACKAGE_PLN_INT_SAVED BIT(0) | ||
192 | #define MAX_PRIM_NAME (32) | ||
193 | |||
194 | /* per domain data. used to describe individual knobs such that access functions | ||
195 | * can be consolidated into one instead of many inline functions. | ||
196 | */ | ||
197 | struct rapl_primitive_info { | ||
198 | const char *name; | ||
199 | u64 mask; | ||
200 | int shift; | ||
201 | enum rapl_domain_msr_id id; | ||
202 | enum unit_type unit; | ||
203 | u32 flag; | ||
204 | }; | ||
205 | |||
206 | #define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \ | ||
207 | .name = #p, \ | ||
208 | .mask = m, \ | ||
209 | .shift = s, \ | ||
210 | .id = i, \ | ||
211 | .unit = u, \ | ||
212 | .flag = f \ | ||
213 | } | ||
214 | |||
215 | static void rapl_init_domains(struct rapl_package *rp); | ||
216 | static int rapl_read_data_raw(struct rapl_domain *rd, | ||
217 | enum rapl_primitives prim, | ||
218 | bool xlate, u64 *data); | ||
219 | static int rapl_write_data_raw(struct rapl_domain *rd, | ||
220 | enum rapl_primitives prim, | ||
221 | unsigned long long value); | ||
222 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | ||
223 | int to_raw); | ||
224 | static void package_power_limit_irq_save(int package_id); | ||
225 | |||
226 | static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */ | ||
227 | |||
228 | static const char * const rapl_domain_names[] = { | ||
229 | "package", | ||
230 | "core", | ||
231 | "uncore", | ||
232 | "dram", | ||
233 | }; | ||
234 | |||
235 | static struct powercap_control_type *control_type; /* PowerCap Controller */ | ||
236 | |||
237 | /* caller to ensure CPU hotplug lock is held */ | ||
238 | static struct rapl_package *find_package_by_id(int id) | ||
239 | { | ||
240 | struct rapl_package *rp; | ||
241 | |||
242 | list_for_each_entry(rp, &rapl_packages, plist) { | ||
243 | if (rp->id == id) | ||
244 | return rp; | ||
245 | } | ||
246 | |||
247 | return NULL; | ||
248 | } | ||
249 | |||
250 | /* caller to ensure CPU hotplug lock is held */ | ||
251 | static int find_active_cpu_on_package(int package_id) | ||
252 | { | ||
253 | int i; | ||
254 | |||
255 | for_each_online_cpu(i) { | ||
256 | if (topology_physical_package_id(i) == package_id) | ||
257 | return i; | ||
258 | } | ||
259 | /* all CPUs on this package are offline */ | ||
260 | |||
261 | return -ENODEV; | ||
262 | } | ||
263 | |||
264 | /* caller must hold cpu hotplug lock */ | ||
265 | static void rapl_cleanup_data(void) | ||
266 | { | ||
267 | struct rapl_package *p, *tmp; | ||
268 | |||
269 | list_for_each_entry_safe(p, tmp, &rapl_packages, plist) { | ||
270 | kfree(p->domains); | ||
271 | list_del(&p->plist); | ||
272 | kfree(p); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) | ||
277 | { | ||
278 | struct rapl_domain *rd; | ||
279 | u64 energy_now; | ||
280 | |||
281 | /* prevent CPU hotplug, make sure the RAPL domain does not go | ||
282 | * away while reading the counter. | ||
283 | */ | ||
284 | get_online_cpus(); | ||
285 | rd = power_zone_to_rapl_domain(power_zone); | ||
286 | |||
287 | if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) { | ||
288 | *energy_raw = energy_now; | ||
289 | put_online_cpus(); | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | put_online_cpus(); | ||
294 | |||
295 | return -EIO; | ||
296 | } | ||
297 | |||
298 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) | ||
299 | { | ||
300 | *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static int release_zone(struct powercap_zone *power_zone) | ||
305 | { | ||
306 | struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); | ||
307 | struct rapl_package *rp; | ||
308 | |||
309 | /* package zone is the last zone of a package, we can free | ||
310 | * memory here since all children have been unregistered. | ||
311 | */ | ||
312 | if (rd->id == RAPL_DOMAIN_PACKAGE) { | ||
313 | rp = find_package_by_id(rd->package_id); | ||
314 | if (!rp) { | ||
315 | dev_warn(&power_zone->dev, "no package id %s\n", | ||
316 | rd->name); | ||
317 | return -ENODEV; | ||
318 | } | ||
319 | kfree(rd); | ||
320 | rp->domains = NULL; | ||
321 | } | ||
322 | |||
323 | return 0; | ||
324 | |||
325 | } | ||
326 | |||
327 | static int find_nr_power_limit(struct rapl_domain *rd) | ||
328 | { | ||
329 | int i; | ||
330 | |||
331 | for (i = 0; i < NR_POWER_LIMITS; i++) { | ||
332 | if (rd->rpl[i].name == NULL) | ||
333 | break; | ||
334 | } | ||
335 | |||
336 | return i; | ||
337 | } | ||
338 | |||
339 | static int set_domain_enable(struct powercap_zone *power_zone, bool mode) | ||
340 | { | ||
341 | struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); | ||
342 | int nr_powerlimit; | ||
343 | |||
344 | if (rd->state & DOMAIN_STATE_BIOS_LOCKED) | ||
345 | return -EACCES; | ||
346 | get_online_cpus(); | ||
347 | nr_powerlimit = find_nr_power_limit(rd); | ||
348 | /* here we activate/deactivate the hardware for power limiting */ | ||
349 | rapl_write_data_raw(rd, PL1_ENABLE, mode); | ||
350 | /* always enable clamp such that p-state can go below OS requested | ||
351 | * range. Power capping takes priority over guaranteed frequency. | ||
352 | */ | ||
353 | rapl_write_data_raw(rd, PL1_CLAMP, mode); | ||
354 | /* some domains have pl2 */ | ||
355 | if (nr_powerlimit > 1) { | ||
356 | rapl_write_data_raw(rd, PL2_ENABLE, mode); | ||
357 | rapl_write_data_raw(rd, PL2_CLAMP, mode); | ||
358 | } | ||
359 | put_online_cpus(); | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | static int get_domain_enable(struct powercap_zone *power_zone, bool *mode) | ||
365 | { | ||
366 | struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); | ||
367 | u64 val; | ||
368 | |||
369 | if (rd->state & DOMAIN_STATE_BIOS_LOCKED) { | ||
370 | *mode = false; | ||
371 | return 0; | ||
372 | } | ||
373 | get_online_cpus(); | ||
374 | if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) { | ||
375 | put_online_cpus(); | ||
376 | return -EIO; | ||
377 | } | ||
378 | *mode = val; | ||
379 | put_online_cpus(); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | /* per RAPL domain ops, in the order of rapl_domain_type */ | ||
385 | static struct powercap_zone_ops zone_ops[] = { | ||
386 | /* RAPL_DOMAIN_PACKAGE */ | ||
387 | { | ||
388 | .get_energy_uj = get_energy_counter, | ||
389 | .get_max_energy_range_uj = get_max_energy_counter, | ||
390 | .release = release_zone, | ||
391 | .set_enable = set_domain_enable, | ||
392 | .get_enable = get_domain_enable, | ||
393 | }, | ||
394 | /* RAPL_DOMAIN_PP0 */ | ||
395 | { | ||
396 | .get_energy_uj = get_energy_counter, | ||
397 | .get_max_energy_range_uj = get_max_energy_counter, | ||
398 | .release = release_zone, | ||
399 | .set_enable = set_domain_enable, | ||
400 | .get_enable = get_domain_enable, | ||
401 | }, | ||
402 | /* RAPL_DOMAIN_PP1 */ | ||
403 | { | ||
404 | .get_energy_uj = get_energy_counter, | ||
405 | .get_max_energy_range_uj = get_max_energy_counter, | ||
406 | .release = release_zone, | ||
407 | .set_enable = set_domain_enable, | ||
408 | .get_enable = get_domain_enable, | ||
409 | }, | ||
410 | /* RAPL_DOMAIN_DRAM */ | ||
411 | { | ||
412 | .get_energy_uj = get_energy_counter, | ||
413 | .get_max_energy_range_uj = get_max_energy_counter, | ||
414 | .release = release_zone, | ||
415 | .set_enable = set_domain_enable, | ||
416 | .get_enable = get_domain_enable, | ||
417 | }, | ||
418 | }; | ||
419 | |||
420 | static int set_power_limit(struct powercap_zone *power_zone, int id, | ||
421 | u64 power_limit) | ||
422 | { | ||
423 | struct rapl_domain *rd; | ||
424 | struct rapl_package *rp; | ||
425 | int ret = 0; | ||
426 | |||
427 | get_online_cpus(); | ||
428 | rd = power_zone_to_rapl_domain(power_zone); | ||
429 | rp = find_package_by_id(rd->package_id); | ||
430 | if (!rp) { | ||
431 | ret = -ENODEV; | ||
432 | goto set_exit; | ||
433 | } | ||
434 | |||
435 | if (rd->state & DOMAIN_STATE_BIOS_LOCKED) { | ||
436 | dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n", | ||
437 | rd->name); | ||
438 | ret = -EACCES; | ||
439 | goto set_exit; | ||
440 | } | ||
441 | |||
442 | switch (rd->rpl[id].prim_id) { | ||
443 | case PL1_ENABLE: | ||
444 | rapl_write_data_raw(rd, POWER_LIMIT1, power_limit); | ||
445 | break; | ||
446 | case PL2_ENABLE: | ||
447 | rapl_write_data_raw(rd, POWER_LIMIT2, power_limit); | ||
448 | break; | ||
449 | default: | ||
450 | ret = -EINVAL; | ||
451 | } | ||
452 | if (!ret) | ||
453 | package_power_limit_irq_save(rd->package_id); | ||
454 | set_exit: | ||
455 | put_online_cpus(); | ||
456 | return ret; | ||
457 | } | ||
458 | |||
459 | static int get_current_power_limit(struct powercap_zone *power_zone, int id, | ||
460 | u64 *data) | ||
461 | { | ||
462 | struct rapl_domain *rd; | ||
463 | u64 val; | ||
464 | int prim; | ||
465 | int ret = 0; | ||
466 | |||
467 | get_online_cpus(); | ||
468 | rd = power_zone_to_rapl_domain(power_zone); | ||
469 | switch (rd->rpl[id].prim_id) { | ||
470 | case PL1_ENABLE: | ||
471 | prim = POWER_LIMIT1; | ||
472 | break; | ||
473 | case PL2_ENABLE: | ||
474 | prim = POWER_LIMIT2; | ||
475 | break; | ||
476 | default: | ||
477 | put_online_cpus(); | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | if (rapl_read_data_raw(rd, prim, true, &val)) | ||
481 | ret = -EIO; | ||
482 | else | ||
483 | *data = val; | ||
484 | |||
485 | put_online_cpus(); | ||
486 | |||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | static int set_time_window(struct powercap_zone *power_zone, int id, | ||
491 | u64 window) | ||
492 | { | ||
493 | struct rapl_domain *rd; | ||
494 | int ret = 0; | ||
495 | |||
496 | get_online_cpus(); | ||
497 | rd = power_zone_to_rapl_domain(power_zone); | ||
498 | switch (rd->rpl[id].prim_id) { | ||
499 | case PL1_ENABLE: | ||
500 | rapl_write_data_raw(rd, TIME_WINDOW1, window); | ||
501 | break; | ||
502 | case PL2_ENABLE: | ||
503 | rapl_write_data_raw(rd, TIME_WINDOW2, window); | ||
504 | break; | ||
505 | default: | ||
506 | ret = -EINVAL; | ||
507 | } | ||
508 | put_online_cpus(); | ||
509 | return ret; | ||
510 | } | ||
511 | |||
512 | static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data) | ||
513 | { | ||
514 | struct rapl_domain *rd; | ||
515 | u64 val; | ||
516 | int ret = 0; | ||
517 | |||
518 | get_online_cpus(); | ||
519 | rd = power_zone_to_rapl_domain(power_zone); | ||
520 | switch (rd->rpl[id].prim_id) { | ||
521 | case PL1_ENABLE: | ||
522 | ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val); | ||
523 | break; | ||
524 | case PL2_ENABLE: | ||
525 | ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val); | ||
526 | break; | ||
527 | default: | ||
528 | put_online_cpus(); | ||
529 | return -EINVAL; | ||
530 | } | ||
531 | if (!ret) | ||
532 | *data = val; | ||
533 | put_online_cpus(); | ||
534 | |||
535 | return ret; | ||
536 | } | ||
537 | |||
538 | static const char *get_constraint_name(struct powercap_zone *power_zone, int id) | ||
539 | { | ||
540 | struct rapl_power_limit *rpl; | ||
541 | struct rapl_domain *rd; | ||
542 | |||
543 | rd = power_zone_to_rapl_domain(power_zone); | ||
544 | rpl = (struct rapl_power_limit *) &rd->rpl[id]; | ||
545 | |||
546 | return rpl->name; | ||
547 | } | ||
548 | |||
549 | |||
550 | static int get_max_power(struct powercap_zone *power_zone, int id, | ||
551 | u64 *data) | ||
552 | { | ||
553 | struct rapl_domain *rd; | ||
554 | u64 val; | ||
555 | int prim; | ||
556 | int ret = 0; | ||
557 | |||
558 | get_online_cpus(); | ||
559 | rd = power_zone_to_rapl_domain(power_zone); | ||
560 | switch (rd->rpl[id].prim_id) { | ||
561 | case PL1_ENABLE: | ||
562 | prim = THERMAL_SPEC_POWER; | ||
563 | break; | ||
564 | case PL2_ENABLE: | ||
565 | prim = MAX_POWER; | ||
566 | break; | ||
567 | default: | ||
568 | put_online_cpus(); | ||
569 | return -EINVAL; | ||
570 | } | ||
571 | if (rapl_read_data_raw(rd, prim, true, &val)) | ||
572 | ret = -EIO; | ||
573 | else | ||
574 | *data = val; | ||
575 | |||
576 | put_online_cpus(); | ||
577 | |||
578 | return ret; | ||
579 | } | ||
580 | |||
581 | static struct powercap_zone_constraint_ops constraint_ops = { | ||
582 | .set_power_limit_uw = set_power_limit, | ||
583 | .get_power_limit_uw = get_current_power_limit, | ||
584 | .set_time_window_us = set_time_window, | ||
585 | .get_time_window_us = get_time_window, | ||
586 | .get_max_power_uw = get_max_power, | ||
587 | .get_name = get_constraint_name, | ||
588 | }; | ||
589 | |||
590 | /* called after domain detection and package level data are set */ | ||
591 | static void rapl_init_domains(struct rapl_package *rp) | ||
592 | { | ||
593 | int i; | ||
594 | struct rapl_domain *rd = rp->domains; | ||
595 | |||
596 | for (i = 0; i < RAPL_DOMAIN_MAX; i++) { | ||
597 | unsigned int mask = rp->domain_map & (1 << i); | ||
598 | switch (mask) { | ||
599 | case BIT(RAPL_DOMAIN_PACKAGE): | ||
600 | rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE]; | ||
601 | rd->id = RAPL_DOMAIN_PACKAGE; | ||
602 | rd->msrs[0] = MSR_PKG_POWER_LIMIT; | ||
603 | rd->msrs[1] = MSR_PKG_ENERGY_STATUS; | ||
604 | rd->msrs[2] = MSR_PKG_PERF_STATUS; | ||
605 | rd->msrs[3] = 0; | ||
606 | rd->msrs[4] = MSR_PKG_POWER_INFO; | ||
607 | rd->rpl[0].prim_id = PL1_ENABLE; | ||
608 | rd->rpl[0].name = pl1_name; | ||
609 | rd->rpl[1].prim_id = PL2_ENABLE; | ||
610 | rd->rpl[1].name = pl2_name; | ||
611 | break; | ||
612 | case BIT(RAPL_DOMAIN_PP0): | ||
613 | rd->name = rapl_domain_names[RAPL_DOMAIN_PP0]; | ||
614 | rd->id = RAPL_DOMAIN_PP0; | ||
615 | rd->msrs[0] = MSR_PP0_POWER_LIMIT; | ||
616 | rd->msrs[1] = MSR_PP0_ENERGY_STATUS; | ||
617 | rd->msrs[2] = 0; | ||
618 | rd->msrs[3] = MSR_PP0_POLICY; | ||
619 | rd->msrs[4] = 0; | ||
620 | rd->rpl[0].prim_id = PL1_ENABLE; | ||
621 | rd->rpl[0].name = pl1_name; | ||
622 | break; | ||
623 | case BIT(RAPL_DOMAIN_PP1): | ||
624 | rd->name = rapl_domain_names[RAPL_DOMAIN_PP1]; | ||
625 | rd->id = RAPL_DOMAIN_PP1; | ||
626 | rd->msrs[0] = MSR_PP1_POWER_LIMIT; | ||
627 | rd->msrs[1] = MSR_PP1_ENERGY_STATUS; | ||
628 | rd->msrs[2] = 0; | ||
629 | rd->msrs[3] = MSR_PP1_POLICY; | ||
630 | rd->msrs[4] = 0; | ||
631 | rd->rpl[0].prim_id = PL1_ENABLE; | ||
632 | rd->rpl[0].name = pl1_name; | ||
633 | break; | ||
634 | case BIT(RAPL_DOMAIN_DRAM): | ||
635 | rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM]; | ||
636 | rd->id = RAPL_DOMAIN_DRAM; | ||
637 | rd->msrs[0] = MSR_DRAM_POWER_LIMIT; | ||
638 | rd->msrs[1] = MSR_DRAM_ENERGY_STATUS; | ||
639 | rd->msrs[2] = MSR_DRAM_PERF_STATUS; | ||
640 | rd->msrs[3] = 0; | ||
641 | rd->msrs[4] = MSR_DRAM_POWER_INFO; | ||
642 | rd->rpl[0].prim_id = PL1_ENABLE; | ||
643 | rd->rpl[0].name = pl1_name; | ||
644 | break; | ||
645 | } | ||
646 | if (mask) { | ||
647 | rd->package_id = rp->id; | ||
648 | rd++; | ||
649 | } | ||
650 | } | ||
651 | } | ||
652 | |||
653 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | ||
654 | int to_raw) | ||
655 | { | ||
656 | u64 divisor = 1; | ||
657 | int scale = 1; /* scale to user friendly data without floating point */ | ||
658 | u64 f, y; /* fraction and exp. used for time unit */ | ||
659 | struct rapl_package *rp; | ||
660 | |||
661 | rp = find_package_by_id(package); | ||
662 | if (!rp) | ||
663 | return value; | ||
664 | |||
665 | switch (type) { | ||
666 | case POWER_UNIT: | ||
667 | divisor = rp->power_unit_divisor; | ||
668 | scale = POWER_UNIT_SCALE; | ||
669 | break; | ||
670 | case ENERGY_UNIT: | ||
671 | scale = ENERGY_UNIT_SCALE; | ||
672 | divisor = rp->energy_unit_divisor; | ||
673 | break; | ||
674 | case TIME_UNIT: | ||
675 | divisor = rp->time_unit_divisor; | ||
676 | scale = TIME_UNIT_SCALE; | ||
677 | /* special processing based on 2^Y*(1+F)/4 = val/divisor, refer | ||
678 | * to Intel Software Developer's manual Vol. 3a, CH 14.7.4. | ||
679 | */ | ||
680 | if (!to_raw) { | ||
681 | f = (value & 0x60) >> 5; | ||
682 | y = value & 0x1f; | ||
683 | value = (1 << y) * (4 + f) * scale / 4; | ||
684 | return div64_u64(value, divisor); | ||
685 | } else { | ||
686 | do_div(value, scale); | ||
687 | value *= divisor; | ||
688 | y = ilog2(value); | ||
689 | f = div64_u64(4 * (value - (1 << y)), 1 << y); | ||
690 | value = (y & 0x1f) | ((f & 0x3) << 5); | ||
691 | return value; | ||
692 | } | ||
693 | break; | ||
694 | case ARBITRARY_UNIT: | ||
695 | default: | ||
696 | return value; | ||
697 | }; | ||
698 | |||
699 | if (to_raw) | ||
700 | return div64_u64(value * divisor, scale); | ||
701 | else | ||
702 | return div64_u64(value * scale, divisor); | ||
703 | } | ||
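A quick standalone sketch of the TIME_UNIT decode above, handy for checking values by hand; it is not part of the patch and assumes TIME_UNIT_SCALE is 1000000 (microseconds) and a typical time_unit_divisor of 1024 (a unit-MSR field value of 10):

#include <stdint.h>
#include <stdio.h>

/* mirrors the !to_raw TIME_UNIT branch of rapl_unit_xlate() */
static uint64_t decode_time_window_us(uint64_t raw, uint64_t divisor)
{
        uint64_t f = (raw & 0x60) >> 5; /* 2-bit fraction */
        uint64_t y = raw & 0x1f;        /* 5-bit exponent */

        /* 2^Y * (1 + F/4) hardware time units, scaled to microseconds */
        return (1ULL << y) * (4 + f) * 1000000ULL / 4 / divisor;
}

int main(void)
{
        /* raw 0x0a: y = 10, f = 0 -> 1024 units / divisor 1024 = 1000000 us */
        printf("%llu\n", (unsigned long long)decode_time_window_us(0x0a, 1024));
        return 0;
}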
704 | |||
705 | /* in the order of enum rapl_primitives */ | ||
706 | static struct rapl_primitive_info rpi[] = { | ||
707 | /* name, mask, shift, msr index, unit divisor */ | ||
708 | PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0, | ||
709 | RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0), | ||
710 | PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0, | ||
711 | RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0), | ||
712 | PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32, | ||
713 | RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0), | ||
714 | PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31, | ||
715 | RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), | ||
716 | PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15, | ||
717 | RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), | ||
718 | PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16, | ||
719 | RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), | ||
720 | PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47, | ||
721 | RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), | ||
722 | PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48, | ||
723 | RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), | ||
724 | PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17, | ||
725 | RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0), | ||
726 | PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49, | ||
727 | RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0), | ||
728 | PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK, | ||
729 | 0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), | ||
730 | PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32, | ||
731 | RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), | ||
732 | PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16, | ||
733 | RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), | ||
734 | PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48, | ||
735 | RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0), | ||
736 | PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0, | ||
737 | RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0), | ||
738 | PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0, | ||
739 | RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0), | ||
740 | /* non-hardware */ | ||
741 | PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT, | ||
742 | RAPL_PRIMITIVE_DERIVED), | ||
743 | {NULL, 0, 0, 0}, | ||
744 | }; | ||
745 | |||
746 | /* Read primitive data based on its related struct rapl_primitive_info. | ||
747 | * If the xlate flag is set, return translated data based on data units, i.e. | ||
748 | * time, energy, and power. | ||
749 | * RAPL MSRs are non-architectural and are not laid out consistently across | ||
750 | * domains. Here we use primitive info to allow writing consolidated access | ||
751 | * functions. | ||
752 | * For a given primitive, it is processed by MSR mask and shift. Unit conversion | ||
753 | * is pre-assigned based on RAPL unit MSRs read at init time. | ||
754 | * 63-------------------------- 31--------------------------- 0 | ||
755 | * | xxxxx (mask) | | ||
756 | * | |<- shift ----------------| | ||
757 | * 63-------------------------- 31--------------------------- 0 | ||
758 | */ | ||
759 | static int rapl_read_data_raw(struct rapl_domain *rd, | ||
760 | enum rapl_primitives prim, | ||
761 | bool xlate, u64 *data) | ||
762 | { | ||
763 | u64 value, final; | ||
764 | u32 msr; | ||
765 | struct rapl_primitive_info *rp = &rpi[prim]; | ||
766 | int cpu; | ||
767 | |||
768 | if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY) | ||
769 | return -EINVAL; | ||
770 | |||
771 | msr = rd->msrs[rp->id]; | ||
772 | if (!msr) | ||
773 | return -EINVAL; | ||
774 | /* use physical package id to look up active cpus */ | ||
775 | cpu = find_active_cpu_on_package(rd->package_id); | ||
776 | if (cpu < 0) | ||
777 | return cpu; | ||
778 | |||
779 | /* special-case package domain, which uses a different bit */ | ||
780 | if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) { | ||
781 | rp->mask = POWER_PACKAGE_LOCK; | ||
782 | rp->shift = 63; | ||
783 | } | ||
784 | /* non-hardware data are collected by the polling thread */ | ||
785 | if (rp->flag & RAPL_PRIMITIVE_DERIVED) { | ||
786 | *data = rd->rdd.primitives[prim]; | ||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | if (rdmsrl_safe_on_cpu(cpu, msr, &value)) { | ||
791 | pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu); | ||
792 | return -EIO; | ||
793 | } | ||
794 | |||
795 | final = value & rp->mask; | ||
796 | final = final >> rp->shift; | ||
797 | if (xlate) | ||
798 | *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); | ||
799 | else | ||
800 | *data = final; | ||
801 | |||
802 | return 0; | ||
803 | } | ||
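The mask-and-shift step above can be exercised in isolation; this sketch is not part of the patch, and the 15-bit field width at bit 32 is an assumption about the usual MSR_PKG_POWER_LIMIT layout rather than something taken from this file:

#include <stdint.h>

#define PL2_MASK        (0x7fffULL << 32)       /* assumed: bits 46:32 */
#define PL2_SHIFT       32

/* same final = (value & mask) >> shift step as rapl_read_data_raw() */
static uint64_t extract_pl2_raw(uint64_t msr_val)
{
        return (msr_val & PL2_MASK) >> PL2_SHIFT;
}

/* e.g. extract_pl2_raw(0xdcULL << 32) == 0xdc */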
804 | |||
805 | /* Similar use of primitive info in the read counterpart */ | ||
806 | static int rapl_write_data_raw(struct rapl_domain *rd, | ||
807 | enum rapl_primitives prim, | ||
808 | unsigned long long value) | ||
809 | { | ||
810 | u64 msr_val; | ||
811 | u32 msr; | ||
812 | struct rapl_primitive_info *rp = &rpi[prim]; | ||
813 | int cpu; | ||
814 | |||
815 | cpu = find_active_cpu_on_package(rd->package_id); | ||
816 | if (cpu < 0) | ||
817 | return cpu; | ||
818 | msr = rd->msrs[rp->id]; | ||
819 | if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) { | ||
820 | dev_dbg(&rd->power_zone.dev, | ||
821 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); | ||
822 | return -EIO; | ||
823 | } | ||
824 | value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); | ||
825 | msr_val &= ~rp->mask; | ||
826 | msr_val |= value << rp->shift; | ||
827 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { | ||
828 | dev_dbg(&rd->power_zone.dev, | ||
829 | "failed to write msr 0x%x on cpu %d\n", msr, cpu); | ||
830 | return -EIO; | ||
831 | } | ||
832 | |||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | static int rapl_check_unit(struct rapl_package *rp, int cpu) | ||
837 | { | ||
838 | u64 msr_val; | ||
839 | u32 value; | ||
840 | |||
841 | if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) { | ||
842 | pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n", | ||
843 | MSR_RAPL_POWER_UNIT, cpu); | ||
844 | return -ENODEV; | ||
845 | } | ||
846 | |||
847 | /* Raw RAPL data stored in MSRs use hardware-specific scales. We need to | ||
848 | * convert them into standard units based on the divisors reported in | ||
849 | * the RAPL unit MSRs. | ||
850 | * i.e. | ||
851 | * energy unit: 1/energy_unit_divisor Joules | ||
852 | * power unit: 1/power_unit_divisor Watts | ||
853 | * time unit: 1/time_unit_divisor Seconds | ||
854 | */ | ||
855 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | ||
856 | rp->energy_unit_divisor = 1 << value; | ||
857 | |||
858 | |||
859 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | ||
860 | rp->power_unit_divisor = 1 << value; | ||
861 | |||
862 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | ||
863 | rp->time_unit_divisor = 1 << value; | ||
864 | |||
865 | pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n", | ||
866 | rp->id, | ||
867 | rp->energy_unit_divisor, | ||
868 | rp->time_unit_divisor, | ||
869 | rp->power_unit_divisor); | ||
870 | |||
871 | return 0; | ||
872 | } | ||
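For reference, a sketch (not part of the patch) of how a raw energy-status delta maps to microjoules once the divisor above is known; it assumes ENERGY_UNIT_SCALE is 1000000 and a typical energy_unit_divisor of 65536 (unit-MSR field of 16, roughly 15.3 uJ per count):

#include <stdint.h>

/* same value * scale / divisor conversion as rapl_unit_xlate() */
static uint64_t raw_energy_to_uj(uint64_t raw_delta, uint64_t divisor)
{
        return raw_delta * 1000000ULL / divisor;
}

/* raw_energy_to_uj(65536, 65536) == 1000000 uJ, i.e. exactly 1 J */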
873 | |||
874 | /* REVISIT: | ||
875 | * When package power limit is set artificially low by RAPL, LVT | ||
876 | * thermal interrupt for package power limit should be ignored | ||
877 | * since we are not really exceeding the real limit. The intention | ||
878 | * is to avoid excessive interrupts while we are trying to save power. | ||
879 | * A useful feature might be routing the package_power_limit interrupt | ||
880 | * to userspace via eventfd. Once we have a use case, this is simple | ||
881 | * to do by adding an atomic notifier. | ||
882 | */ | ||
883 | |||
884 | static void package_power_limit_irq_save(int package_id) | ||
885 | { | ||
886 | u32 l, h = 0; | ||
887 | int cpu; | ||
888 | struct rapl_package *rp; | ||
889 | |||
890 | rp = find_package_by_id(package_id); | ||
891 | if (!rp) | ||
892 | return; | ||
893 | |||
894 | if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) | ||
895 | return; | ||
896 | |||
897 | cpu = find_active_cpu_on_package(package_id); | ||
898 | if (cpu < 0) | ||
899 | return; | ||
900 | /* save the state of PLN irq mask bit before disabling it */ | ||
901 | rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h); | ||
902 | if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) { | ||
903 | rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE; | ||
904 | rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED; | ||
905 | } | ||
906 | l &= ~PACKAGE_THERM_INT_PLN_ENABLE; | ||
907 | wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); | ||
908 | } | ||
909 | |||
910 | /* restore per package power limit interrupt enable state */ | ||
911 | static void package_power_limit_irq_restore(int package_id) | ||
912 | { | ||
913 | u32 l, h; | ||
914 | int cpu; | ||
915 | struct rapl_package *rp; | ||
916 | |||
917 | rp = find_package_by_id(package_id); | ||
918 | if (!rp) | ||
919 | return; | ||
920 | |||
921 | if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) | ||
922 | return; | ||
923 | |||
924 | cpu = find_active_cpu_on_package(package_id); | ||
925 | if (cpu < 0) | ||
926 | return; | ||
927 | |||
928 | /* irq enable state not saved, nothing to restore */ | ||
929 | if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) | ||
930 | return; | ||
931 | rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h); | ||
932 | |||
933 | if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE) | ||
934 | l |= PACKAGE_THERM_INT_PLN_ENABLE; | ||
935 | else | ||
936 | l &= ~PACKAGE_THERM_INT_PLN_ENABLE; | ||
937 | |||
938 | wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); | ||
939 | } | ||
940 | |||
941 | static const struct x86_cpu_id rapl_ids[] = { | ||
942 | { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ | ||
943 | { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ | ||
944 | { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ | ||
945 | { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ | ||
946 | /* TODO: Add more CPU IDs after testing */ | ||
947 | {} | ||
948 | }; | ||
949 | MODULE_DEVICE_TABLE(x86cpu, rapl_ids); | ||
950 | |||
951 | /* read once for all raw primitive data for all packages, domains */ | ||
952 | static void rapl_update_domain_data(void) | ||
953 | { | ||
954 | int dmn, prim; | ||
955 | u64 val; | ||
956 | struct rapl_package *rp; | ||
957 | |||
958 | list_for_each_entry(rp, &rapl_packages, plist) { | ||
959 | for (dmn = 0; dmn < rp->nr_domains; dmn++) { | ||
960 | pr_debug("update package %d domain %s data\n", rp->id, | ||
961 | rp->domains[dmn].name); | ||
962 | /* exclude non-raw primitives */ | ||
963 | for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) | ||
964 | if (!rapl_read_data_raw(&rp->domains[dmn], prim, | ||
965 | rpi[prim].unit, | ||
966 | &val)) | ||
967 | rp->domains[dmn].rdd.primitives[prim] = | ||
968 | val; | ||
969 | } | ||
970 | } | ||
971 | |||
972 | } | ||
973 | |||
974 | static int rapl_unregister_powercap(void) | ||
975 | { | ||
976 | struct rapl_package *rp; | ||
977 | struct rapl_domain *rd, *rd_package = NULL; | ||
978 | |||
979 | /* unregister all active rapl packages from the powercap layer, | ||
980 | * hotplug lock held | ||
981 | */ | ||
982 | list_for_each_entry(rp, &rapl_packages, plist) { | ||
983 | package_power_limit_irq_restore(rp->id); | ||
984 | |||
985 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; | ||
986 | rd++) { | ||
987 | pr_debug("remove package, undo power limit on %d: %s\n", | ||
988 | rp->id, rd->name); | ||
989 | rapl_write_data_raw(rd, PL1_ENABLE, 0); | ||
990 | rapl_write_data_raw(rd, PL2_ENABLE, 0); | ||
991 | rapl_write_data_raw(rd, PL1_CLAMP, 0); | ||
992 | rapl_write_data_raw(rd, PL2_CLAMP, 0); | ||
993 | if (rd->id == RAPL_DOMAIN_PACKAGE) { | ||
994 | rd_package = rd; | ||
995 | continue; | ||
996 | } | ||
997 | powercap_unregister_zone(control_type, &rd->power_zone); | ||
998 | } | ||
999 | /* do the package zone last */ | ||
1000 | if (rd_package) | ||
1001 | powercap_unregister_zone(control_type, | ||
1002 | &rd_package->power_zone); | ||
1003 | } | ||
1004 | powercap_unregister_control_type(control_type); | ||
1005 | |||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | static int rapl_package_register_powercap(struct rapl_package *rp) | ||
1010 | { | ||
1011 | struct rapl_domain *rd; | ||
1012 | int ret = 0; | ||
1013 | char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */ | ||
1014 | struct powercap_zone *power_zone = NULL; | ||
1015 | int nr_pl; | ||
1016 | |||
1017 | /* first we register package domain as the parent zone*/ | ||
1018 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { | ||
1019 | if (rd->id == RAPL_DOMAIN_PACKAGE) { | ||
1020 | nr_pl = find_nr_power_limit(rd); | ||
1021 | pr_debug("register socket %d package domain %s\n", | ||
1022 | rp->id, rd->name); | ||
1023 | memset(dev_name, 0, sizeof(dev_name)); | ||
1024 | snprintf(dev_name, sizeof(dev_name), "%s-%d", | ||
1025 | rd->name, rp->id); | ||
1026 | power_zone = powercap_register_zone(&rd->power_zone, | ||
1027 | control_type, | ||
1028 | dev_name, NULL, | ||
1029 | &zone_ops[rd->id], | ||
1030 | nr_pl, | ||
1031 | &constraint_ops); | ||
1032 | if (IS_ERR(power_zone)) { | ||
1033 | pr_debug("failed to register package, %d\n", | ||
1034 | rp->id); | ||
1035 | ret = PTR_ERR(power_zone); | ||
1036 | goto exit_package; | ||
1037 | } | ||
1038 | /* track parent zone in per package/socket data */ | ||
1039 | rp->power_zone = power_zone; | ||
1040 | /* done, only one package domain per socket */ | ||
1041 | break; | ||
1042 | } | ||
1043 | } | ||
1044 | if (!power_zone) { | ||
1045 | pr_err("no package domain found, unknown topology!\n"); | ||
1046 | ret = -ENODEV; | ||
1047 | goto exit_package; | ||
1048 | } | ||
1049 | /* now register domains as children of the socket/package*/ | ||
1050 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { | ||
1051 | if (rd->id == RAPL_DOMAIN_PACKAGE) | ||
1052 | continue; | ||
1053 | /* number of power limits per domain varies */ | ||
1054 | nr_pl = find_nr_power_limit(rd); | ||
1055 | power_zone = powercap_register_zone(&rd->power_zone, | ||
1056 | control_type, rd->name, | ||
1057 | rp->power_zone, | ||
1058 | &zone_ops[rd->id], nr_pl, | ||
1059 | &constraint_ops); | ||
1060 | |||
1061 | if (IS_ERR(power_zone)) { | ||
1062 | pr_debug("failed to register power_zone, %d:%s:%s\n", | ||
1063 | rp->id, rd->name, dev_name); | ||
1064 | ret = PTR_ERR(power_zone); | ||
1065 | goto err_cleanup; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | exit_package: | ||
1070 | return ret; | ||
1071 | err_cleanup: | ||
1072 | /* clean up previously initialized domains within the package if we | ||
1073 | * failed after the first domain setup. | ||
1074 | */ | ||
1075 | while (--rd >= rp->domains) { | ||
1076 | pr_debug("unregister package %d domain %s\n", rp->id, rd->name); | ||
1077 | powercap_unregister_zone(control_type, &rd->power_zone); | ||
1078 | } | ||
1079 | |||
1080 | return ret; | ||
1081 | } | ||
1082 | |||
1083 | static int rapl_register_powercap(void) | ||
1084 | { | ||
1085 | struct rapl_domain *rd; | ||
1086 | struct rapl_package *rp; | ||
1087 | int ret = 0; | ||
1088 | |||
1089 | control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); | ||
1090 | if (IS_ERR(control_type)) { | ||
1091 | pr_debug("failed to register powercap control_type.\n"); | ||
1092 | return PTR_ERR(control_type); | ||
1093 | } | ||
1094 | /* read the initial data */ | ||
1095 | rapl_update_domain_data(); | ||
1096 | list_for_each_entry(rp, &rapl_packages, plist) | ||
1097 | if (rapl_package_register_powercap(rp)) | ||
1098 | goto err_cleanup_package; | ||
1099 | return ret; | ||
1100 | |||
1101 | err_cleanup_package: | ||
1102 | /* clean up previously initialized packages */ | ||
1103 | list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) { | ||
1104 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; | ||
1105 | rd++) { | ||
1106 | pr_debug("unregister zone/package %d, %s domain\n", | ||
1107 | rp->id, rd->name); | ||
1108 | powercap_unregister_zone(control_type, &rd->power_zone); | ||
1109 | } | ||
1110 | } | ||
1111 | |||
1112 | return ret; | ||
1113 | } | ||
1114 | |||
1115 | static int rapl_check_domain(int cpu, int domain) | ||
1116 | { | ||
1117 | unsigned msr; | ||
1118 | u64 val1, val2 = 0; | ||
1119 | int retry = 0; | ||
1120 | |||
1121 | switch (domain) { | ||
1122 | case RAPL_DOMAIN_PACKAGE: | ||
1123 | msr = MSR_PKG_ENERGY_STATUS; | ||
1124 | break; | ||
1125 | case RAPL_DOMAIN_PP0: | ||
1126 | msr = MSR_PP0_ENERGY_STATUS; | ||
1127 | break; | ||
1128 | case RAPL_DOMAIN_PP1: | ||
1129 | msr = MSR_PP1_ENERGY_STATUS; | ||
1130 | break; | ||
1131 | case RAPL_DOMAIN_DRAM: | ||
1132 | msr = MSR_DRAM_ENERGY_STATUS; | ||
1133 | break; | ||
1134 | default: | ||
1135 | pr_err("invalid domain id %d\n", domain); | ||
1136 | return -EINVAL; | ||
1137 | } | ||
1138 | if (rdmsrl_safe_on_cpu(cpu, msr, &val1)) | ||
1139 | return -ENODEV; | ||
1140 | |||
1141 | /* energy counters roll slowly on some domains */ | ||
1142 | while (++retry < 10) { | ||
1143 | usleep_range(10000, 15000); | ||
1144 | rdmsrl_safe_on_cpu(cpu, msr, &val2); | ||
1145 | if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK)) | ||
1146 | return 0; | ||
1147 | } | ||
1148 | /* if energy counter does not change, report as bad domain */ | ||
1149 | pr_info("domain %s energy ctr %llu:%llu not working, skip\n", | ||
1150 | rapl_domain_names[domain], val1, val2); | ||
1151 | |||
1152 | return -ENODEV; | ||
1153 | } | ||
1154 | |||
1155 | /* Detect active and valid domains for the given CPU; the caller must | ||
1156 | * ensure the CPU belongs to the targeted package and CPU hotplug is disabled. | ||
1157 | */ | ||
1158 | static int rapl_detect_domains(struct rapl_package *rp, int cpu) | ||
1159 | { | ||
1160 | int i; | ||
1161 | int ret = 0; | ||
1162 | struct rapl_domain *rd; | ||
1163 | u64 locked; | ||
1164 | |||
1165 | for (i = 0; i < RAPL_DOMAIN_MAX; i++) { | ||
1166 | /* use physical package id to read counters */ | ||
1167 | if (!rapl_check_domain(cpu, i)) | ||
1168 | rp->domain_map |= 1 << i; | ||
1169 | } | ||
1170 | rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); | ||
1171 | if (!rp->nr_domains) { | ||
1172 | pr_err("no valid rapl domains found in package %d\n", rp->id); | ||
1173 | ret = -ENODEV; | ||
1174 | goto done; | ||
1175 | } | ||
1176 | pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id); | ||
1177 | |||
1178 | rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain), | ||
1179 | GFP_KERNEL); | ||
1180 | if (!rp->domains) { | ||
1181 | ret = -ENOMEM; | ||
1182 | goto done; | ||
1183 | } | ||
1184 | rapl_init_domains(rp); | ||
1185 | |||
1186 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { | ||
1187 | /* check if the domain is locked by BIOS */ | ||
1188 | if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { | ||
1189 | pr_info("RAPL package %d domain %s locked by BIOS\n", | ||
1190 | rp->id, rd->name); | ||
1191 | rd->state |= DOMAIN_STATE_BIOS_LOCKED; | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1195 | |||
1196 | done: | ||
1197 | return ret; | ||
1198 | } | ||
1199 | |||
1200 | static bool is_package_new(int package) | ||
1201 | { | ||
1202 | struct rapl_package *rp; | ||
1203 | |||
1204 | /* The caller prevents CPU hotplug, so no new packages will be added or | ||
1205 | * deleted while traversing the package list; no locking is needed. | ||
1206 | */ | ||
1207 | list_for_each_entry(rp, &rapl_packages, plist) | ||
1208 | if (package == rp->id) | ||
1209 | return false; | ||
1210 | |||
1211 | return true; | ||
1212 | } | ||
1213 | |||
1214 | /* The RAPL interface is organized as a two-level hierarchy: package level and | ||
1215 | * domain level. We first detect the number of packages, then the domains of | ||
1216 | * each package. We have to consider the possibility of CPU online/offline due | ||
1217 | * to hotplug and other scenarios. | ||
1218 | */ | ||
1219 | static int rapl_detect_topology(void) | ||
1220 | { | ||
1221 | int i; | ||
1222 | int phy_package_id; | ||
1223 | struct rapl_package *new_package, *rp; | ||
1224 | |||
1225 | for_each_online_cpu(i) { | ||
1226 | phy_package_id = topology_physical_package_id(i); | ||
1227 | if (is_package_new(phy_package_id)) { | ||
1228 | new_package = kzalloc(sizeof(*rp), GFP_KERNEL); | ||
1229 | if (!new_package) { | ||
1230 | rapl_cleanup_data(); | ||
1231 | return -ENOMEM; | ||
1232 | } | ||
1233 | /* add the new package to the list */ | ||
1234 | new_package->id = phy_package_id; | ||
1235 | new_package->nr_cpus = 1; | ||
1236 | |||
1237 | /* check if the package contains valid domains */ | ||
1238 | if (rapl_detect_domains(new_package, i) || | ||
1239 | rapl_check_unit(new_package, i)) { | ||
1240 | kfree(new_package->domains); | ||
1241 | kfree(new_package); | ||
1242 | /* free up the packages already initialized */ | ||
1243 | rapl_cleanup_data(); | ||
1244 | return -ENODEV; | ||
1245 | } | ||
1246 | INIT_LIST_HEAD(&new_package->plist); | ||
1247 | list_add(&new_package->plist, &rapl_packages); | ||
1248 | } else { | ||
1249 | rp = find_package_by_id(phy_package_id); | ||
1250 | if (rp) | ||
1251 | ++rp->nr_cpus; | ||
1252 | } | ||
1253 | } | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
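Given the two-level hierarchy described above and the zone naming used by powercap_register_zone() below ("<parent name>:<id>" under the "powercap" class), a single-socket system would typically end up with a sysfs layout along these lines; the exact zone ids are an assumption:

/sys/class/powercap/intel-rapl/                               control type
/sys/class/powercap/intel-rapl/intel-rapl:0/                  package zone
/sys/class/powercap/intel-rapl/intel-rapl:0/intel-rapl:0:0/   child zone (e.g. core)
/sys/class/powercap/intel-rapl/intel-rapl:0/intel-rapl:0:1/   child zone (e.g. dram)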
1257 | |||
1258 | /* called from CPU hotplug notifier, hotplug lock held */ | ||
1259 | static void rapl_remove_package(struct rapl_package *rp) | ||
1260 | { | ||
1261 | struct rapl_domain *rd, *rd_package = NULL; | ||
1262 | |||
1263 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { | ||
1264 | if (rd->id == RAPL_DOMAIN_PACKAGE) { | ||
1265 | rd_package = rd; | ||
1266 | continue; | ||
1267 | } | ||
1268 | pr_debug("remove package %d, %s domain\n", rp->id, rd->name); | ||
1269 | powercap_unregister_zone(control_type, &rd->power_zone); | ||
1270 | } | ||
1271 | /* do parent zone last */ | ||
1272 | powercap_unregister_zone(control_type, &rd_package->power_zone); | ||
1273 | list_del(&rp->plist); | ||
1274 | kfree(rp); | ||
1275 | } | ||
1276 | |||
1277 | /* called from CPU hotplug notifier, hotplug lock held */ | ||
1278 | static int rapl_add_package(int cpu) | ||
1279 | { | ||
1280 | int ret = 0; | ||
1281 | int phy_package_id; | ||
1282 | struct rapl_package *rp; | ||
1283 | |||
1284 | phy_package_id = topology_physical_package_id(cpu); | ||
1285 | rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); | ||
1286 | if (!rp) | ||
1287 | return -ENOMEM; | ||
1288 | |||
1289 | /* add the new package to the list */ | ||
1290 | rp->id = phy_package_id; | ||
1291 | rp->nr_cpus = 1; | ||
1292 | /* check if the package contains valid domains */ | ||
1293 | if (rapl_detect_domains(rp, cpu) || | ||
1294 | rapl_check_unit(rp, cpu)) { | ||
1295 | ret = -ENODEV; | ||
1296 | goto err_free_package; | ||
1297 | } | ||
1298 | if (!rapl_package_register_powercap(rp)) { | ||
1299 | INIT_LIST_HEAD(&rp->plist); | ||
1300 | list_add(&rp->plist, &rapl_packages); | ||
1301 | return ret; | ||
1302 | } | ||
1303 | |||
1304 | err_free_package: | ||
1305 | kfree(rp->domains); | ||
1306 | kfree(rp); | ||
1307 | |||
1308 | return ret; | ||
1309 | } | ||
1310 | |||
1311 | /* Handles CPU hotplug on multi-socket systems. | ||
1312 | * If a CPU goes online as the first CPU of the physical package, | ||
1313 | * we add the RAPL package to the system. Similarly, when the last | ||
1314 | * CPU of the package is removed, we remove the RAPL package and its | ||
1315 | * associated domains. Cooling devices are handled accordingly at | ||
1316 | * per-domain level. | ||
1317 | */ | ||
1318 | static int rapl_cpu_callback(struct notifier_block *nfb, | ||
1319 | unsigned long action, void *hcpu) | ||
1320 | { | ||
1321 | unsigned long cpu = (unsigned long)hcpu; | ||
1322 | int phy_package_id; | ||
1323 | struct rapl_package *rp; | ||
1324 | |||
1325 | phy_package_id = topology_physical_package_id(cpu); | ||
1326 | switch (action) { | ||
1327 | case CPU_ONLINE: | ||
1328 | case CPU_ONLINE_FROZEN: | ||
1329 | case CPU_DOWN_FAILED: | ||
1330 | case CPU_DOWN_FAILED_FROZEN: | ||
1331 | rp = find_package_by_id(phy_package_id); | ||
1332 | if (rp) | ||
1333 | ++rp->nr_cpus; | ||
1334 | else | ||
1335 | rapl_add_package(cpu); | ||
1336 | break; | ||
1337 | case CPU_DOWN_PREPARE: | ||
1338 | case CPU_DOWN_PREPARE_FROZEN: | ||
1339 | rp = find_package_by_id(phy_package_id); | ||
1340 | if (!rp) | ||
1341 | break; | ||
1342 | if (--rp->nr_cpus == 0) | ||
1343 | rapl_remove_package(rp); | ||
1344 | } | ||
1345 | |||
1346 | return NOTIFY_OK; | ||
1347 | } | ||
1348 | |||
1349 | static struct notifier_block rapl_cpu_notifier = { | ||
1350 | .notifier_call = rapl_cpu_callback, | ||
1351 | }; | ||
1352 | |||
1353 | static int __init rapl_init(void) | ||
1354 | { | ||
1355 | int ret = 0; | ||
1356 | |||
1357 | if (!x86_match_cpu(rapl_ids)) { | ||
1358 | pr_err("driver does not support CPU family %d model %d\n", | ||
1359 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
1360 | |||
1361 | return -ENODEV; | ||
1362 | } | ||
1363 | /* prevent CPU hotplug during detection */ | ||
1364 | get_online_cpus(); | ||
1365 | ret = rapl_detect_topology(); | ||
1366 | if (ret) | ||
1367 | goto done; | ||
1368 | |||
1369 | if (rapl_register_powercap()) { | ||
1370 | rapl_cleanup_data(); | ||
1371 | ret = -ENODEV; | ||
1372 | goto done; | ||
1373 | } | ||
1374 | register_hotcpu_notifier(&rapl_cpu_notifier); | ||
1375 | done: | ||
1376 | put_online_cpus(); | ||
1377 | |||
1378 | return ret; | ||
1379 | } | ||
1380 | |||
1381 | static void __exit rapl_exit(void) | ||
1382 | { | ||
1383 | get_online_cpus(); | ||
1384 | unregister_hotcpu_notifier(&rapl_cpu_notifier); | ||
1385 | rapl_unregister_powercap(); | ||
1386 | rapl_cleanup_data(); | ||
1387 | put_online_cpus(); | ||
1388 | } | ||
1389 | |||
1390 | module_init(rapl_init); | ||
1391 | module_exit(rapl_exit); | ||
1392 | |||
1393 | MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)"); | ||
1394 | MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>"); | ||
1395 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c new file mode 100644 index 000000000000..8d0fe431dbdd --- /dev/null +++ b/drivers/powercap/powercap_sys.c | |||
@@ -0,0 +1,685 @@ | |||
1 | /* | ||
2 | * Power capping class | ||
3 | * Copyright (c) 2013, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/powercap.h> | ||
24 | |||
25 | #define to_powercap_zone(n) container_of(n, struct powercap_zone, dev) | ||
26 | #define to_powercap_control_type(n) \ | ||
27 | container_of(n, struct powercap_control_type, dev) | ||
28 | |||
29 | /* Power zone show function */ | ||
30 | #define define_power_zone_show(_attr) \ | ||
31 | static ssize_t _attr##_show(struct device *dev, \ | ||
32 | struct device_attribute *dev_attr,\ | ||
33 | char *buf) \ | ||
34 | { \ | ||
35 | u64 value; \ | ||
36 | ssize_t len = -EINVAL; \ | ||
37 | struct powercap_zone *power_zone = to_powercap_zone(dev); \ | ||
38 | \ | ||
39 | if (power_zone->ops->get_##_attr) { \ | ||
40 | if (!power_zone->ops->get_##_attr(power_zone, &value)) \ | ||
41 | len = sprintf(buf, "%lld\n", value); \ | ||
42 | } \ | ||
43 | \ | ||
44 | return len; \ | ||
45 | } | ||
46 | |||
47 | /* The only meaningful input is 0 (reset), others are silently ignored */ | ||
48 | #define define_power_zone_store(_attr) \ | ||
49 | static ssize_t _attr##_store(struct device *dev,\ | ||
50 | struct device_attribute *dev_attr, \ | ||
51 | const char *buf, size_t count) \ | ||
52 | { \ | ||
53 | int err; \ | ||
54 | struct powercap_zone *power_zone = to_powercap_zone(dev); \ | ||
55 | u64 value; \ | ||
56 | \ | ||
57 | err = kstrtoull(buf, 10, &value); \ | ||
58 | if (err) \ | ||
59 | return -EINVAL; \ | ||
60 | if (value) \ | ||
61 | return count; \ | ||
62 | if (power_zone->ops->reset_##_attr) { \ | ||
63 | if (!power_zone->ops->reset_##_attr(power_zone)) \ | ||
64 | return count; \ | ||
65 | } \ | ||
66 | \ | ||
67 | return -EINVAL; \ | ||
68 | } | ||
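From userspace, these attributes behave as plain text files; the sketch below (not part of the patch) reads energy_uj and writes "0" to request a counter reset, which only succeeds if the zone implements reset_energy_uj. The zone path is an assumption based on the intel-rapl naming earlier in this diff:

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/class/powercap/intel-rapl/intel-rapl:0/energy_uj";
        unsigned long long uj;
        FILE *f = fopen(path, "r+");    /* needs root for the write side */

        if (!f)
                return 1;
        if (fscanf(f, "%llu", &uj) == 1)
                printf("energy: %llu uJ\n", uj);
        rewind(f);
        /* only "0" is meaningful; nonzero values are silently ignored */
        fputs("0\n", f);
        fclose(f);
        return 0;
}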
69 | |||
70 | /* Power zone constraint show function */ | ||
71 | #define define_power_zone_constraint_show(_attr) \ | ||
72 | static ssize_t show_constraint_##_attr(struct device *dev, \ | ||
73 | struct device_attribute *dev_attr,\ | ||
74 | char *buf) \ | ||
75 | { \ | ||
76 | u64 value; \ | ||
77 | ssize_t len = -ENODATA; \ | ||
78 | struct powercap_zone *power_zone = to_powercap_zone(dev); \ | ||
79 | int id; \ | ||
80 | struct powercap_zone_constraint *pconst;\ | ||
81 | \ | ||
82 | if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \ | ||
83 | return -EINVAL; \ | ||
84 | if (id >= power_zone->const_id_cnt) \ | ||
85 | return -EINVAL; \ | ||
86 | pconst = &power_zone->constraints[id]; \ | ||
87 | if (pconst && pconst->ops && pconst->ops->get_##_attr) { \ | ||
88 | if (!pconst->ops->get_##_attr(power_zone, id, &value)) \ | ||
89 | len = sprintf(buf, "%lld\n", value); \ | ||
90 | } \ | ||
91 | \ | ||
92 | return len; \ | ||
93 | } | ||
94 | |||
95 | /* Power zone constraint store function */ | ||
96 | #define define_power_zone_constraint_store(_attr) \ | ||
97 | static ssize_t store_constraint_##_attr(struct device *dev,\ | ||
98 | struct device_attribute *dev_attr, \ | ||
99 | const char *buf, size_t count) \ | ||
100 | { \ | ||
101 | int err; \ | ||
102 | u64 value; \ | ||
103 | struct powercap_zone *power_zone = to_powercap_zone(dev); \ | ||
104 | int id; \ | ||
105 | struct powercap_zone_constraint *pconst;\ | ||
106 | \ | ||
107 | if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \ | ||
108 | return -EINVAL; \ | ||
109 | if (id >= power_zone->const_id_cnt) \ | ||
110 | return -EINVAL; \ | ||
111 | pconst = &power_zone->constraints[id]; \ | ||
112 | err = kstrtoull(buf, 10, &value); \ | ||
113 | if (err) \ | ||
114 | return -EINVAL; \ | ||
115 | if (pconst && pconst->ops && pconst->ops->set_##_attr) { \ | ||
116 | if (!pconst->ops->set_##_attr(power_zone, id, value)) \ | ||
117 | return count; \ | ||
118 | } \ | ||
119 | \ | ||
120 | return -ENODATA; \ | ||
121 | } | ||
122 | |||
123 | /* Power zone information callbacks */ | ||
124 | define_power_zone_show(power_uw); | ||
125 | define_power_zone_show(max_power_range_uw); | ||
126 | define_power_zone_show(energy_uj); | ||
127 | define_power_zone_store(energy_uj); | ||
128 | define_power_zone_show(max_energy_range_uj); | ||
129 | |||
130 | /* Power zone attributes */ | ||
131 | static DEVICE_ATTR_RO(max_power_range_uw); | ||
132 | static DEVICE_ATTR_RO(power_uw); | ||
133 | static DEVICE_ATTR_RO(max_energy_range_uj); | ||
134 | static DEVICE_ATTR_RW(energy_uj); | ||
135 | |||
136 | /* Power zone constraint attributes callbacks */ | ||
137 | define_power_zone_constraint_show(power_limit_uw); | ||
138 | define_power_zone_constraint_store(power_limit_uw); | ||
139 | define_power_zone_constraint_show(time_window_us); | ||
140 | define_power_zone_constraint_store(time_window_us); | ||
141 | define_power_zone_constraint_show(max_power_uw); | ||
142 | define_power_zone_constraint_show(min_power_uw); | ||
143 | define_power_zone_constraint_show(max_time_window_us); | ||
144 | define_power_zone_constraint_show(min_time_window_us); | ||
145 | |||
146 | /* For one-time seeding of constraint device attributes */ | ||
147 | struct powercap_constraint_attr { | ||
148 | struct device_attribute power_limit_attr; | ||
149 | struct device_attribute time_window_attr; | ||
150 | struct device_attribute max_power_attr; | ||
151 | struct device_attribute min_power_attr; | ||
152 | struct device_attribute max_time_window_attr; | ||
153 | struct device_attribute min_time_window_attr; | ||
154 | struct device_attribute name_attr; | ||
155 | }; | ||
156 | |||
157 | static struct powercap_constraint_attr | ||
158 | constraint_attrs[MAX_CONSTRAINTS_PER_ZONE]; | ||
159 | |||
160 | /* A list of powercap control_types */ | ||
161 | static LIST_HEAD(powercap_cntrl_list); | ||
162 | /* Mutex to protect list of powercap control_types */ | ||
163 | static DEFINE_MUTEX(powercap_cntrl_list_lock); | ||
164 | |||
165 | #define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */ | ||
166 | static ssize_t show_constraint_name(struct device *dev, | ||
167 | struct device_attribute *dev_attr, | ||
168 | char *buf) | ||
169 | { | ||
170 | const char *name; | ||
171 | struct powercap_zone *power_zone = to_powercap_zone(dev); | ||
172 | int id; | ||
173 | ssize_t len = -ENODATA; | ||
174 | struct powercap_zone_constraint *pconst; | ||
175 | |||
176 | if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) | ||
177 | return -EINVAL; | ||
178 | if (id >= power_zone->const_id_cnt) | ||
179 | return -EINVAL; | ||
180 | pconst = &power_zone->constraints[id]; | ||
181 | |||
182 | if (pconst && pconst->ops && pconst->ops->get_name) { | ||
183 | name = pconst->ops->get_name(power_zone, id); | ||
184 | if (name) { | ||
185 | snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN, | ||
186 | "%s\n", name); | ||
187 | buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0'; | ||
188 | len = strlen(buf); | ||
189 | } | ||
190 | } | ||
191 | |||
192 | return len; | ||
193 | } | ||
194 | |||
195 | static int create_constraint_attribute(int id, const char *name, | ||
196 | int mode, | ||
197 | struct device_attribute *dev_attr, | ||
198 | ssize_t (*show)(struct device *, | ||
199 | struct device_attribute *, char *), | ||
200 | ssize_t (*store)(struct device *, | ||
201 | struct device_attribute *, | ||
202 | const char *, size_t) | ||
203 | ) | ||
204 | { | ||
205 | |||
206 | dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s", | ||
207 | id, name); | ||
208 | if (!dev_attr->attr.name) | ||
209 | return -ENOMEM; | ||
210 | dev_attr->attr.mode = mode; | ||
211 | dev_attr->show = show; | ||
212 | dev_attr->store = store; | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static void free_constraint_attributes(void) | ||
218 | { | ||
219 | int i; | ||
220 | |||
221 | for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { | ||
222 | kfree(constraint_attrs[i].power_limit_attr.attr.name); | ||
223 | kfree(constraint_attrs[i].time_window_attr.attr.name); | ||
224 | kfree(constraint_attrs[i].name_attr.attr.name); | ||
225 | kfree(constraint_attrs[i].max_power_attr.attr.name); | ||
226 | kfree(constraint_attrs[i].min_power_attr.attr.name); | ||
227 | kfree(constraint_attrs[i].max_time_window_attr.attr.name); | ||
228 | kfree(constraint_attrs[i].min_time_window_attr.attr.name); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | static int seed_constraint_attributes(void) | ||
233 | { | ||
234 | int i; | ||
235 | int ret; | ||
236 | |||
237 | for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { | ||
238 | ret = create_constraint_attribute(i, "power_limit_uw", | ||
239 | S_IWUSR | S_IRUGO, | ||
240 | &constraint_attrs[i].power_limit_attr, | ||
241 | show_constraint_power_limit_uw, | ||
242 | store_constraint_power_limit_uw); | ||
243 | if (ret) | ||
244 | goto err_alloc; | ||
245 | ret = create_constraint_attribute(i, "time_window_us", | ||
246 | S_IWUSR | S_IRUGO, | ||
247 | &constraint_attrs[i].time_window_attr, | ||
248 | show_constraint_time_window_us, | ||
249 | store_constraint_time_window_us); | ||
250 | if (ret) | ||
251 | goto err_alloc; | ||
252 | ret = create_constraint_attribute(i, "name", S_IRUGO, | ||
253 | &constraint_attrs[i].name_attr, | ||
254 | show_constraint_name, | ||
255 | NULL); | ||
256 | if (ret) | ||
257 | goto err_alloc; | ||
258 | ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO, | ||
259 | &constraint_attrs[i].max_power_attr, | ||
260 | show_constraint_max_power_uw, | ||
261 | NULL); | ||
262 | if (ret) | ||
263 | goto err_alloc; | ||
264 | ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO, | ||
265 | &constraint_attrs[i].min_power_attr, | ||
266 | show_constraint_min_power_uw, | ||
267 | NULL); | ||
268 | if (ret) | ||
269 | goto err_alloc; | ||
270 | ret = create_constraint_attribute(i, "max_time_window_us", | ||
271 | S_IRUGO, | ||
272 | &constraint_attrs[i].max_time_window_attr, | ||
273 | show_constraint_max_time_window_us, | ||
274 | NULL); | ||
275 | if (ret) | ||
276 | goto err_alloc; | ||
277 | ret = create_constraint_attribute(i, "min_time_window_us", | ||
278 | S_IRUGO, | ||
279 | &constraint_attrs[i].min_time_window_attr, | ||
280 | show_constraint_min_time_window_us, | ||
281 | NULL); | ||
282 | if (ret) | ||
283 | goto err_alloc; | ||
284 | |||
285 | } | ||
286 | |||
287 | return 0; | ||
288 | |||
289 | err_alloc: | ||
290 | free_constraint_attributes(); | ||
291 | |||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | static int create_constraints(struct powercap_zone *power_zone, | ||
296 | int nr_constraints, | ||
297 | struct powercap_zone_constraint_ops *const_ops) | ||
298 | { | ||
299 | int i; | ||
300 | int ret = 0; | ||
301 | int count; | ||
302 | struct powercap_zone_constraint *pconst; | ||
303 | |||
304 | if (!power_zone || !const_ops || !const_ops->get_power_limit_uw || | ||
305 | !const_ops->set_power_limit_uw || | ||
306 | !const_ops->get_time_window_us || | ||
307 | !const_ops->set_time_window_us) | ||
308 | return -EINVAL; | ||
309 | |||
310 | count = power_zone->zone_attr_count; | ||
311 | for (i = 0; i < nr_constraints; ++i) { | ||
312 | pconst = &power_zone->constraints[i]; | ||
313 | pconst->ops = const_ops; | ||
314 | pconst->id = power_zone->const_id_cnt; | ||
315 | power_zone->const_id_cnt++; | ||
316 | power_zone->zone_dev_attrs[count++] = | ||
317 | &constraint_attrs[i].power_limit_attr.attr; | ||
318 | power_zone->zone_dev_attrs[count++] = | ||
319 | &constraint_attrs[i].time_window_attr.attr; | ||
320 | if (pconst->ops->get_name) | ||
321 | power_zone->zone_dev_attrs[count++] = | ||
322 | &constraint_attrs[i].name_attr.attr; | ||
323 | if (pconst->ops->get_max_power_uw) | ||
324 | power_zone->zone_dev_attrs[count++] = | ||
325 | &constraint_attrs[i].max_power_attr.attr; | ||
326 | if (pconst->ops->get_min_power_uw) | ||
327 | power_zone->zone_dev_attrs[count++] = | ||
328 | &constraint_attrs[i].min_power_attr.attr; | ||
329 | if (pconst->ops->get_max_time_window_us) | ||
330 | power_zone->zone_dev_attrs[count++] = | ||
331 | &constraint_attrs[i].max_time_window_attr.attr; | ||
332 | if (pconst->ops->get_min_time_window_us) | ||
333 | power_zone->zone_dev_attrs[count++] = | ||
334 | &constraint_attrs[i].min_time_window_attr.attr; | ||
335 | } | ||
336 | power_zone->zone_attr_count = count; | ||
337 | |||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | static bool control_type_valid(void *control_type) | ||
342 | { | ||
343 | struct powercap_control_type *pos = NULL; | ||
344 | bool found = false; | ||
345 | |||
346 | mutex_lock(&powercap_cntrl_list_lock); | ||
347 | |||
348 | list_for_each_entry(pos, &powercap_cntrl_list, node) { | ||
349 | if (pos == control_type) { | ||
350 | found = true; | ||
351 | break; | ||
352 | } | ||
353 | } | ||
354 | mutex_unlock(&powercap_cntrl_list_lock); | ||
355 | |||
356 | return found; | ||
357 | } | ||
358 | |||
359 | static ssize_t name_show(struct device *dev, | ||
360 | struct device_attribute *attr, | ||
361 | char *buf) | ||
362 | { | ||
363 | struct powercap_zone *power_zone = to_powercap_zone(dev); | ||
364 | |||
365 | return sprintf(buf, "%s\n", power_zone->name); | ||
366 | } | ||
367 | |||
368 | static DEVICE_ATTR_RO(name); | ||
369 | |||
370 | /* Create zone and attributes in sysfs */ | ||
371 | static void create_power_zone_common_attributes( | ||
372 | struct powercap_zone *power_zone) | ||
373 | { | ||
374 | int count = 0; | ||
375 | |||
376 | power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr; | ||
377 | if (power_zone->ops->get_max_energy_range_uj) | ||
378 | power_zone->zone_dev_attrs[count++] = | ||
379 | &dev_attr_max_energy_range_uj.attr; | ||
380 | if (power_zone->ops->get_energy_uj) | ||
381 | power_zone->zone_dev_attrs[count++] = | ||
382 | &dev_attr_energy_uj.attr; | ||
383 | if (power_zone->ops->get_power_uw) | ||
384 | power_zone->zone_dev_attrs[count++] = | ||
385 | &dev_attr_power_uw.attr; | ||
386 | if (power_zone->ops->get_max_power_range_uw) | ||
387 | power_zone->zone_dev_attrs[count++] = | ||
388 | &dev_attr_max_power_range_uw.attr; | ||
389 | power_zone->zone_dev_attrs[count] = NULL; | ||
390 | power_zone->zone_attr_count = count; | ||
391 | } | ||
392 | |||
393 | static void powercap_release(struct device *dev) | ||
394 | { | ||
395 | bool allocated; | ||
396 | |||
397 | if (dev->parent) { | ||
398 | struct powercap_zone *power_zone = to_powercap_zone(dev); | ||
399 | |||
400 | /* Store flag as the release() may free memory */ | ||
401 | allocated = power_zone->allocated; | ||
402 | /* Remove id from parent idr struct */ | ||
403 | idr_remove(power_zone->parent_idr, power_zone->id); | ||
404 | /* Destroy idrs allocated for this zone */ | ||
405 | idr_destroy(&power_zone->idr); | ||
406 | kfree(power_zone->name); | ||
407 | kfree(power_zone->zone_dev_attrs); | ||
408 | kfree(power_zone->constraints); | ||
409 | if (power_zone->ops->release) | ||
410 | power_zone->ops->release(power_zone); | ||
411 | if (allocated) | ||
412 | kfree(power_zone); | ||
413 | } else { | ||
414 | struct powercap_control_type *control_type = | ||
415 | to_powercap_control_type(dev); | ||
416 | |||
417 | /* Store flag as the release() may free memory */ | ||
418 | allocated = control_type->allocated; | ||
419 | idr_destroy(&control_type->idr); | ||
420 | mutex_destroy(&control_type->lock); | ||
421 | if (control_type->ops && control_type->ops->release) | ||
422 | control_type->ops->release(control_type); | ||
423 | if (allocated) | ||
424 | kfree(control_type); | ||
425 | } | ||
426 | } | ||
427 | |||
428 | static ssize_t enabled_show(struct device *dev, | ||
429 | struct device_attribute *attr, | ||
430 | char *buf) | ||
431 | { | ||
432 | bool mode = true; | ||
433 | |||
434 | /* Default is enabled */ | ||
435 | if (dev->parent) { | ||
436 | struct powercap_zone *power_zone = to_powercap_zone(dev); | ||
437 | if (power_zone->ops->get_enable) | ||
438 | if (power_zone->ops->get_enable(power_zone, &mode)) | ||
439 | mode = false; | ||
440 | } else { | ||
441 | struct powercap_control_type *control_type = | ||
442 | to_powercap_control_type(dev); | ||
443 | if (control_type->ops && control_type->ops->get_enable) | ||
444 | if (control_type->ops->get_enable(control_type, &mode)) | ||
445 | mode = false; | ||
446 | } | ||
447 | |||
448 | return sprintf(buf, "%d\n", mode); | ||
449 | } | ||
450 | |||
451 | static ssize_t enabled_store(struct device *dev, | ||
452 | struct device_attribute *attr, | ||
453 | const char *buf, size_t len) | ||
454 | { | ||
455 | bool mode; | ||
456 | |||
457 | if (strtobool(buf, &mode)) | ||
458 | return -EINVAL; | ||
459 | if (dev->parent) { | ||
460 | struct powercap_zone *power_zone = to_powercap_zone(dev); | ||
461 | if (power_zone->ops->set_enable) | ||
462 | if (!power_zone->ops->set_enable(power_zone, mode)) | ||
463 | return len; | ||
464 | } else { | ||
465 | struct powercap_control_type *control_type = | ||
466 | to_powercap_control_type(dev); | ||
467 | if (control_type->ops && control_type->ops->set_enable) | ||
468 | if (!control_type->ops->set_enable(control_type, mode)) | ||
469 | return len; | ||
470 | } | ||
471 | |||
472 | return -ENOSYS; | ||
473 | } | ||
474 | |||
475 | static DEVICE_ATTR_RW(enabled); | ||
476 | |||
477 | static struct attribute *powercap_attrs[] = { | ||
478 | &dev_attr_enabled.attr, | ||
479 | NULL, | ||
480 | }; | ||
481 | ATTRIBUTE_GROUPS(powercap); | ||
482 | |||
483 | static struct class powercap_class = { | ||
484 | .name = "powercap", | ||
485 | .dev_release = powercap_release, | ||
486 | .dev_groups = powercap_groups, | ||
487 | }; | ||
488 | |||
489 | struct powercap_zone *powercap_register_zone( | ||
490 | struct powercap_zone *power_zone, | ||
491 | struct powercap_control_type *control_type, | ||
492 | const char *name, | ||
493 | struct powercap_zone *parent, | ||
494 | const struct powercap_zone_ops *ops, | ||
495 | int nr_constraints, | ||
496 | struct powercap_zone_constraint_ops *const_ops) | ||
497 | { | ||
498 | int result; | ||
499 | int nr_attrs; | ||
500 | |||
501 | if (!name || !control_type || !ops || | ||
502 | nr_constraints > MAX_CONSTRAINTS_PER_ZONE || | ||
503 | (!ops->get_energy_uj && !ops->get_power_uw) || | ||
504 | !control_type_valid(control_type)) | ||
505 | return ERR_PTR(-EINVAL); | ||
506 | |||
507 | if (power_zone) { | ||
508 | if (!ops->release) | ||
509 | return ERR_PTR(-EINVAL); | ||
510 | memset(power_zone, 0, sizeof(*power_zone)); | ||
511 | } else { | ||
512 | power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL); | ||
513 | if (!power_zone) | ||
514 | return ERR_PTR(-ENOMEM); | ||
515 | power_zone->allocated = true; | ||
516 | } | ||
517 | power_zone->ops = ops; | ||
518 | power_zone->control_type_inst = control_type; | ||
519 | if (!parent) { | ||
520 | power_zone->dev.parent = &control_type->dev; | ||
521 | power_zone->parent_idr = &control_type->idr; | ||
522 | } else { | ||
523 | power_zone->dev.parent = &parent->dev; | ||
524 | power_zone->parent_idr = &parent->idr; | ||
525 | } | ||
526 | power_zone->dev.class = &powercap_class; | ||
527 | |||
528 | mutex_lock(&control_type->lock); | ||
529 | /* Using idr to get the unique id */ | ||
530 | result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL); | ||
531 | if (result < 0) | ||
532 | goto err_idr_alloc; | ||
533 | |||
534 | power_zone->id = result; | ||
535 | idr_init(&power_zone->idr); | ||
536 | power_zone->name = kstrdup(name, GFP_KERNEL); | ||
537 | if (!power_zone->name) | ||
538 | goto err_name_alloc; | ||
539 | dev_set_name(&power_zone->dev, "%s:%x", | ||
540 | dev_name(power_zone->dev.parent), | ||
541 | power_zone->id); | ||
542 | power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) * | ||
543 | nr_constraints, GFP_KERNEL); | ||
544 | if (!power_zone->constraints) | ||
545 | goto err_const_alloc; | ||
546 | |||
547 | nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS + | ||
548 | POWERCAP_ZONE_MAX_ATTRS + 1; | ||
549 | power_zone->zone_dev_attrs = kzalloc(sizeof(void *) * | ||
550 | nr_attrs, GFP_KERNEL); | ||
551 | if (!power_zone->zone_dev_attrs) | ||
552 | goto err_attr_alloc; | ||
553 | create_power_zone_common_attributes(power_zone); | ||
554 | result = create_constraints(power_zone, nr_constraints, const_ops); | ||
555 | if (result) | ||
556 | goto err_dev_ret; | ||
557 | |||
558 | power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL; | ||
559 | power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs; | ||
560 | power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group; | ||
561 | power_zone->dev_attr_groups[1] = NULL; | ||
562 | power_zone->dev.groups = power_zone->dev_attr_groups; | ||
563 | result = device_register(&power_zone->dev); | ||
564 | if (result) | ||
565 | goto err_dev_ret; | ||
566 | |||
567 | control_type->nr_zones++; | ||
568 | mutex_unlock(&control_type->lock); | ||
569 | |||
570 | return power_zone; | ||
571 | |||
572 | err_dev_ret: | ||
573 | kfree(power_zone->zone_dev_attrs); | ||
574 | err_attr_alloc: | ||
575 | kfree(power_zone->constraints); | ||
576 | err_const_alloc: | ||
577 | kfree(power_zone->name); | ||
578 | err_name_alloc: | ||
579 | idr_remove(power_zone->parent_idr, power_zone->id); | ||
580 | err_idr_alloc: | ||
581 | if (power_zone->allocated) | ||
582 | kfree(power_zone); | ||
583 | mutex_unlock(&control_type->lock); | ||
584 | |||
585 | return ERR_PTR(result); | ||
586 | } | ||
587 | EXPORT_SYMBOL_GPL(powercap_register_zone); | ||
588 | |||
589 | int powercap_unregister_zone(struct powercap_control_type *control_type, | ||
590 | struct powercap_zone *power_zone) | ||
591 | { | ||
592 | if (!power_zone || !control_type) | ||
593 | return -EINVAL; | ||
594 | |||
595 | mutex_lock(&control_type->lock); | ||
596 | control_type->nr_zones--; | ||
597 | mutex_unlock(&control_type->lock); | ||
598 | |||
599 | device_unregister(&power_zone->dev); | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | EXPORT_SYMBOL_GPL(powercap_unregister_zone); | ||
604 | |||
605 | struct powercap_control_type *powercap_register_control_type( | ||
606 | struct powercap_control_type *control_type, | ||
607 | const char *name, | ||
608 | const struct powercap_control_type_ops *ops) | ||
609 | { | ||
610 | int result; | ||
611 | |||
612 | if (!name) | ||
613 | return ERR_PTR(-EINVAL); | ||
614 | if (control_type) { | ||
615 | if (!ops || !ops->release) | ||
616 | return ERR_PTR(-EINVAL); | ||
617 | memset(control_type, 0, sizeof(*control_type)); | ||
618 | } else { | ||
619 | control_type = kzalloc(sizeof(*control_type), GFP_KERNEL); | ||
620 | if (!control_type) | ||
621 | return ERR_PTR(-ENOMEM); | ||
622 | control_type->allocated = true; | ||
623 | } | ||
624 | mutex_init(&control_type->lock); | ||
625 | control_type->ops = ops; | ||
626 | INIT_LIST_HEAD(&control_type->node); | ||
627 | control_type->dev.class = &powercap_class; | ||
628 | dev_set_name(&control_type->dev, "%s", name); | ||
629 | result = device_register(&control_type->dev); | ||
630 | if (result) { | ||
631 | if (control_type->allocated) | ||
632 | kfree(control_type); | ||
633 | return ERR_PTR(result); | ||
634 | } | ||
635 | idr_init(&control_type->idr); | ||
636 | |||
637 | mutex_lock(&powercap_cntrl_list_lock); | ||
638 | list_add_tail(&control_type->node, &powercap_cntrl_list); | ||
639 | mutex_unlock(&powercap_cntrl_list_lock); | ||
640 | |||
641 | return control_type; | ||
642 | } | ||
643 | EXPORT_SYMBOL_GPL(powercap_register_control_type); | ||
644 | |||
645 | int powercap_unregister_control_type(struct powercap_control_type *control_type) | ||
646 | { | ||
647 | struct powercap_control_type *pos = NULL; | ||
648 | |||
649 | if (control_type->nr_zones) { | ||
650 | dev_err(&control_type->dev, "Zones of this type still not freed\n"); | ||
651 | return -EINVAL; | ||
652 | } | ||
653 | mutex_lock(&powercap_cntrl_list_lock); | ||
654 | list_for_each_entry(pos, &powercap_cntrl_list, node) { | ||
655 | if (pos == control_type) { | ||
656 | list_del(&control_type->node); | ||
657 | mutex_unlock(&powercap_cntrl_list_lock); | ||
658 | device_unregister(&control_type->dev); | ||
659 | return 0; | ||
660 | } | ||
661 | } | ||
662 | mutex_unlock(&powercap_cntrl_list_lock); | ||
663 | |||
664 | return -ENODEV; | ||
665 | } | ||
666 | EXPORT_SYMBOL_GPL(powercap_unregister_control_type); | ||
667 | |||
668 | static int __init powercap_init(void) | ||
669 | { | ||
670 | int result = 0; | ||
671 | |||
672 | result = seed_constraint_attributes(); | ||
673 | if (result) | ||
674 | return result; | ||
675 | |||
676 | result = class_register(&powercap_class); | ||
677 | |||
678 | return result; | ||
679 | } | ||
680 | |||
681 | device_initcall(powercap_init); | ||
682 | |||
683 | MODULE_DESCRIPTION("PowerCap sysfs Driver"); | ||
684 | MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); | ||
685 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d05accf706c..927998aa5e71 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -245,15 +245,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type); | |||
245 | static int spi_drv_probe(struct device *dev) | 245 | static int spi_drv_probe(struct device *dev) |
246 | { | 246 | { |
247 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); | 247 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); |
248 | struct spi_device *spi = to_spi_device(dev); | ||
249 | int ret; | ||
250 | |||
251 | acpi_dev_pm_attach(&spi->dev, true); | ||
252 | ret = sdrv->probe(spi); | ||
253 | if (ret) | ||
254 | acpi_dev_pm_detach(&spi->dev, true); | ||
248 | 255 | ||
249 | return sdrv->probe(to_spi_device(dev)); | 256 | return ret; |
250 | } | 257 | } |
251 | 258 | ||
252 | static int spi_drv_remove(struct device *dev) | 259 | static int spi_drv_remove(struct device *dev) |
253 | { | 260 | { |
254 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); | 261 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); |
262 | struct spi_device *spi = to_spi_device(dev); | ||
263 | int ret; | ||
264 | |||
265 | ret = sdrv->remove(spi); | ||
266 | acpi_dev_pm_detach(&spi->dev, true); | ||
255 | 267 | ||
256 | return sdrv->remove(to_spi_device(dev)); | 268 | return ret; |
257 | } | 269 | } |
258 | 270 | ||
259 | static void spi_drv_shutdown(struct device *dev) | 271 | static void spi_drv_shutdown(struct device *dev) |
@@ -1145,8 +1157,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, | |||
1145 | return AE_OK; | 1157 | return AE_OK; |
1146 | } | 1158 | } |
1147 | 1159 | ||
1160 | adev->power.flags.ignore_parent = true; | ||
1148 | strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); | 1161 | strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); |
1149 | if (spi_add_device(spi)) { | 1162 | if (spi_add_device(spi)) { |
1163 | adev->power.flags.ignore_parent = false; | ||
1150 | dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", | 1164 | dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", |
1151 | dev_name(&adev->dev)); | 1165 | dev_name(&adev->dev)); |
1152 | spi_dev_put(spi); | 1166 | spi_dev_put(spi); |
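With acpi_dev_pm_attach() and acpi_dev_pm_detach() now called by the SPI core around probe() and remove(), an ACPI-enumerated slave reaches its driver already powered and attached to the ACPI PM domain. A hypothetical peripheral driver sketch follows, not part of this patch; the example_* identifiers and the "example-acpi-spi" name are assumptions, and the driver merely opts in to runtime PM on top of what the core now sets up.

/* Hypothetical SPI slave driver relying on the core's ACPI PM attach. */
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	/* The core attached the ACPI PM domain and powered the device on. */
	pm_runtime_set_active(&spi->dev);
	pm_runtime_enable(&spi->dev);
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	pm_runtime_disable(&spi->dev);
	/* The core detaches the ACPI PM domain after remove() returns. */
	return 0;
}

static struct spi_driver example_spi_driver = {
	.driver = {
		.name	= "example-acpi-spi",
		.owner	= THIS_MODULE,
	},
	.probe	= example_spi_probe,
	.remove	= example_spi_remove,
};
module_spi_driver(example_spi_driver);

MODULE_DESCRIPTION("Example consumer of SPI core ACPI PM handling");
MODULE_LICENSE("GPL v2");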
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index dbfc390330ac..5ef596765060 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -78,7 +78,6 @@ config THERMAL_GOV_USER_SPACE | |||
78 | config CPU_THERMAL | 78 | config CPU_THERMAL |
79 | bool "generic cpu cooling support" | 79 | bool "generic cpu cooling support" |
80 | depends on CPU_FREQ | 80 | depends on CPU_FREQ |
81 | select CPU_FREQ_TABLE | ||
82 | help | 81 | help |
83 | This implements the generic cpu cooling mechanism through frequency | 82 | This implements the generic cpu cooling mechanism through frequency |
84 | reduction. An ACPI version of this already exists | 83 | reduction. An ACPI version of this already exists |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 94a403a9717a..5d05555fe841 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -21,6 +21,9 @@ | |||
21 | #include <asm/backlight.h> | 21 | #include <asm/backlight.h> |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | static struct list_head backlight_dev_list; | ||
25 | static struct mutex backlight_dev_list_mutex; | ||
26 | |||
24 | static const char *const backlight_types[] = { | 27 | static const char *const backlight_types[] = { |
25 | [BACKLIGHT_RAW] = "raw", | 28 | [BACKLIGHT_RAW] = "raw", |
26 | [BACKLIGHT_PLATFORM] = "platform", | 29 | [BACKLIGHT_PLATFORM] = "platform", |
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name, | |||
349 | mutex_unlock(&pmac_backlight_mutex); | 352 | mutex_unlock(&pmac_backlight_mutex); |
350 | #endif | 353 | #endif |
351 | 354 | ||
355 | mutex_lock(&backlight_dev_list_mutex); | ||
356 | list_add(&new_bd->entry, &backlight_dev_list); | ||
357 | mutex_unlock(&backlight_dev_list_mutex); | ||
358 | |||
352 | return new_bd; | 359 | return new_bd; |
353 | } | 360 | } |
354 | EXPORT_SYMBOL(backlight_device_register); | 361 | EXPORT_SYMBOL(backlight_device_register); |
355 | 362 | ||
363 | bool backlight_device_registered(enum backlight_type type) | ||
364 | { | ||
365 | bool found = false; | ||
366 | struct backlight_device *bd; | ||
367 | |||
368 | mutex_lock(&backlight_dev_list_mutex); | ||
369 | list_for_each_entry(bd, &backlight_dev_list, entry) { | ||
370 | if (bd->props.type == type) { | ||
371 | found = true; | ||
372 | break; | ||
373 | } | ||
374 | } | ||
375 | mutex_unlock(&backlight_dev_list_mutex); | ||
376 | |||
377 | return found; | ||
378 | } | ||
379 | EXPORT_SYMBOL(backlight_device_registered); | ||
380 | |||
356 | /** | 381 | /** |
357 | * backlight_device_unregister - unregisters a backlight device object. | 382 | * backlight_device_unregister - unregisters a backlight device object. |
358 | * @bd: the backlight device object to be unregistered and freed. | 383 | * @bd: the backlight device object to be unregistered and freed. |
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd) | |||
364 | if (!bd) | 389 | if (!bd) |
365 | return; | 390 | return; |
366 | 391 | ||
392 | mutex_lock(&backlight_dev_list_mutex); | ||
393 | list_del(&bd->entry); | ||
394 | mutex_unlock(&backlight_dev_list_mutex); | ||
395 | |||
367 | #ifdef CONFIG_PMAC_BACKLIGHT | 396 | #ifdef CONFIG_PMAC_BACKLIGHT |
368 | mutex_lock(&pmac_backlight_mutex); | 397 | mutex_lock(&pmac_backlight_mutex); |
369 | if (pmac_backlight == bd) | 398 | if (pmac_backlight == bd) |
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void) | |||
499 | 528 | ||
500 | backlight_class->dev_groups = bl_device_groups; | 529 | backlight_class->dev_groups = bl_device_groups; |
501 | backlight_class->pm = &backlight_class_dev_pm_ops; | 530 | backlight_class->pm = &backlight_class_dev_pm_ops; |
531 | INIT_LIST_HEAD(&backlight_dev_list); | ||
532 | mutex_init(&backlight_dev_list_mutex); | ||
502 | return 0; | 533 | return 0; |
503 | } | 534 | } |
504 | 535 | ||
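The list added above backs the new backlight_device_registered() helper, which lets other code check whether a backlight interface of a given type already exists before registering its own. A hypothetical consumer sketch, not part of this patch, is shown below; it assumes the helper's prototype is exposed through linux/backlight.h by this series, and the example_* identifiers and log messages are made up.

/* Hypothetical consumer of backlight_device_registered(). */
#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init example_backlight_check_init(void)
{
	/*
	 * Prefer a raw (GPU-native) backlight if one is already
	 * registered; only fall back to a vendor interface otherwise.
	 */
	if (backlight_device_registered(BACKLIGHT_RAW))
		pr_info("raw backlight present, skipping vendor interface\n");
	else
		pr_info("no raw backlight, registering vendor interface\n");

	return 0;
}
module_init(example_backlight_check_init);

MODULE_LICENSE("GPL v2");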