Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/Kconfig		 12
-rw-r--r--	drivers/acpi/Makefile		  2
-rw-r--r--	drivers/acpi/acpi_pad.c		514
-rw-r--r--	drivers/acpi/dock.c		 16
-rw-r--r--	drivers/acpi/ec.c		 56
-rw-r--r--	drivers/acpi/proc.c		  2
-rw-r--r--	drivers/acpi/processor_core.c	  7
-rw-r--r--	drivers/acpi/scan.c		  7
-rw-r--r--	drivers/acpi/video.c		  2
9 files changed, 588 insertions, 30 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index dd8729d674e5..0ed42d8870c7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU
 	select ACPI_CONTAINER
 	default y
 
+config ACPI_PROCESSOR_AGGREGATOR
+	tristate "Processor Aggregator"
+	depends on ACPI_PROCESSOR
+	depends on EXPERIMENTAL
+	depends on X86
+	help
+	  ACPI 4.0 defines the Processor Aggregator device, which enables the
+	  OS to perform specific processor configuration and control that
+	  applies to all processors in the platform. Currently only logical
+	  processor idling is defined, which is used to reduce power
+	  consumption. This driver supports the new device.
+
 config ACPI_THERMAL
 	tristate "Thermal Zone"
 	depends on ACPI_PROCESSOR
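
With the new Kconfig symbol above, the driver can be built as a module by setting the following in the kernel .config (a minimal sketch, assuming ACPI_PROCESSOR, EXPERIMENTAL and X86 are already enabled):

	CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
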
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82cd49dc603b..7702118509a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
 processor-y			:= processor_core.o processor_throttling.o
 processor-y			+= processor_idle.o processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
+
+obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644
index 000000000000..0d2cdb86158b
--- /dev/null
+++ b/drivers/acpi/acpi_pad.c
@@ -0,0 +1,514 @@
+/*
+ * acpi_pad.c ACPI Processor Aggregator Driver
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"processor_aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME	"Processor Aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+static DEFINE_MUTEX(isolated_cpus_lock);
+
+#define MWAIT_SUBSTATE_MASK	(0xf)
+#define MWAIT_CSTATE_MASK	(0xf)
+#define MWAIT_SUBSTATE_SIZE	(4)
+#define CPUID_MWAIT_LEAF	(5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED	(0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK	(0x2)
+static unsigned long power_saving_mwait_eax;
+static void power_saving_mwait_init(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	unsigned int highest_cstate = 0;
+	unsigned int highest_subcstate = 0;
+	int i;
+
+	if (!boot_cpu_has(X86_FEATURE_MWAIT))
+		return;
+	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+		return;
+
+	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+		return;
+
+	edx >>= MWAIT_SUBSTATE_SIZE;
+	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+		if (edx & MWAIT_SUBSTATE_MASK) {
+			highest_cstate = i;
+			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+		}
+	}
+	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+		(highest_subcstate - 1);
+
+	for_each_online_cpu(i)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
+
+#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+	case X86_VENDOR_INTEL:
+		/*
+		 * AMD Fam10h TSC will tick in all
+		 * C/P/S0/S1 states when this bit is set.
+		 */
+		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+			return;
+
+		/*FALL THROUGH*/
+	default:
+		/* TSC could halt in idle, so notify users */
+		mark_tsc_unstable("TSC halts in idle");
+	}
+#endif
+}
+
+static unsigned long cpu_weight[NR_CPUS];
+static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
+static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
+static void round_robin_cpu(unsigned int tsk_index)
+{
+	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+	cpumask_var_t tmp;
+	int cpu;
+	unsigned long min_weight = -1, preferred_cpu;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return;
+
+	mutex_lock(&isolated_cpus_lock);
+	cpumask_clear(tmp);
+	for_each_cpu(cpu, pad_busy_cpus)
+		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+	cpumask_andnot(tmp, cpu_online_mask, tmp);
+	/* avoid HT siblings if possible */
+	if (cpumask_empty(tmp))
+		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+	if (cpumask_empty(tmp)) {
+		mutex_unlock(&isolated_cpus_lock);
+		return;
+	}
+	for_each_cpu(cpu, tmp) {
+		if (cpu_weight[cpu] < min_weight) {
+			min_weight = cpu_weight[cpu];
+			preferred_cpu = cpu;
+		}
+	}
+
+	if (tsk_in_cpu[tsk_index] != -1)
+		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+	tsk_in_cpu[tsk_index] = preferred_cpu;
+	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
+	cpu_weight[preferred_cpu]++;
+	mutex_unlock(&isolated_cpus_lock);
+
+	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+}
+
+static void exit_round_robin(unsigned int tsk_index)
+{
+	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+	tsk_in_cpu[tsk_index] = -1;
+}
+
+static unsigned int idle_pct = 5; /* percentage */
+static unsigned int round_robin_time = 10; /* seconds */
+static int power_saving_thread(void *data)
+{
+	struct sched_param param = {.sched_priority = 1};
+	int do_sleep;
+	unsigned int tsk_index = (unsigned long)data;
+	u64 last_jiffies = 0;
+
+	sched_setscheduler(current, SCHED_RR, &param);
+
+	while (!kthread_should_stop()) {
+		int cpu;
+		u64 expire_time;
+
+		try_to_freeze();
+
+		/* round robin to cpus */
+		if (last_jiffies + round_robin_time * HZ < jiffies) {
+			last_jiffies = jiffies;
+			round_robin_cpu(tsk_index);
+		}
+
+		do_sleep = 0;
+
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+
+		expire_time = jiffies + HZ * (100 - idle_pct) / 100;
+
+		while (!need_resched()) {
+			local_irq_disable();
+			cpu = smp_processor_id();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+				&cpu);
+			stop_critical_timings();
+
+			__monitor((void *)&current_thread_info()->flags, 0, 0);
+			smp_mb();
+			if (!need_resched())
+				__mwait(power_saving_mwait_eax, 1);
+
+			start_critical_timings();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+				&cpu);
+			local_irq_enable();
+
+			if (jiffies > expire_time) {
+				do_sleep = 1;
+				break;
+			}
+		}
+
+		current_thread_info()->status |= TS_POLLING;
+
+		/*
+		 * The current sched_rt has a threshold for RT task running
+		 * time.  When an RT task uses 95% of the CPU time, the RT
+		 * thread is scheduled out for 5% of the CPU time so that it
+		 * does not starve other tasks.  But this only works when all
+		 * CPUs have an RT task running; if one CPU has none, RT tasks
+		 * from other CPUs borrow its CPU time and exceed the 95%
+		 * limit.  To make the starvation avoidance work, take a nap.
+		 */
+		if (do_sleep)
+			schedule_timeout_killable(HZ * idle_pct / 100);
+	}
+
+	exit_round_robin(tsk_index);
+	return 0;
+}
+
+static struct task_struct *ps_tsks[NR_CPUS];
+static unsigned int ps_tsk_num;
+static int create_power_saving_task(void)
+{
+	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
+		(void *)(unsigned long)ps_tsk_num,
+		"power_saving/%d", ps_tsk_num);
+	if (ps_tsks[ps_tsk_num]) {
+		ps_tsk_num++;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void destroy_power_saving_task(void)
+{
+	if (ps_tsk_num > 0) {
+		ps_tsk_num--;
+		kthread_stop(ps_tsks[ps_tsk_num]);
+	}
+}
+
+static void set_power_saving_task_num(unsigned int num)
+{
+	if (num > ps_tsk_num) {
+		while (ps_tsk_num < num) {
+			if (create_power_saving_task())
+				return;
+		}
+	} else if (num < ps_tsk_num) {
+		while (ps_tsk_num > num)
+			destroy_power_saving_task();
+	}
+}
+
+static int acpi_pad_idle_cpus(unsigned int num_cpus)
+{
+	get_online_cpus();
+
+	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
+	set_power_saving_task_num(num_cpus);
+
+	put_online_cpus();
+	return 0;
+}
+
+static uint32_t acpi_pad_idle_cpus_num(void)
+{
+	return ps_tsk_num;
+}
+
+static ssize_t acpi_pad_rrtime_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long num;
+	if (strict_strtoul(buf, 0, &num))
+		return -EINVAL;
+	if (num < 1 || num >= 100)
+		return -EINVAL;
+	mutex_lock(&isolated_cpus_lock);
+	round_robin_time = num;
+	mutex_unlock(&isolated_cpus_lock);
+	return count;
+}
+
+static ssize_t acpi_pad_rrtime_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
+}
+static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
+	acpi_pad_rrtime_show,
+	acpi_pad_rrtime_store);
+
+static ssize_t acpi_pad_idlepct_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long num;
+	if (strict_strtoul(buf, 0, &num))
+		return -EINVAL;
+	if (num < 1 || num >= 100)
+		return -EINVAL;
+	mutex_lock(&isolated_cpus_lock);
+	idle_pct = num;
+	mutex_unlock(&isolated_cpus_lock);
+	return count;
+}
+
+static ssize_t acpi_pad_idlepct_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
+}
+static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
+	acpi_pad_idlepct_show,
+	acpi_pad_idlepct_store);
+
+static ssize_t acpi_pad_idlecpus_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long num;
+	if (strict_strtoul(buf, 0, &num))
+		return -EINVAL;
+	mutex_lock(&isolated_cpus_lock);
+	acpi_pad_idle_cpus(num);
+	mutex_unlock(&isolated_cpus_lock);
+	return count;
+}
+
+static ssize_t acpi_pad_idlecpus_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return cpumask_scnprintf(buf, PAGE_SIZE,
+		to_cpumask(pad_busy_cpus_bits));
+}
+static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
+	acpi_pad_idlecpus_show,
+	acpi_pad_idlecpus_store);
+
+static int acpi_pad_add_sysfs(struct acpi_device *device)
+{
+	int result;
+
+	result = device_create_file(&device->dev, &dev_attr_idlecpus);
+	if (result)
+		return -ENODEV;
+	result = device_create_file(&device->dev, &dev_attr_idlepct);
+	if (result) {
+		device_remove_file(&device->dev, &dev_attr_idlecpus);
+		return -ENODEV;
+	}
+	result = device_create_file(&device->dev, &dev_attr_rrtime);
+	if (result) {
+		device_remove_file(&device->dev, &dev_attr_idlecpus);
+		device_remove_file(&device->dev, &dev_attr_idlepct);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void acpi_pad_remove_sysfs(struct acpi_device *device)
+{
+	device_remove_file(&device->dev, &dev_attr_idlecpus);
+	device_remove_file(&device->dev, &dev_attr_idlepct);
+	device_remove_file(&device->dev, &dev_attr_rrtime);
+}
+
+/* Query firmware how many CPUs should be idle */
+static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+{
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_status status;
+	union acpi_object *package;
+	int rev, num, ret = -EINVAL;
+
+	status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+	package = buffer.pointer;
+	if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
+		goto out;
+	rev = package->package.elements[0].integer.value;
+	num = package->package.elements[1].integer.value;
+	if (rev != 1)
+		goto out;
+	*num_cpus = num;
+	ret = 0;
+out:
+	kfree(buffer.pointer);
+	return ret;
+}
+
+/* Notify firmware how many CPUs are idle */
+static void acpi_pad_ost(acpi_handle handle, int stat,
+	uint32_t idle_cpus)
+{
+	union acpi_object params[3] = {
+		{.type = ACPI_TYPE_INTEGER,},
+		{.type = ACPI_TYPE_INTEGER,},
+		{.type = ACPI_TYPE_BUFFER,},
+	};
+	struct acpi_object_list arg_list = {3, params};
+
+	params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
+	params[1].integer.value = stat;
+	params[2].buffer.length = 4;
+	params[2].buffer.pointer = (void *)&idle_cpus;
+	acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+}
+
+static void acpi_pad_handle_notify(acpi_handle handle)
+{
+	int num_cpus, ret;
+	uint32_t idle_cpus;
+
+	mutex_lock(&isolated_cpus_lock);
+	if (acpi_pad_pur(handle, &num_cpus)) {
+		mutex_unlock(&isolated_cpus_lock);
+		return;
+	}
+	ret = acpi_pad_idle_cpus(num_cpus);
+	idle_cpus = acpi_pad_idle_cpus_num();
+	if (!ret)
+		acpi_pad_ost(handle, 0, idle_cpus);
+	else
+		acpi_pad_ost(handle, 1, 0);
+	mutex_unlock(&isolated_cpus_lock);
+}
+
+static void acpi_pad_notify(acpi_handle handle, u32 event,
+	void *data)
+{
+	struct acpi_device *device = data;
+
+	switch (event) {
+	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
+		acpi_pad_handle_notify(handle);
+		acpi_bus_generate_proc_event(device, event, 0);
+		acpi_bus_generate_netlink_event(device->pnp.device_class,
+			dev_name(&device->dev), event, 0);
+		break;
+	default:
+		printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
+		break;
+	}
+}
+
+static int acpi_pad_add(struct acpi_device *device)
+{
+	acpi_status status;
+
+	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+
+	if (acpi_pad_add_sysfs(device))
+		return -ENODEV;
+
+	status = acpi_install_notify_handler(device->handle,
+		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
+	if (ACPI_FAILURE(status)) {
+		acpi_pad_remove_sysfs(device);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int acpi_pad_remove(struct acpi_device *device,
+	int type)
+{
+	mutex_lock(&isolated_cpus_lock);
+	acpi_pad_idle_cpus(0);
+	mutex_unlock(&isolated_cpus_lock);
+
+	acpi_remove_notify_handler(device->handle,
+		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
+	acpi_pad_remove_sysfs(device);
+	return 0;
+}
+
+static const struct acpi_device_id pad_device_ids[] = {
+	{"ACPI000C", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, pad_device_ids);
+
+static struct acpi_driver acpi_pad_driver = {
+	.name = "processor_aggregator",
+	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
+	.ids = pad_device_ids,
+	.ops = {
+		.add = acpi_pad_add,
+		.remove = acpi_pad_remove,
+	},
+};
+
+static int __init acpi_pad_init(void)
+{
+	power_saving_mwait_init();
+	if (power_saving_mwait_eax == 0)
+		return -EINVAL;
+
+	return acpi_bus_register_driver(&acpi_pad_driver);
+}
+
+static void __exit acpi_pad_exit(void)
+{
+	acpi_bus_unregister_driver(&acpi_pad_driver);
+}
+
+module_init(acpi_pad_init);
+module_exit(acpi_pad_exit);
+MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
+MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
+MODULE_LICENSE("GPL");
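
The new acpi_pad.c above exposes three writable sysfs attributes (idlecpus, idlepct, rrtime) on the ACPI000C device. A minimal userspace sketch of driving them follows; it is not part of the patch, and the /sys path is an assumption (the device usually shows up under /sys/bus/acpi/devices/ACPI000C:00/, but the exact location can differ):

	/*
	 * Sketch only: poke the sysfs knobs created by acpi_pad_add_sysfs().
	 * The device path below is assumed and may differ on a given system.
	 */
	#include <stdio.h>

	#define PAD_SYSFS "/sys/bus/acpi/devices/ACPI000C:00/"	/* assumed path */

	static int write_attr(const char *name, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), PAD_SYSFS "%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		write_attr("idlecpus", "2");	/* ask the driver to idle two logical CPUs */
		write_attr("idlepct", "5");	/* each thread sleeps 5% of the time (the default) */
		write_attr("rrtime", "10");	/* rotate threads to a new CPU every 10 seconds */
		return 0;
	}

A write to idlecpus goes through acpi_pad_idle_cpus() and starts or stops that many power_saving/%d threads, while reading it back returns the bitmap of CPUs the threads currently occupy.
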
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3a2cfefc71ab..7338b6a3e049 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -67,7 +67,7 @@ struct dock_station {
 	struct list_head dependent_devices;
 	struct list_head hotplug_devices;
 
-	struct list_head sibiling;
+	struct list_head sibling;
 	struct platform_device *dock_device;
 };
 static LIST_HEAD(dock_stations);
@@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle)
 
 	if (is_dock(handle))
 		return 1;
-	list_for_each_entry(dock_station, &dock_stations, sibiling) {
+	list_for_each_entry(dock_station, &dock_stations, sibling) {
 		if (find_dock_dependent_device(dock_station, handle))
 			return 1;
 	}
@@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
 	 * make sure this handle is for a device dependent on the dock,
 	 * this would include the dock station itself
 	 */
-	list_for_each_entry(dock_station, &dock_stations, sibiling) {
+	list_for_each_entry(dock_station, &dock_stations, sibling) {
 		/*
 		 * An ATA bay can be in a dock and itself can be ejected
 		 * seperately, so there are two 'dock stations' which need the
@@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
 	if (!dock_station_count)
 		return;
 
-	list_for_each_entry(dock_station, &dock_stations, sibiling) {
+	list_for_each_entry(dock_station, &dock_stations, sibling) {
 		dd = find_dock_dependent_device(dock_station, handle);
 		if (dd)
 			dock_del_hotplug_device(dock_station, dd);
@@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
 	if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
 	   && event != ACPI_NOTIFY_EJECT_REQUEST)
 		return 0;
-	list_for_each_entry(dock_station, &dock_stations, sibiling) {
+	list_for_each_entry(dock_station, &dock_stations, sibling) {
 		if (dock_station->handle == handle) {
 			struct dock_data *dock_data;
 
@@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle)
 	dock_station->last_dock_time = jiffies - HZ;
 	INIT_LIST_HEAD(&dock_station->dependent_devices);
 	INIT_LIST_HEAD(&dock_station->hotplug_devices);
-	INIT_LIST_HEAD(&dock_station->sibiling);
+	INIT_LIST_HEAD(&dock_station->sibling);
 	spin_lock_init(&dock_station->dd_lock);
 	mutex_init(&dock_station->hp_lock);
 	ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
@@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle)
 	add_dock_dependent_device(dock_station, dd);
 
 	dock_station_count++;
-	list_add(&dock_station->sibiling, &dock_stations);
+	list_add(&dock_station->sibling, &dock_stations);
 	return 0;
 
 dock_add_err_unregister:
@@ -1149,7 +1149,7 @@ static void __exit dock_exit(void)
 	struct dock_station *tmp;
 
 	unregister_acpi_bus_notifier(&dock_acpi_notifier);
-	list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling)
+	list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
 		dock_remove(dock_station);
 }
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f70796081c4c..baef28c1e630 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -119,6 +119,8 @@ static struct acpi_ec {
 } *boot_ec, *first_ec;
 
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */
+static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
 			}
 			advance_transaction(ec, acpi_ec_read_status(ec));
 		} while (time_before(jiffies, delay));
-		if (!ec->curr->irq_count ||
-		    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
+		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
 			break;
-		/* try restart command if we get any false interrupts */
 		pr_debug(PREFIX "controller reset, restart transaction\n");
 		spin_lock_irqsave(&ec->curr_lock, flags);
 		start_transaction(ec);
@@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = {
 	{"", 0},
 };
 
+/* Some BIOS do not survive early DSDT scan, skip it */
+static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
+{
+	EC_FLAGS_SKIP_DSDT_SCAN = 1;
+	return 0;
+}
+
+/* ASUSTek often supplies us with broken ECDT, validate it */
+static int ec_validate_ecdt(const struct dmi_system_id *id)
+{
+	EC_FLAGS_VALIDATE_ECDT = 1;
+	return 0;
+}
+
+/* MSI EC needs special treatment, enable it */
+static int ec_flag_msi(const struct dmi_system_id *id)
+{
+	EC_FLAGS_MSI = 1;
+	EC_FLAGS_VALIDATE_ECDT = 1;
+	return 0;
+}
+
+static struct dmi_system_id __initdata ec_dmi_table[] = {
+	{
+	ec_skip_dsdt_scan, "Compal JFL92", {
+	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+	{
+	ec_flag_msi, "MSI hardware", {
+	DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
+	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
+	{
+	ec_validate_ecdt, "ASUS hardware", {
+	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+	{},
+};
+
+
 int __init acpi_ec_ecdt_probe(void)
 {
 	acpi_status status;
@@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void)
 	/*
 	 * Generate a boot ec context
 	 */
-	if (dmi_name_in_vendors("Micro-Star") ||
-	    dmi_name_in_vendors("Notebook")) {
-		pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
-		EC_FLAGS_MSI = 1;
-	}
+	dmi_check_system(ec_dmi_table);
 	status = acpi_get_table(ACPI_SIG_ECDT, 1,
 			(struct acpi_table_header **)&ecdt_ptr);
 	if (ACPI_SUCCESS(status)) {
@@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void)
 		boot_ec->handle = ACPI_ROOT_OBJECT;
 		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
 		/* Don't trust ECDT, which comes from ASUSTek */
-		if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0)
+		if (!EC_FLAGS_VALIDATE_ECDT)
 			goto install;
 		saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
 		if (!saved_ec)
@@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void)
 		memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
 		/* fall through */
 	}
+
+	if (EC_FLAGS_SKIP_DSDT_SCAN)
+		return -ENODEV;
+
 	/* This workaround is needed only on some broken machines,
 	 * which require early EC, but fail to provide ECDT */
 	printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index d0d550d22a6d..f8b6f555ba52 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file,
 
 	if (len > 4)
 		len = 4;
+	if (len < 0)
+		return -EFAULT;
 
 	if (copy_from_user(strbuf, buffer, len))
 		return -EFAULT;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c2d4d6e09364..c567b46dfa0f 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device)
 		goto err_remove_sysfs;
 	}
 
-	if (pr->flags.throttling) {
-		printk(KERN_INFO PREFIX "%s [%s] (supports",
-		       acpi_device_name(device), acpi_device_bid(device));
-		printk(" %d throttling states", pr->throttling.state_count);
-		printk(")\n");
-	}
-
 	return 0;
 
 err_remove_sysfs:
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 468921bed22f..14a7481c97d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device)
 		device->flags.bus_address = 1;
 	}
 
+	kfree(info);
+
 	/*
 	 * Some devices don't reliably have _HIDs & _CIDs, so add
 	 * synthetic HIDs to make sure drivers can find them.
@@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 			  struct acpi_device **child)
 {
 	acpi_status status;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	void *device = NULL;
 
-	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-	printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
-	       (char *) buffer.pointer);
-
 	status = acpi_bus_check_add(handle, 0, ops, &device);
 	if (ACPI_SUCCESS(status))
 		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a4fddb24476f..f6e54bf8dd96 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
 			struct file *file);
 static ssize_t acpi_video_device_write_brightness(struct file *file,
 	const char __user *buffer, size_t count, loff_t *data);
-static struct file_operations acpi_video_device_brightness_fops = {
+static const struct file_operations acpi_video_device_brightness_fops = {
 	.owner = THIS_MODULE,
 	.open = acpi_video_device_brightness_open_fs,
 	.read = seq_read,