84 files changed, 3373 insertions, 1611 deletions
diff --git a/Documentation/ABI/stable/thermal-notification b/Documentation/ABI/stable/thermal-notification new file mode 100644 index 000000000000..9723e8b7aeb3 --- /dev/null +++ b/Documentation/ABI/stable/thermal-notification | |||
| @@ -0,0 +1,4 @@ | |||
| 1 | What: A notification mechanism for thermal related events | ||
| 2 | Description: | ||
| 3 | This interface enables notification for thermal related events. | ||
| 4 | The notification is in the form of a netlink event. | ||
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index 69dd29ed824e..b2bea15137d2 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt | |||
| @@ -533,6 +533,33 @@ completion during sending a panic event. | |||
| 533 | Other Pieces | 533 | Other Pieces |
| 534 | ------------ | 534 | ------------ |
| 535 | 535 | ||
| 536 | Get the detailed info related to the IPMI device | ||
| 537 | -------------------------------------------------- | ||
| 538 | |||
| 539 | Some users need more detailed information about a device, like where | ||
| 540 | the address came from or the raw base device for the IPMI interface. | ||
| 541 | You can use the IPMI smi_watcher to catch the IPMI interfaces as they | ||
| 542 | come or go, and to grab the information, you can use the function | ||
| 543 | ipmi_get_smi_info(), which returns the following structure: | ||
| 544 | |||
| 545 | struct ipmi_smi_info { | ||
| 546 | enum ipmi_addr_src addr_src; | ||
| 547 | struct device *dev; | ||
| 548 | union { | ||
| 549 | struct { | ||
| 550 | void *acpi_handle; | ||
| 551 | } acpi_info; | ||
| 552 | } addr_info; | ||
| 553 | }; | ||
| 554 | |||
| 555 | Currently, special info is returned only for SI_ACPI address | ||
| 556 | sources. Others may be added as necessary. | ||
| 557 | |||
| 558 | Note that the dev pointer is included in the above structure, and | ||
| 559 | if ipmi_get_smi_info() returns success, you must call put_device | ||
| 560 | on the dev pointer when you are done with it. | ||
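For illustration, here is a minimal sketch of that pattern. This is hypothetical driver code, not part of this patch: the names my_new_smi, my_smi_gone and my_watcher are made up, and only ipmi_smi_watcher, ipmi_get_smi_info() and put_device() come from the interfaces described above.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/device.h>
	#include <linux/ipmi.h>

	static void my_new_smi(int iface, struct device *dev)
	{
		struct ipmi_smi_info info;

		if (ipmi_get_smi_info(iface, &info))
			return;

		if (info.addr_src == SI_ACPI)
			pr_info("IPMI interface %d was described by ACPI\n", iface);

		/* ipmi_get_smi_info() took a reference on info.dev; drop it. */
		put_device(info.dev);
	}

	static void my_smi_gone(int iface)
	{
		/* forget any per-interface state kept by my_new_smi() */
	}

	static struct ipmi_smi_watcher my_watcher = {
		.owner    = THIS_MODULE,
		.new_smi  = my_new_smi,
		.smi_gone = my_smi_gone,
	};

	/* registered from module init with ipmi_smi_watcher_register(&my_watcher) */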
| 561 | |||
| 562 | |||
| 536 | Watchdog | 563 | Watchdog |
| 537 | -------- | 564 | -------- |
| 538 | 565 | ||
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt new file mode 100644 index 000000000000..9146952c612a --- /dev/null +++ b/Documentation/acpi/apei/output_format.txt | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | APEI output format | ||
| 2 | ~~~~~~~~~~~~~~~~~~ | ||
| 3 | |||
| 4 | APEI uses printk as the hardware error reporting interface; the | ||
| 5 | output format is as follows. | ||
| 6 | |||
| 7 | <error record> := | ||
| 8 | APEI generic hardware error status | ||
| 9 | severity: <integer>, <severity string> | ||
| 10 | section: <integer>, severity: <integer>, <severity string> | ||
| 11 | flags: <integer> | ||
| 12 | <section flags strings> | ||
| 13 | fru_id: <uuid string> | ||
| 14 | fru_text: <string> | ||
| 15 | section_type: <section type string> | ||
| 16 | <section data> | ||
| 17 | |||
| 18 | <severity string>* := recoverable | fatal | corrected | info | ||
| 19 | |||
| 20 | <section flags strings># := | ||
| 21 | [primary][, containment warning][, reset][, threshold exceeded]\ | ||
| 22 | [, resource not accessible][, latent error] | ||
| 23 | |||
| 24 | <section type string> := generic processor error | memory error | \ | ||
| 25 | PCIe error | unknown, <uuid string> | ||
| 26 | |||
| 27 | <section data> := | ||
| 28 | <generic processor section data> | <memory section data> | \ | ||
| 29 | <pcie section data> | <null> | ||
| 30 | |||
| 31 | <generic processor section data> := | ||
| 32 | [processor_type: <integer>, <proc type string>] | ||
| 33 | [processor_isa: <integer>, <proc isa string>] | ||
| 34 | [error_type: <integer> | ||
| 35 | <proc error type strings>] | ||
| 36 | [operation: <integer>, <proc operation string>] | ||
| 37 | [flags: <integer> | ||
| 38 | <proc flags strings>] | ||
| 39 | [level: <integer>] | ||
| 40 | [version_info: <integer>] | ||
| 41 | [processor_id: <integer>] | ||
| 42 | [target_address: <integer>] | ||
| 43 | [requestor_id: <integer>] | ||
| 44 | [responder_id: <integer>] | ||
| 45 | [IP: <integer>] | ||
| 46 | |||
| 47 | <proc type string>* := IA32/X64 | IA64 | ||
| 48 | |||
| 49 | <proc isa string>* := IA32 | IA64 | X64 | ||
| 50 | |||
| 51 | <proc error type strings># := | ||
| 52 | [cache error][, TLB error][, bus error][, micro-architectural error] | ||
| 53 | |||
| 54 | <proc operation string>* := unknown or generic | data read | data write | \ | ||
| 55 | instruction execution | ||
| 56 | |||
| 57 | <proc flags strings># := | ||
| 58 | [restartable][, precise IP][, overflow][, corrected] | ||
| 59 | |||
| 60 | <memory section data> := | ||
| 61 | [error_status: <integer>] | ||
| 62 | [physical_address: <integer>] | ||
| 63 | [physical_address_mask: <integer>] | ||
| 64 | [node: <integer>] | ||
| 65 | [card: <integer>] | ||
| 66 | [module: <integer>] | ||
| 67 | [bank: <integer>] | ||
| 68 | [device: <integer>] | ||
| 69 | [row: <integer>] | ||
| 70 | [column: <integer>] | ||
| 71 | [bit_position: <integer>] | ||
| 72 | [requestor_id: <integer>] | ||
| 73 | [responder_id: <integer>] | ||
| 74 | [target_id: <integer>] | ||
| 75 | [error_type: <integer>, <mem error type string>] | ||
| 76 | |||
| 77 | <mem error type string>* := | ||
| 78 | unknown | no error | single-bit ECC | multi-bit ECC | \ | ||
| 79 | single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \ | ||
| 80 | target abort | parity error | watchdog timeout | invalid address | \ | ||
| 81 | mirror broken | memory sparing | scrub corrected error | \ | ||
| 82 | scrub uncorrected error | ||
| 83 | |||
| 84 | <pcie section data> := | ||
| 85 | [port_type: <integer>, <pcie port type string>] | ||
| 86 | [version: <integer>.<integer>] | ||
| 87 | [command: <integer>, status: <integer>] | ||
| 88 | [device_id: <integer>:<integer>:<integer>.<integer> | ||
| 89 | slot: <integer> | ||
| 90 | secondary_bus: <integer> | ||
| 91 | vendor_id: <integer>, device_id: <integer> | ||
| 92 | class_code: <integer>] | ||
| 93 | [serial number: <integer>, <integer>] | ||
| 94 | [bridge: secondary_status: <integer>, control: <integer>] | ||
| 95 | |||
| 96 | <pcie port type string>* := PCIe end point | legacy PCI end point | \ | ||
| 97 | unknown | unknown | root port | upstream switch port | \ | ||
| 98 | downstream switch port | PCIe to PCI/PCI-X bridge | \ | ||
| 99 | PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \ | ||
| 100 | root complex event collector | ||
| 101 | |||
| 102 | Where [] designates that the corresponding content is optional. | ||
| 103 | |||
| 104 | All <field string> descriptions marked with * have the following format: | ||
| 105 | |||
| 106 | field: <integer>, <field string> | ||
| 107 | |||
| 108 | Where the value of <integer> should be the position of the string in | ||
| 109 | the <field string> description. Otherwise, <field string> will be "unknown". | ||
| 110 | |||
| 111 | All <field strings> descriptions marked with # have the following format: | ||
| 112 | |||
| 113 | field: <integer> | ||
| 114 | <field strings> | ||
| 115 | |||
| 116 | Where each string in <field strings> corresponds to one set bit of | ||
| 117 | <integer>. The bit position is the position of the string in the | ||
| 118 | <field strings> description. | ||
| 119 | |||
| 120 | For a more detailed explanation of every field, please refer to the | ||
| 121 | UEFI specification, version 2.3 or later, Appendix N: Common | ||
| 122 | Platform Error Record. | ||
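As an illustration, a hypothetical record for a corrected memory error, laid out according to the grammar above, might look like this (all values are made up; only the field names and strings come from the format definitions):

	APEI generic hardware error status
	severity: 2, corrected
	section: 0, severity: 2, corrected
	flags: 1
	primary
	fru_id: 00000000-0000-0000-0000-000000000000
	fru_text: DIMM_A1
	section_type: memory error
	node: 0
	card: 0
	module: 1
	bank: 2
	row: 4631
	column: 829
	error_type: 2, single-bit ECC

Here "corrected" sits at position 2 of the <severity string> list, "single-bit ECC" at position 2 of the <mem error type string> list, and bit 0 of the flags value selects "primary" from the <section flags strings> list.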
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 6cbbd20534cf..8c594c45b6a1 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
| @@ -248,6 +248,17 @@ Who: Zhang Rui <rui.zhang@intel.com> | |||
| 248 | 248 | ||
| 249 | --------------------------- | 249 | --------------------------- |
| 250 | 250 | ||
| 251 | What: CONFIG_ACPI_PROCFS_POWER | ||
| 252 | When: 2.6.39 | ||
| 253 | Why: sysfs I/F for ACPI power devices, including AC and Battery, | ||
| 254 | has been working in the upstream kernel since 2.6.24, Sep 2007. | ||
| 255 | In 2.6.37, we made the sysfs I/F always built in and this option | ||
| 256 | disabled by default. | ||
| 257 | Remove this option and the ACPI power procfs interface in 2.6.39. | ||
| 258 | Who: Zhang Rui <rui.zhang@intel.com> | ||
| 259 | |||
| 260 | --------------------------- | ||
| 261 | |||
| 251 | What: /proc/acpi/button | 262 | What: /proc/acpi/button |
| 252 | When: August 2007 | 263 | When: August 2007 |
| 253 | Why: /proc/acpi/button has been replaced by events to the input layer | 264 | Why: /proc/acpi/button has been replaced by events to the input layer |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 55fe7599bc8e..b72e071a3e5b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -199,11 +199,6 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 199 | unusable. The "log_buf_len" parameter may be useful | 199 | unusable. The "log_buf_len" parameter may be useful |
| 200 | if you need to capture more output. | 200 | if you need to capture more output. |
| 201 | 201 | ||
| 202 | acpi_display_output= [HW,ACPI] | ||
| 203 | acpi_display_output=vendor | ||
| 204 | acpi_display_output=video | ||
| 205 | See above. | ||
| 206 | |||
| 207 | acpi_irq_balance [HW,ACPI] | 202 | acpi_irq_balance [HW,ACPI] |
| 208 | ACPI will balance active IRQs | 203 | ACPI will balance active IRQs |
| 209 | default in APIC mode | 204 | default in APIC mode |
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt index cb3d15bc1aeb..b61e46f449aa 100644 --- a/Documentation/thermal/sysfs-api.txt +++ b/Documentation/thermal/sysfs-api.txt | |||
| @@ -278,3 +278,15 @@ method, the sys I/F structure will be built like this: | |||
| 278 | |---name: acpitz | 278 | |---name: acpitz |
| 279 | |---temp1_input: 37000 | 279 | |---temp1_input: 37000 |
| 280 | |---temp1_crit: 100000 | 280 | |---temp1_crit: 100000 |
| 281 | |||
| 282 | 4. Event Notification | ||
| 283 | |||
| 284 | The framework includes a simple notification mechanism, in the form of a | ||
| 285 | netlink event. Netlink socket initialization is done during the _init_ | ||
| 286 | of the framework. Drivers which intend to use the notification mechanism | ||
| 287 | just need to call generate_netlink_event() with two arguments, | ||
| 288 | (originator, event). Typically the originator will be an integer assigned | ||
| 289 | to a thermal_zone_device when it registers itself with the framework. The | ||
| 290 | event will be one of {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL, | ||
| 291 | THERMAL_DEV_FAULT}. A notification can be sent when the current temperature | ||
| 292 | crosses any of the configured thresholds. | ||
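As a rough sketch of the driver side (hypothetical code: my_check_temp and its arguments are made up, and it assumes the generate_netlink_event() declaration and the event codes are visible through <linux/thermal.h>):

	#include <linux/thermal.h>

	/* "orig" is the originator id this driver uses for its thermal zone. */
	static void my_check_temp(u32 orig, long temp, long crit_temp)
	{
		if (temp >= crit_temp)
			/* Tell user space, via a netlink event, that a critical
			 * threshold has been crossed. */
			generate_netlink_event(orig, THERMAL_CRITICAL);
	}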
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index cc8335eb3110..e5a6c3530c6c 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h | |||
| @@ -426,6 +426,11 @@ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size) | |||
| 426 | extern void iounmap (volatile void __iomem *addr); | 426 | extern void iounmap (volatile void __iomem *addr); |
| 427 | extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); | 427 | extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); |
| 428 | extern void early_iounmap (volatile void __iomem *addr, unsigned long size); | 428 | extern void early_iounmap (volatile void __iomem *addr, unsigned long size); |
| 429 | static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) | ||
| 430 | { | ||
| 431 | return ioremap(phys_addr, size); | ||
| 432 | } | ||
| 433 | |||
| 429 | 434 | ||
| 430 | /* | 435 | /* |
| 431 | * String version of IO memory access ops: | 436 | * String version of IO memory access ops: |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ec881c6bfee0..b3a71137983a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
| @@ -509,6 +509,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) | |||
| 509 | 509 | ||
| 510 | return 0; | 510 | return 0; |
| 511 | } | 511 | } |
| 512 | EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); | ||
| 512 | 513 | ||
| 513 | int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) | 514 | int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) |
| 514 | { | 515 | { |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index d6fb146c0d8b..df20723a6a1b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
| @@ -234,6 +234,7 @@ unsigned __kprobes long oops_begin(void) | |||
| 234 | bust_spinlocks(1); | 234 | bust_spinlocks(1); |
| 235 | return flags; | 235 | return flags; |
| 236 | } | 236 | } |
| 237 | EXPORT_SYMBOL_GPL(oops_begin); | ||
| 237 | 238 | ||
| 238 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) | 239 | void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) |
| 239 | { | 240 | { |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0c2b7ef7a34d..294f26da0c0c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
| 15 | #include <linux/pfn.h> | 15 | #include <linux/pfn.h> |
| 16 | #include <linux/suspend.h> | 16 | #include <linux/suspend.h> |
| 17 | #include <linux/acpi.h> | ||
| 17 | #include <linux/firmware-map.h> | 18 | #include <linux/firmware-map.h> |
| 18 | #include <linux/memblock.h> | 19 | #include <linux/memblock.h> |
| 19 | 20 | ||
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 3f3489c5ca8c..10c7ad59c0e1 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
| @@ -51,12 +51,7 @@ config ACPI_PROCFS | |||
| 51 | For backwards compatibility, this option allows | 51 | For backwards compatibility, this option allows |
| 52 | deprecated /proc/acpi/ files to exist, even when | 52 | deprecated /proc/acpi/ files to exist, even when |
| 53 | they have been replaced by functions in /sys. | 53 | they have been replaced by functions in /sys. |
| 54 | The deprecated files (and their replacements) include: | ||
| 55 | 54 | ||
| 56 | /proc/acpi/processor/*/throttling (/sys/class/thermal/ | ||
| 57 | cooling_device*/*) | ||
| 58 | /proc/acpi/video/*/brightness (/sys/class/backlight/) | ||
| 59 | /proc/acpi/thermal_zone/*/* (/sys/class/thermal/) | ||
| 60 | This option has no effect on /proc/acpi/ files | 55 | This option has no effect on /proc/acpi/ files |
| 61 | and functions which do not yet exist in /sys. | 56 | and functions which do not yet exist in /sys. |
| 62 | 57 | ||
| @@ -74,6 +69,8 @@ config ACPI_PROCFS_POWER | |||
| 74 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) | 69 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) |
| 75 | This option has no effect on /proc/acpi/ directories | 70 | This option has no effect on /proc/acpi/ directories |
| 76 | and functions, which do not yet exist in /sys | 71 | and functions, which do not yet exist in /sys |
| 72 | This option, together with the proc directories, will be | ||
| 73 | deleted in 2.6.39. | ||
| 77 | 74 | ||
| 78 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ | 75 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ |
| 79 | 76 | ||
| @@ -209,6 +206,17 @@ config ACPI_PROCESSOR | |||
| 209 | 206 | ||
| 210 | To compile this driver as a module, choose M here: | 207 | To compile this driver as a module, choose M here: |
| 211 | the module will be called processor. | 208 | the module will be called processor. |
| 209 | config ACPI_IPMI | ||
| 210 | tristate "IPMI" | ||
| 211 | depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER | ||
| 212 | default n | ||
| 213 | help | ||
| 214 | This driver enables ACPI to access the BMC controller. It uses | ||
| 215 | IPMI request/response messages to communicate with the BMC | ||
| 216 | controller, which can be found on servers. | ||
| 217 | |||
| 218 | To compile this driver as a module, choose M here: | ||
| 219 | the module will be called acpi_ipmi. | ||
| 212 | 220 | ||
| 213 | config ACPI_HOTPLUG_CPU | 221 | config ACPI_HOTPLUG_CPU |
| 214 | bool | 222 | bool |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 3d031d02e54b..d113fa5100b2 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
| @@ -24,7 +24,7 @@ acpi-y += atomicio.o | |||
| 24 | # sleep related files | 24 | # sleep related files |
| 25 | acpi-y += wakeup.o | 25 | acpi-y += wakeup.o |
| 26 | acpi-y += sleep.o | 26 | acpi-y += sleep.o |
| 27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o | 27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o |
| 28 | 28 | ||
| 29 | 29 | ||
| 30 | # | 30 | # |
| @@ -69,5 +69,6 @@ processor-y += processor_idle.o processor_thermal.o | |||
| 69 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o | 69 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o |
| 70 | 70 | ||
| 71 | obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o | 71 | obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o |
| 72 | obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o | ||
| 72 | 73 | ||
| 73 | obj-$(CONFIG_ACPI_APEI) += apei/ | 74 | obj-$(CONFIG_ACPI_APEI) += apei/ |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 25d3aaebc10d..58c3f74bd84c 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
| @@ -197,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device) | |||
| 197 | { | 197 | { |
| 198 | struct proc_dir_entry *entry = NULL; | 198 | struct proc_dir_entry *entry = NULL; |
| 199 | 199 | ||
| 200 | 200 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," | |
| 201 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
| 201 | if (!acpi_device_dir(device)) { | 202 | if (!acpi_device_dir(device)) { |
| 202 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | 203 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), |
| 203 | acpi_ac_dir); | 204 | acpi_ac_dir); |
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c new file mode 100644 index 000000000000..f40acef80269 --- /dev/null +++ b/drivers/acpi/acpi_ipmi.c | |||
| @@ -0,0 +1,525 @@ | |||
| 1 | /* | ||
| 2 | * acpi_ipmi.c - ACPI IPMI opregion | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Intel Corporation | ||
| 5 | * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com> | ||
| 6 | * | ||
| 7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or (at | ||
| 12 | * your option) any later version. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, but | ||
| 15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 17 | * General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU General Public License along | ||
| 20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 21 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 22 | * | ||
| 23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/kernel.h> | ||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/types.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | #include <linux/proc_fs.h> | ||
| 32 | #include <linux/seq_file.h> | ||
| 33 | #include <linux/interrupt.h> | ||
| 34 | #include <linux/list.h> | ||
| 35 | #include <linux/spinlock.h> | ||
| 36 | #include <linux/io.h> | ||
| 37 | #include <acpi/acpi_bus.h> | ||
| 38 | #include <acpi/acpi_drivers.h> | ||
| 39 | #include <linux/ipmi.h> | ||
| 40 | #include <linux/device.h> | ||
| 41 | #include <linux/pnp.h> | ||
| 42 | |||
| 43 | MODULE_AUTHOR("Zhao Yakui"); | ||
| 44 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); | ||
| 45 | MODULE_LICENSE("GPL"); | ||
| 46 | |||
| 47 | #define IPMI_FLAGS_HANDLER_INSTALL 0 | ||
| 48 | |||
| 49 | #define ACPI_IPMI_OK 0 | ||
| 50 | #define ACPI_IPMI_TIMEOUT 0x10 | ||
| 51 | #define ACPI_IPMI_UNKNOWN 0x07 | ||
| 52 | /* the IPMI timeout is 5s */ | ||
| 53 | #define IPMI_TIMEOUT (5 * HZ) | ||
| 54 | |||
| 55 | struct acpi_ipmi_device { | ||
| 56 | /* the device list attached to driver_data.ipmi_devices */ | ||
| 57 | struct list_head head; | ||
| 58 | /* the IPMI request message list */ | ||
| 59 | struct list_head tx_msg_list; | ||
| 60 | struct mutex tx_msg_lock; | ||
| 61 | acpi_handle handle; | ||
| 62 | struct pnp_dev *pnp_dev; | ||
| 63 | ipmi_user_t user_interface; | ||
| 64 | int ipmi_ifnum; /* IPMI interface number */ | ||
| 65 | long curr_msgid; | ||
| 66 | unsigned long flags; | ||
| 67 | struct ipmi_smi_info smi_data; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct ipmi_driver_data { | ||
| 71 | struct list_head ipmi_devices; | ||
| 72 | struct ipmi_smi_watcher bmc_events; | ||
| 73 | struct ipmi_user_hndl ipmi_hndlrs; | ||
| 74 | struct mutex ipmi_lock; | ||
| 75 | }; | ||
| 76 | |||
| 77 | struct acpi_ipmi_msg { | ||
| 78 | struct list_head head; | ||
| 79 | /* | ||
| 80 | * Generally speaking, the addr type should be SI_ADDR_TYPE and | ||
| 81 | * the addr channel should be BMC. | ||
| 82 | * In fact it can also be of IPMB type, but then we would have to | ||
| 83 | * parse it from the Netfn command buffer, which is complex enough | ||
| 84 | * that it is skipped here. | ||
| 85 | */ | ||
| 86 | struct ipmi_addr addr; | ||
| 87 | long tx_msgid; | ||
| 88 | /* it is used to track whether the IPMI message is finished */ | ||
| 89 | struct completion tx_complete; | ||
| 90 | struct kernel_ipmi_msg tx_message; | ||
| 91 | int msg_done; | ||
| 92 | /* tx data, copied from the ACPI object buffer */ | ||
| 93 | u8 tx_data[64]; | ||
| 94 | int tx_len; | ||
| 95 | u8 rx_data[64]; | ||
| 96 | int rx_len; | ||
| 97 | struct acpi_ipmi_device *device; | ||
| 98 | }; | ||
| 99 | |||
| 100 | /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ | ||
| 101 | struct acpi_ipmi_buffer { | ||
| 102 | u8 status; | ||
| 103 | u8 length; | ||
| 104 | u8 data[64]; | ||
| 105 | }; | ||
| 106 | |||
| 107 | static void ipmi_register_bmc(int iface, struct device *dev); | ||
| 108 | static void ipmi_bmc_gone(int iface); | ||
| 109 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); | ||
| 110 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
| 111 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); | ||
| 112 | |||
| 113 | static struct ipmi_driver_data driver_data = { | ||
| 114 | .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), | ||
| 115 | .bmc_events = { | ||
| 116 | .owner = THIS_MODULE, | ||
| 117 | .new_smi = ipmi_register_bmc, | ||
| 118 | .smi_gone = ipmi_bmc_gone, | ||
| 119 | }, | ||
| 120 | .ipmi_hndlrs = { | ||
| 121 | .ipmi_recv_hndl = ipmi_msg_handler, | ||
| 122 | }, | ||
| 123 | }; | ||
| 124 | |||
| 125 | static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) | ||
| 126 | { | ||
| 127 | struct acpi_ipmi_msg *ipmi_msg; | ||
| 128 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | ||
| 129 | |||
| 130 | ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); | ||
| 131 | if (!ipmi_msg) { | ||
| 132 | dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n"); | ||
| 133 | return NULL; | ||
| 134 | } | ||
| 135 | init_completion(&ipmi_msg->tx_complete); | ||
| 136 | INIT_LIST_HEAD(&ipmi_msg->head); | ||
| 137 | ipmi_msg->device = ipmi; | ||
| 138 | return ipmi_msg; | ||
| 139 | } | ||
| 140 | |||
| 141 | #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) | ||
| 142 | #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) | ||
| 143 | static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, | ||
| 144 | acpi_physical_address address, | ||
| 145 | acpi_integer *value) | ||
| 146 | { | ||
| 147 | struct kernel_ipmi_msg *msg; | ||
| 148 | struct acpi_ipmi_buffer *buffer; | ||
| 149 | struct acpi_ipmi_device *device; | ||
| 150 | |||
| 151 | msg = &tx_msg->tx_message; | ||
| 152 | /* | ||
| 153 | * IPMI network function and command are encoded in the address | ||
| 154 | * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. | ||
| 155 | */ | ||
| 156 | msg->netfn = IPMI_OP_RGN_NETFN(address); | ||
| 157 | msg->cmd = IPMI_OP_RGN_CMD(address); | ||
| 158 | msg->data = tx_msg->tx_data; | ||
| 159 | /* | ||
| 160 | * value is the parameter passed by the IPMI opregion space handler. | ||
| 161 | * It points to the IPMI request message buffer | ||
| 162 | */ | ||
| 163 | buffer = (struct acpi_ipmi_buffer *)value; | ||
| 164 | /* copy the tx message data */ | ||
| 165 | msg->data_len = buffer->length; | ||
| 166 | memcpy(tx_msg->tx_data, buffer->data, msg->data_len); | ||
| 167 | /* | ||
| 168 | * now the default type is SYSTEM_INTERFACE and channel type is BMC. | ||
| 169 | * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, | ||
| 170 | * the addr type should be changed to IPMB. Then we will have to parse | ||
| 171 | * the IPMI request message buffer to get the IPMB address. | ||
| 172 | * If so, please fix me. | ||
| 173 | */ | ||
| 174 | tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
| 175 | tx_msg->addr.channel = IPMI_BMC_CHANNEL; | ||
| 176 | tx_msg->addr.data[0] = 0; | ||
| 177 | |||
| 178 | /* Get the msgid */ | ||
| 179 | device = tx_msg->device; | ||
| 180 | mutex_lock(&device->tx_msg_lock); | ||
| 181 | device->curr_msgid++; | ||
| 182 | tx_msg->tx_msgid = device->curr_msgid; | ||
| 183 | mutex_unlock(&device->tx_msg_lock); | ||
| 184 | } | ||
| 185 | |||
| 186 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, | ||
| 187 | acpi_integer *value, int rem_time) | ||
| 188 | { | ||
| 189 | struct acpi_ipmi_buffer *buffer; | ||
| 190 | |||
| 191 | /* | ||
| 192 | * value is also used as an output parameter. It represents the | ||
| 193 | * response IPMI message returned by the IPMI command. | ||
| 194 | */ | ||
| 195 | buffer = (struct acpi_ipmi_buffer *)value; | ||
| 196 | if (!rem_time && !msg->msg_done) { | ||
| 197 | buffer->status = ACPI_IPMI_TIMEOUT; | ||
| 198 | return; | ||
| 199 | } | ||
| 200 | /* | ||
| 201 | * If the msg_done flag is not set or the receive length is zero, the | ||
| 202 | * IPMI command was not executed correctly. | ||
| 203 | * The status code will be ACPI_IPMI_UNKNOWN. | ||
| 204 | */ | ||
| 205 | if (!msg->msg_done || !msg->rx_len) { | ||
| 206 | buffer->status = ACPI_IPMI_UNKNOWN; | ||
| 207 | return; | ||
| 208 | } | ||
| 209 | /* | ||
| 210 | * If the IPMI response message is obtained correctly, the status code | ||
| 211 | * will be ACPI_IPMI_OK | ||
| 212 | */ | ||
| 213 | buffer->status = ACPI_IPMI_OK; | ||
| 214 | buffer->length = msg->rx_len; | ||
| 215 | memcpy(buffer->data, msg->rx_data, msg->rx_len); | ||
| 216 | } | ||
| 217 | |||
| 218 | static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) | ||
| 219 | { | ||
| 220 | struct acpi_ipmi_msg *tx_msg, *temp; | ||
| 221 | int count = HZ / 10; | ||
| 222 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | ||
| 223 | |||
| 224 | list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { | ||
| 225 | /* wake up the thread sleeping on the Tx msg */ | ||
| 226 | complete(&tx_msg->tx_complete); | ||
| 227 | } | ||
| 228 | |||
| 229 | /* wait for about 100ms to flush the tx message list */ | ||
| 230 | while (count--) { | ||
| 231 | if (list_empty(&ipmi->tx_msg_list)) | ||
| 232 | break; | ||
| 233 | schedule_timeout(1); | ||
| 234 | } | ||
| 235 | if (!list_empty(&ipmi->tx_msg_list)) | ||
| 236 | dev_warn(&pnp_dev->dev, "tx msg list is not empty\n"); | ||
| 237 | } | ||
| 238 | |||
| 239 | static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | ||
| 240 | { | ||
| 241 | struct acpi_ipmi_device *ipmi_device = user_msg_data; | ||
| 242 | int msg_found = 0; | ||
| 243 | struct acpi_ipmi_msg *tx_msg; | ||
| 244 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; | ||
| 245 | |||
| 246 | if (msg->user != ipmi_device->user_interface) { | ||
| 247 | dev_warn(&pnp_dev->dev, "Unexpected response: " | ||
| 248 | "returned user %p, expected user %p\n", | ||
| 249 | msg->user, ipmi_device->user_interface); | ||
| 250 | ipmi_free_recv_msg(msg); | ||
| 251 | return; | ||
| 252 | } | ||
| 253 | mutex_lock(&ipmi_device->tx_msg_lock); | ||
| 254 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { | ||
| 255 | if (msg->msgid == tx_msg->tx_msgid) { | ||
| 256 | msg_found = 1; | ||
| 257 | break; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | mutex_unlock(&ipmi_device->tx_msg_lock); | ||
| 262 | if (!msg_found) { | ||
| 263 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " | ||
| 264 | "returned.\n", msg->msgid); | ||
| 265 | ipmi_free_recv_msg(msg); | ||
| 266 | return; | ||
| 267 | } | ||
| 268 | |||
| 269 | if (msg->msg.data_len) { | ||
| 270 | /* copy the response data to Rx_data buffer */ | ||
| 271 | memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len); | ||
| 272 | tx_msg->rx_len = msg->msg.data_len; | ||
| 273 | tx_msg->msg_done = 1; | ||
| 274 | } | ||
| 275 | complete(&tx_msg->tx_complete); | ||
| 276 | ipmi_free_recv_msg(msg); | ||
| 277 | }; | ||
| 278 | |||
| 279 | static void ipmi_register_bmc(int iface, struct device *dev) | ||
| 280 | { | ||
| 281 | struct acpi_ipmi_device *ipmi_device, *temp; | ||
| 282 | struct pnp_dev *pnp_dev; | ||
| 283 | ipmi_user_t user; | ||
| 284 | int err; | ||
| 285 | struct ipmi_smi_info smi_data; | ||
| 286 | acpi_handle handle; | ||
| 287 | |||
| 288 | err = ipmi_get_smi_info(iface, &smi_data); | ||
| 289 | |||
| 290 | if (err) | ||
| 291 | return; | ||
| 292 | |||
| 293 | if (smi_data.addr_src != SI_ACPI) { | ||
| 294 | put_device(smi_data.dev); | ||
| 295 | return; | ||
| 296 | } | ||
| 297 | |||
| 298 | handle = smi_data.addr_info.acpi_info.acpi_handle; | ||
| 299 | |||
| 300 | mutex_lock(&driver_data.ipmi_lock); | ||
| 301 | list_for_each_entry(temp, &driver_data.ipmi_devices, head) { | ||
| 302 | /* | ||
| 303 | * if the corresponding ACPI handle is already added | ||
| 304 | * to the device list, don't add it again. | ||
| 305 | */ | ||
| 306 | if (temp->handle == handle) | ||
| 307 | goto out; | ||
| 308 | } | ||
| 309 | |||
| 310 | ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); | ||
| 311 | |||
| 312 | if (!ipmi_device) | ||
| 313 | goto out; | ||
| 314 | |||
| 315 | pnp_dev = to_pnp_dev(smi_data.dev); | ||
| 316 | ipmi_device->handle = handle; | ||
| 317 | ipmi_device->pnp_dev = pnp_dev; | ||
| 318 | |||
| 319 | err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, | ||
| 320 | ipmi_device, &user); | ||
| 321 | if (err) { | ||
| 322 | dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n"); | ||
| 323 | kfree(ipmi_device); | ||
| 324 | goto out; | ||
| 325 | } | ||
| 326 | acpi_add_ipmi_device(ipmi_device); | ||
| 327 | ipmi_device->user_interface = user; | ||
| 328 | ipmi_device->ipmi_ifnum = iface; | ||
| 329 | mutex_unlock(&driver_data.ipmi_lock); | ||
| 330 | memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); | ||
| 331 | return; | ||
| 332 | |||
| 333 | out: | ||
| 334 | mutex_unlock(&driver_data.ipmi_lock); | ||
| 335 | put_device(smi_data.dev); | ||
| 336 | return; | ||
| 337 | } | ||
| 338 | |||
| 339 | static void ipmi_bmc_gone(int iface) | ||
| 340 | { | ||
| 341 | struct acpi_ipmi_device *ipmi_device, *temp; | ||
| 342 | |||
| 343 | mutex_lock(&driver_data.ipmi_lock); | ||
| 344 | list_for_each_entry_safe(ipmi_device, temp, | ||
| 345 | &driver_data.ipmi_devices, head) { | ||
| 346 | if (ipmi_device->ipmi_ifnum != iface) | ||
| 347 | continue; | ||
| 348 | |||
| 349 | acpi_remove_ipmi_device(ipmi_device); | ||
| 350 | put_device(ipmi_device->smi_data.dev); | ||
| 351 | kfree(ipmi_device); | ||
| 352 | break; | ||
| 353 | } | ||
| 354 | mutex_unlock(&driver_data.ipmi_lock); | ||
| 355 | } | ||
| 356 | /* -------------------------------------------------------------------------- | ||
| 357 | * Address Space Management | ||
| 358 | * -------------------------------------------------------------------------- */ | ||
| 359 | /* | ||
| 360 | * This is the IPMI opregion space handler. | ||
| 361 | * @function: indicates read or write. In fact, as the IPMI message is | ||
| 362 | * driven by commands, only write is meaningful. | ||
| 363 | * @address: This contains the netfn/command of IPMI request message. | ||
| 364 | * @bits : not used. | ||
| 365 | * @value : it is an in/out parameter. It points to the IPMI message buffer. | ||
| 366 | * Before the IPMI message is sent, it represents the actual request | ||
| 367 | * IPMI message. After the IPMI message is finished, it represents | ||
| 368 | * the response IPMI message returned by IPMI command. | ||
| 369 | * @handler_context: IPMI device context. | ||
| 370 | */ | ||
| 371 | |||
| 372 | static acpi_status | ||
| 373 | acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | ||
| 374 | u32 bits, acpi_integer *value, | ||
| 375 | void *handler_context, void *region_context) | ||
| 376 | { | ||
| 377 | struct acpi_ipmi_msg *tx_msg; | ||
| 378 | struct acpi_ipmi_device *ipmi_device = handler_context; | ||
| 379 | int err, rem_time; | ||
| 380 | acpi_status status; | ||
| 381 | /* | ||
| 382 | * IPMI opregion message. | ||
| 383 | * The IPMI message is first written to the BMC and system software | ||
| 384 | * then gets the response. So a read access to the IPMI opregion | ||
| 385 | * is meaningless. | ||
| 386 | */ | ||
| 387 | if ((function & ACPI_IO_MASK) == ACPI_READ) | ||
| 388 | return AE_TYPE; | ||
| 389 | |||
| 390 | if (!ipmi_device->user_interface) | ||
| 391 | return AE_NOT_EXIST; | ||
| 392 | |||
| 393 | tx_msg = acpi_alloc_ipmi_msg(ipmi_device); | ||
| 394 | if (!tx_msg) | ||
| 395 | return AE_NO_MEMORY; | ||
| 396 | |||
| 397 | acpi_format_ipmi_msg(tx_msg, address, value); | ||
| 398 | mutex_lock(&ipmi_device->tx_msg_lock); | ||
| 399 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); | ||
| 400 | mutex_unlock(&ipmi_device->tx_msg_lock); | ||
| 401 | err = ipmi_request_settime(ipmi_device->user_interface, | ||
| 402 | &tx_msg->addr, | ||
| 403 | tx_msg->tx_msgid, | ||
| 404 | &tx_msg->tx_message, | ||
| 405 | NULL, 0, 0, 0); | ||
| 406 | if (err) { | ||
| 407 | status = AE_ERROR; | ||
| 408 | goto end_label; | ||
| 409 | } | ||
| 410 | rem_time = wait_for_completion_timeout(&tx_msg->tx_complete, | ||
| 411 | IPMI_TIMEOUT); | ||
| 412 | acpi_format_ipmi_response(tx_msg, value, rem_time); | ||
| 413 | status = AE_OK; | ||
| 414 | |||
| 415 | end_label: | ||
| 416 | mutex_lock(&ipmi_device->tx_msg_lock); | ||
| 417 | list_del(&tx_msg->head); | ||
| 418 | mutex_unlock(&ipmi_device->tx_msg_lock); | ||
| 419 | kfree(tx_msg); | ||
| 420 | return status; | ||
| 421 | } | ||
| 422 | |||
| 423 | static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi) | ||
| 424 | { | ||
| 425 | if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | ||
| 426 | return; | ||
| 427 | |||
| 428 | acpi_remove_address_space_handler(ipmi->handle, | ||
| 429 | ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler); | ||
| 430 | |||
| 431 | clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
| 432 | } | ||
| 433 | |||
| 434 | static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi) | ||
| 435 | { | ||
| 436 | acpi_status status; | ||
| 437 | |||
| 438 | if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) | ||
| 439 | return 0; | ||
| 440 | |||
| 441 | status = acpi_install_address_space_handler(ipmi->handle, | ||
| 442 | ACPI_ADR_SPACE_IPMI, | ||
| 443 | &acpi_ipmi_space_handler, | ||
| 444 | NULL, ipmi); | ||
| 445 | if (ACPI_FAILURE(status)) { | ||
| 446 | struct pnp_dev *pnp_dev = ipmi->pnp_dev; | ||
| 447 | dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " | ||
| 448 | "handler\n"); | ||
| 449 | return -EINVAL; | ||
| 450 | } | ||
| 451 | set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); | ||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
| 456 | { | ||
| 457 | |||
| 458 | INIT_LIST_HEAD(&ipmi_device->head); | ||
| 459 | |||
| 460 | mutex_init(&ipmi_device->tx_msg_lock); | ||
| 461 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | ||
| 462 | ipmi_install_space_handler(ipmi_device); | ||
| 463 | |||
| 464 | list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); | ||
| 465 | } | ||
| 466 | |||
| 467 | static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device) | ||
| 468 | { | ||
| 469 | /* | ||
| 470 | * If the IPMI user interface is created, it should be | ||
| 471 | * destroyed. | ||
| 472 | */ | ||
| 473 | if (ipmi_device->user_interface) { | ||
| 474 | ipmi_destroy_user(ipmi_device->user_interface); | ||
| 475 | ipmi_device->user_interface = NULL; | ||
| 476 | } | ||
| 477 | /* flush the Tx_msg list */ | ||
| 478 | if (!list_empty(&ipmi_device->tx_msg_list)) | ||
| 479 | ipmi_flush_tx_msg(ipmi_device); | ||
| 480 | |||
| 481 | list_del(&ipmi_device->head); | ||
| 482 | ipmi_remove_space_handler(ipmi_device); | ||
| 483 | } | ||
| 484 | |||
| 485 | static int __init acpi_ipmi_init(void) | ||
| 486 | { | ||
| 487 | int result = 0; | ||
| 488 | |||
| 489 | if (acpi_disabled) | ||
| 490 | return result; | ||
| 491 | |||
| 492 | mutex_init(&driver_data.ipmi_lock); | ||
| 493 | |||
| 494 | result = ipmi_smi_watcher_register(&driver_data.bmc_events); | ||
| 495 | |||
| 496 | return result; | ||
| 497 | } | ||
| 498 | |||
| 499 | static void __exit acpi_ipmi_exit(void) | ||
| 500 | { | ||
| 501 | struct acpi_ipmi_device *ipmi_device, *temp; | ||
| 502 | |||
| 503 | if (acpi_disabled) | ||
| 504 | return; | ||
| 505 | |||
| 506 | ipmi_smi_watcher_unregister(&driver_data.bmc_events); | ||
| 507 | |||
| 508 | /* | ||
| 509 | * When an smi_watcher is unregistered, it is only deleted | ||
| 510 | * from the smi_watcher list; the smi_gone callback function | ||
| 511 | * is not called. So explicitly uninstall the ACPI IPMI opregion | ||
| 512 | * handler and free it. | ||
| 513 | */ | ||
| 514 | mutex_lock(&driver_data.ipmi_lock); | ||
| 515 | list_for_each_entry_safe(ipmi_device, temp, | ||
| 516 | &driver_data.ipmi_devices, head) { | ||
| 517 | acpi_remove_ipmi_device(ipmi_device); | ||
| 518 | put_device(ipmi_device->smi_data.dev); | ||
| 519 | kfree(ipmi_device); | ||
| 520 | } | ||
| 521 | mutex_unlock(&driver_data.ipmi_lock); | ||
| 522 | } | ||
| 523 | |||
| 524 | module_init(acpi_ipmi_init); | ||
| 525 | module_exit(acpi_ipmi_exit); | ||
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index a7e1d1aa4107..eec2eadd2431 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile | |||
| @@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ | |||
| 14 | 14 | ||
| 15 | acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ | 15 | acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ |
| 16 | evmisc.o evrgnini.o evxface.o evxfregn.o \ | 16 | evmisc.o evrgnini.o evxface.o evxfregn.o \ |
| 17 | evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o | 17 | evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o |
| 18 | 18 | ||
| 19 | acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ | 19 | acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ |
| 20 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ | 20 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index a6f99cc37a19..70e0b28801aa 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
| @@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void); | |||
| 51 | 51 | ||
| 52 | acpi_status acpi_ev_install_xrupt_handlers(void); | 52 | acpi_status acpi_ev_install_xrupt_handlers(void); |
| 53 | 53 | ||
| 54 | acpi_status acpi_ev_install_fadt_gpes(void); | ||
| 55 | |||
| 56 | u32 acpi_ev_fixed_event_detect(void); | 54 | u32 acpi_ev_fixed_event_detect(void); |
| 57 | 55 | ||
| 58 | /* | 56 | /* |
| @@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); | |||
| 82 | 80 | ||
| 83 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 81 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
| 84 | 82 | ||
| 85 | acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 83 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); |
| 86 | 84 | ||
| 87 | acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 85 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); |
| 88 | 86 | ||
| 89 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | 87 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, |
| 90 | u32 gpe_number); | 88 | u32 gpe_number); |
| @@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, | |||
| 93 | struct acpi_gpe_block_info | 91 | struct acpi_gpe_block_info |
| 94 | *gpe_block); | 92 | *gpe_block); |
| 95 | 93 | ||
| 94 | acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info); | ||
| 95 | |||
| 96 | /* | 96 | /* |
| 97 | * evgpeblk - Upper-level GPE block support | 97 | * evgpeblk - Upper-level GPE block support |
| 98 | */ | 98 | */ |
| @@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
| 107 | acpi_status | 107 | acpi_status |
| 108 | acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 108 | acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
| 109 | struct acpi_gpe_block_info *gpe_block, | 109 | struct acpi_gpe_block_info *gpe_block, |
| 110 | void *ignored); | 110 | void *context); |
| 111 | 111 | ||
| 112 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); | 112 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); |
| 113 | 113 | ||
| 114 | u32 | 114 | u32 |
| 115 | acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, | 115 | acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, |
| 116 | struct acpi_gpe_event_info *gpe_event_info, | ||
| 116 | u32 gpe_number); | 117 | u32 gpe_number); |
| 117 | 118 | ||
| 118 | /* | 119 | /* |
| @@ -126,10 +127,6 @@ acpi_status | |||
| 126 | acpi_ev_match_gpe_method(acpi_handle obj_handle, | 127 | acpi_ev_match_gpe_method(acpi_handle obj_handle, |
| 127 | u32 level, void *context, void **return_value); | 128 | u32 level, void *context, void **return_value); |
| 128 | 129 | ||
| 129 | acpi_status | ||
| 130 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
| 131 | u32 level, void *context, void **return_value); | ||
| 132 | |||
| 133 | /* | 130 | /* |
| 134 | * evgpeutil - GPE utilities | 131 | * evgpeutil - GPE utilities |
| 135 | */ | 132 | */ |
| @@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); | |||
| 138 | 135 | ||
| 139 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); | 136 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); |
| 140 | 137 | ||
| 138 | acpi_status | ||
| 139 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
| 140 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
| 141 | |||
| 141 | struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); | 142 | struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); |
| 142 | 143 | ||
| 143 | acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); | 144 | acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ad88fcae4eb9..9bb69c59bb12 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
| @@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running; | |||
| 146 | 146 | ||
| 147 | extern u32 acpi_gbl_nesting_level; | 147 | extern u32 acpi_gbl_nesting_level; |
| 148 | 148 | ||
| 149 | ACPI_EXTERN u32 acpi_gpe_count; | ||
| 150 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
| 151 | |||
| 149 | /* Support for dynamic control method tracing mechanism */ | 152 | /* Support for dynamic control method tracing mechanism */ |
| 150 | 153 | ||
| 151 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | 154 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; |
| @@ -370,7 +373,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler | |||
| 370 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; | 373 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; |
| 371 | ACPI_EXTERN struct acpi_gpe_block_info | 374 | ACPI_EXTERN struct acpi_gpe_block_info |
| 372 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; | 375 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; |
| 373 | ACPI_EXTERN u8 acpi_all_gpes_initialized; | 376 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; |
| 377 | ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; | ||
| 378 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; | ||
| 374 | 379 | ||
| 375 | /***************************************************************************** | 380 | /***************************************************************************** |
| 376 | * | 381 | * |
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 167470ad2d21..258d628793ea 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h | |||
| @@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, | |||
| 94 | struct acpi_gpe_register_info *gpe_register_info); | 94 | struct acpi_gpe_register_info *gpe_register_info); |
| 95 | 95 | ||
| 96 | acpi_status | 96 | acpi_status |
| 97 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); | 97 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action); |
| 98 | 98 | ||
| 99 | acpi_status | 99 | acpi_status |
| 100 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 100 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 2ceb0c05b2d7..74000f5b7dab 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
| @@ -408,17 +408,18 @@ struct acpi_predefined_data { | |||
| 408 | 408 | ||
| 409 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ | 409 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ |
| 410 | 410 | ||
| 411 | struct acpi_handler_info { | 411 | struct acpi_gpe_handler_info { |
| 412 | acpi_event_handler address; /* Address of handler, if any */ | 412 | acpi_gpe_handler address; /* Address of handler, if any */ |
| 413 | void *context; /* Context to be passed to handler */ | 413 | void *context; /* Context to be passed to handler */ |
| 414 | struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ | 414 | struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ |
| 415 | u8 orig_flags; /* Original misc info about this GPE */ | 415 | u8 original_flags; /* Original (pre-handler) GPE info */ |
| 416 | u8 orig_enabled; /* Set if the GPE was originally enabled */ | 416 | u8 originally_enabled; /* True if GPE was originally enabled */ |
| 417 | }; | 417 | }; |
| 418 | 418 | ||
| 419 | union acpi_gpe_dispatch_info { | 419 | union acpi_gpe_dispatch_info { |
| 420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ | 420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ |
| 421 | struct acpi_handler_info *handler; | 421 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ |
| 422 | struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ | ||
| 422 | }; | 423 | }; |
| 423 | 424 | ||
| 424 | /* | 425 | /* |
| @@ -458,7 +459,7 @@ struct acpi_gpe_block_info { | |||
| 458 | u32 register_count; /* Number of register pairs in block */ | 459 | u32 register_count; /* Number of register pairs in block */ |
| 459 | u16 gpe_count; /* Number of individual GPEs in block */ | 460 | u16 gpe_count; /* Number of individual GPEs in block */ |
| 460 | u8 block_base_number; /* Base GPE number for this block */ | 461 | u8 block_base_number; /* Base GPE number for this block */ |
| 461 | u8 initialized; /* If set, the GPE block has been initialized */ | 462 | u8 initialized; /* TRUE if this block is initialized */ |
| 462 | }; | 463 | }; |
| 463 | 464 | ||
| 464 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ | 465 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ |
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index c61c3039c31a..e5e313c663a5 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c | |||
| @@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void) | |||
| 217 | status_bit_mask) | 217 | status_bit_mask) |
| 218 | && (fixed_enable & acpi_gbl_fixed_event_info[i]. | 218 | && (fixed_enable & acpi_gbl_fixed_event_info[i]. |
| 219 | enable_bit_mask)) { | 219 | enable_bit_mask)) { |
| 220 | /* | ||
| 221 | * Found an active (signalled) event. Invoke global event | ||
| 222 | * handler if present. | ||
| 223 | */ | ||
| 224 | acpi_fixed_event_count[i]++; | ||
| 225 | if (acpi_gbl_global_event_handler) { | ||
| 226 | acpi_gbl_global_event_handler | ||
| 227 | (ACPI_EVENT_TYPE_FIXED, NULL, i, | ||
| 228 | acpi_gbl_global_event_handler_context); | ||
| 229 | } | ||
| 220 | 230 | ||
| 221 | /* Found an active (signalled) event */ | ||
| 222 | acpi_os_fixed_event_count(i); | ||
| 223 | int_status |= acpi_ev_fixed_event_dispatch(i); | 231 | int_status |= acpi_ev_fixed_event_dispatch(i); |
| 224 | } | 232 | } |
| 225 | } | 233 | } |
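The hunk above makes the fixed-event path bump acpi_fixed_event_count[] and invoke an optional global event handler. A minimal sketch of a client of that hook follows (hypothetical code: the handler name and body are made up, and it assumes the registration entry point added elsewhere in this series is acpi_install_global_event_handler()):

	#include <linux/kernel.h>
	#include <acpi/acpi.h>

	/* Matches the four arguments passed from acpi_ev_fixed_event_detect() above. */
	static void my_global_event_handler(u32 event_type, acpi_handle device,
					    u32 event_number, void *context)
	{
		if (event_type == ACPI_EVENT_TYPE_FIXED)
			pr_debug("ACPI fixed event %u fired\n", event_number);
	}

	/* Presumed registration, done once at init time:
	 *	acpi_install_global_event_handler(my_global_event_handler, NULL);
	 */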
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index f226eac314db..7c339d34ab42 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe") | |||
| 52 | /* Local prototypes */ | 52 | /* Local prototypes */ |
| 53 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); | 53 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); |
| 54 | 54 | ||
| 55 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); | ||
| 56 | |||
| 55 | /******************************************************************************* | 57 | /******************************************************************************* |
| 56 | * | 58 | * |
| 57 | * FUNCTION: acpi_ev_update_gpe_enable_mask | 59 | * FUNCTION: acpi_ev_update_gpe_enable_mask |
| @@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
| 102 | * | 104 | * |
| 103 | * RETURN: Status | 105 | * RETURN: Status |
| 104 | * | 106 | * |
| 105 | * DESCRIPTION: Clear the given GPE from stale events and enable it. | 107 | * DESCRIPTION: Clear a GPE of stale events and enable it. |
| 106 | * | 108 | * |
| 107 | ******************************************************************************/ | 109 | ******************************************************************************/ |
| 108 | acpi_status | 110 | acpi_status |
| @@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 113 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | 115 | ACPI_FUNCTION_TRACE(ev_enable_gpe); |
| 114 | 116 | ||
| 115 | /* | 117 | /* |
| 116 | * We will only allow a GPE to be enabled if it has either an | 118 | * We will only allow a GPE to be enabled if it has either an associated |
| 117 | * associated method (_Lxx/_Exx) or a handler. Otherwise, the | 119 | * method (_Lxx/_Exx) or a handler, or is using the implicit notify |
| 118 | * GPE will be immediately disabled by acpi_ev_gpe_dispatch the | 120 | * feature. Otherwise, the GPE will be immediately disabled by |
| 119 | * first time it fires. | 121 | * acpi_ev_gpe_dispatch the first time it fires. |
| 120 | */ | 122 | */ |
| 121 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { | 123 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == |
| 124 | ACPI_GPE_DISPATCH_NONE) { | ||
| 122 | return_ACPI_STATUS(AE_NO_HANDLER); | 125 | return_ACPI_STATUS(AE_NO_HANDLER); |
| 123 | } | 126 | } |
| 124 | 127 | ||
| @@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 137 | 140 | ||
| 138 | /******************************************************************************* | 141 | /******************************************************************************* |
| 139 | * | 142 | * |
| 140 | * FUNCTION: acpi_raw_enable_gpe | 143 | * FUNCTION: acpi_ev_add_gpe_reference |
| 141 | * | 144 | * |
| 142 | * PARAMETERS: gpe_event_info - GPE to enable | 145 | * PARAMETERS: gpe_event_info - Add a reference to this GPE |
| 143 | * | 146 | * |
| 144 | * RETURN: Status | 147 | * RETURN: Status |
| 145 | * | 148 | * |
| @@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 148 | * | 151 | * |
| 149 | ******************************************************************************/ | 152 | ******************************************************************************/ |
| 150 | 153 | ||
| 151 | acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | 154 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) |
| 152 | { | 155 | { |
| 153 | acpi_status status = AE_OK; | 156 | acpi_status status = AE_OK; |
| 154 | 157 | ||
| 158 | ACPI_FUNCTION_TRACE(ev_add_gpe_reference); | ||
| 159 | |||
| 155 | if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { | 160 | if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { |
| 156 | return_ACPI_STATUS(AE_LIMIT); | 161 | return_ACPI_STATUS(AE_LIMIT); |
| 157 | } | 162 | } |
| 158 | 163 | ||
| 159 | gpe_event_info->runtime_count++; | 164 | gpe_event_info->runtime_count++; |
| 160 | if (gpe_event_info->runtime_count == 1) { | 165 | if (gpe_event_info->runtime_count == 1) { |
| 166 | |||
| 167 | /* Enable on first reference */ | ||
| 168 | |||
| 161 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 169 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
| 162 | if (ACPI_SUCCESS(status)) { | 170 | if (ACPI_SUCCESS(status)) { |
| 163 | status = acpi_ev_enable_gpe(gpe_event_info); | 171 | status = acpi_ev_enable_gpe(gpe_event_info); |
| @@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 173 | 181 | ||
| 174 | /******************************************************************************* | 182 | /******************************************************************************* |
| 175 | * | 183 | * |
| 176 | * FUNCTION: acpi_raw_disable_gpe | 184 | * FUNCTION: acpi_ev_remove_gpe_reference |
| 177 | * | 185 | * |
| 178 | * PARAMETERS: gpe_event_info - GPE to disable | 186 | * PARAMETERS: gpe_event_info - Remove a reference to this GPE |
| 179 | * | 187 | * |
| 180 | * RETURN: Status | 188 | * RETURN: Status |
| 181 | * | 189 | * |
| @@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 184 | * | 192 | * |
| 185 | ******************************************************************************/ | 193 | ******************************************************************************/ |
| 186 | 194 | ||
| 187 | acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | 195 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) |
| 188 | { | 196 | { |
| 189 | acpi_status status = AE_OK; | 197 | acpi_status status = AE_OK; |
| 190 | 198 | ||
| 199 | ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); | ||
| 200 | |||
| 191 | if (!gpe_event_info->runtime_count) { | 201 | if (!gpe_event_info->runtime_count) { |
| 192 | return_ACPI_STATUS(AE_LIMIT); | 202 | return_ACPI_STATUS(AE_LIMIT); |
| 193 | } | 203 | } |
| 194 | 204 | ||
| 195 | gpe_event_info->runtime_count--; | 205 | gpe_event_info->runtime_count--; |
| 196 | if (!gpe_event_info->runtime_count) { | 206 | if (!gpe_event_info->runtime_count) { |
| 207 | |||
| 208 | /* Disable on last reference */ | ||
| 209 | |||
| 197 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 210 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
| 198 | if (ACPI_SUCCESS(status)) { | 211 | if (ACPI_SUCCESS(status)) { |
| 199 | status = acpi_hw_low_set_gpe(gpe_event_info, | 212 | status = acpi_hw_low_set_gpe(gpe_event_info, |
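The two hunks above rename acpi_raw_enable_gpe()/acpi_raw_disable_gpe() to acpi_ev_add_gpe_reference()/acpi_ev_remove_gpe_reference() and add trace macros and comments; the underlying reference-count discipline is unchanged. Reduced to a self-contained sketch (the names below are illustrative stand-ins, not ACPICA symbols):

    #include <limits.h>

    /*
     * Sketch of the enable-on-first / disable-on-last reference pattern
     * used by acpi_ev_add_gpe_reference()/acpi_ev_remove_gpe_reference().
     */
    struct gpe_ref {
            unsigned char runtime_count;   /* mirrors gpe_event_info->runtime_count */
            int hw_enabled;                /* stands in for the hardware enable bit */
    };

    static int gpe_add_reference(struct gpe_ref *gpe)
    {
            if (gpe->runtime_count == UCHAR_MAX)
                    return -1;             /* AE_LIMIT: counter saturated */

            if (++gpe->runtime_count == 1)
                    gpe->hw_enabled = 1;   /* enable hardware on first reference */
            return 0;
    }

    static int gpe_remove_reference(struct gpe_ref *gpe)
    {
            if (!gpe->runtime_count)
                    return -1;             /* AE_LIMIT: unbalanced remove */

            if (--gpe->runtime_count == 0)
                    gpe->hw_enabled = 0;   /* disable hardware on last reference */
            return 0;
    }

The counter lets several users request a runtime GPE without tracking each other; the hardware enable bit only toggles on the 0-to-1 and 1-to-0 transitions.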
| @@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
| 379 | } | 392 | } |
| 380 | 393 | ||
| 381 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | 394 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, |
| 382 | "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", | 395 | "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", |
| 383 | gpe_register_info->base_gpe_number, | 396 | gpe_register_info->base_gpe_number, |
| 384 | status_reg, enable_reg)); | 397 | status_reg, enable_reg)); |
| 385 | 398 | ||
| @@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
| 405 | * or method. | 418 | * or method. |
| 406 | */ | 419 | */ |
| 407 | int_status |= | 420 | int_status |= |
| 408 | acpi_ev_gpe_dispatch(&gpe_block-> | 421 | acpi_ev_gpe_dispatch(gpe_block-> |
| 422 | node, | ||
| 423 | &gpe_block-> | ||
| 409 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); | 424 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); |
| 410 | } | 425 | } |
| 411 | } | 426 | } |
| @@ -435,17 +450,25 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
| 435 | * an interrupt handler. | 450 | * an interrupt handler. |
| 436 | * | 451 | * |
| 437 | ******************************************************************************/ | 452 | ******************************************************************************/ |
| 438 | static void acpi_ev_asynch_enable_gpe(void *context); | ||
| 439 | 453 | ||
| 440 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | 454 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) |
| 441 | { | 455 | { |
| 442 | struct acpi_gpe_event_info *gpe_event_info = (void *)context; | 456 | struct acpi_gpe_event_info *gpe_event_info = context; |
| 443 | acpi_status status; | 457 | acpi_status status; |
| 444 | struct acpi_gpe_event_info local_gpe_event_info; | 458 | struct acpi_gpe_event_info *local_gpe_event_info; |
| 445 | struct acpi_evaluate_info *info; | 459 | struct acpi_evaluate_info *info; |
| 446 | 460 | ||
| 447 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 461 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
| 448 | 462 | ||
| 463 | /* Allocate a local GPE block */ | ||
| 464 | |||
| 465 | local_gpe_event_info = | ||
| 466 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); | ||
| 467 | if (!local_gpe_event_info) { | ||
| 468 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); | ||
| 469 | return_VOID; | ||
| 470 | } | ||
| 471 | |||
| 449 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | 472 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); |
| 450 | if (ACPI_FAILURE(status)) { | 473 | if (ACPI_FAILURE(status)) { |
| 451 | return_VOID; | 474 | return_VOID; |
| @@ -462,7 +485,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 462 | * Take a snapshot of the GPE info for this level - we copy the info to | 485 | * Take a snapshot of the GPE info for this level - we copy the info to |
| 463 | * prevent a race condition with remove_handler/remove_block. | 486 | * prevent a race condition with remove_handler/remove_block. |
| 464 | */ | 487 | */ |
| 465 | ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, | 488 | ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, |
| 466 | sizeof(struct acpi_gpe_event_info)); | 489 | sizeof(struct acpi_gpe_event_info)); |
| 467 | 490 | ||
| 468 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 491 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
| @@ -470,12 +493,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 470 | return_VOID; | 493 | return_VOID; |
| 471 | } | 494 | } |
| 472 | 495 | ||
| 473 | /* | 496 | /* Do the correct dispatch - normal method or implicit notify */ |
| 474 | * Must check for control method type dispatch one more time to avoid a | 497 | |
| 475 | * race with ev_gpe_install_handler | 498 | switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { |
| 476 | */ | 499 | case ACPI_GPE_DISPATCH_NOTIFY: |
| 477 | if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == | 500 | |
| 478 | ACPI_GPE_DISPATCH_METHOD) { | 501 | /* |
| 502 | * Implicit notify. | ||
| 503 | * Dispatch a DEVICE_WAKE notify to the appropriate handler. | ||
| 504 | * NOTE: the request is queued for execution after this method | ||
| 505 | * completes. The notify handlers are NOT invoked synchronously | ||
| 506 | * from this thread -- because handlers may in turn run other | ||
| 507 | * control methods. | ||
| 508 | */ | ||
| 509 | status = | ||
| 510 | acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. | ||
| 511 | device_node, | ||
| 512 | ACPI_NOTIFY_DEVICE_WAKE); | ||
| 513 | break; | ||
| 514 | |||
| 515 | case ACPI_GPE_DISPATCH_METHOD: | ||
| 479 | 516 | ||
| 480 | /* Allocate the evaluation information block */ | 517 | /* Allocate the evaluation information block */ |
| 481 | 518 | ||
| @@ -488,7 +525,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 488 | * control method that corresponds to this GPE | 525 | * control method that corresponds to this GPE |
| 489 | */ | 526 | */ |
| 490 | info->prefix_node = | 527 | info->prefix_node = |
| 491 | local_gpe_event_info.dispatch.method_node; | 528 | local_gpe_event_info->dispatch.method_node; |
| 492 | info->flags = ACPI_IGNORE_RETURN_VALUE; | 529 | info->flags = ACPI_IGNORE_RETURN_VALUE; |
| 493 | 530 | ||
| 494 | status = acpi_ns_evaluate(info); | 531 | status = acpi_ns_evaluate(info); |
| @@ -499,46 +536,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 499 | ACPI_EXCEPTION((AE_INFO, status, | 536 | ACPI_EXCEPTION((AE_INFO, status, |
| 500 | "while evaluating GPE method [%4.4s]", | 537 | "while evaluating GPE method [%4.4s]", |
| 501 | acpi_ut_get_node_name | 538 | acpi_ut_get_node_name |
| 502 | (local_gpe_event_info.dispatch. | 539 | (local_gpe_event_info->dispatch. |
| 503 | method_node))); | 540 | method_node))); |
| 504 | } | 541 | } |
| 542 | |||
| 543 | break; | ||
| 544 | |||
| 545 | default: | ||
| 546 | return_VOID; /* Should never happen */ | ||
| 505 | } | 547 | } |
| 548 | |||
| 506 | /* Defer enabling of GPE until all notify handlers are done */ | 549 | /* Defer enabling of GPE until all notify handlers are done */ |
| 507 | acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, | 550 | |
| 508 | gpe_event_info); | 551 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, |
| 552 | acpi_ev_asynch_enable_gpe, | ||
| 553 | local_gpe_event_info); | ||
| 554 | if (ACPI_FAILURE(status)) { | ||
| 555 | ACPI_FREE(local_gpe_event_info); | ||
| 556 | } | ||
| 509 | return_VOID; | 557 | return_VOID; |
| 510 | } | 558 | } |
| 511 | 559 | ||
| 512 | static void acpi_ev_asynch_enable_gpe(void *context) | 560 | |
| 561 | /******************************************************************************* | ||
| 562 | * | ||
| 563 | * FUNCTION: acpi_ev_asynch_enable_gpe | ||
| 564 | * | ||
| 565 | * PARAMETERS: Context (gpe_event_info) - Info for this GPE | ||
| 566 | * Callback from acpi_os_execute | ||
| 567 | * | ||
| 568 | * RETURN: None | ||
| 569 | * | ||
| 570 | * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to | ||
| 571 | * complete (i.e., finish execution of Notify) | ||
| 572 | * | ||
| 573 | ******************************************************************************/ | ||
| 574 | |||
| 575 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) | ||
| 513 | { | 576 | { |
| 514 | struct acpi_gpe_event_info *gpe_event_info = context; | 577 | struct acpi_gpe_event_info *gpe_event_info = context; |
| 578 | |||
| 579 | (void)acpi_ev_finish_gpe(gpe_event_info); | ||
| 580 | |||
| 581 | ACPI_FREE(gpe_event_info); | ||
| 582 | return; | ||
| 583 | } | ||
| 584 | |||
| 585 | |||
| 586 | /******************************************************************************* | ||
| 587 | * | ||
| 588 | * FUNCTION: acpi_ev_finish_gpe | ||
| 589 | * | ||
| 590 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
| 591 | * | ||
| 592 | * RETURN: Status | ||
| 593 | * | ||
| 594 | * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution | ||
| 595 | * of a GPE method or a synchronous or asynchronous GPE handler. | ||
| 596 | * | ||
| 597 | ******************************************************************************/ | ||
| 598 | |||
| 599 | acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) | ||
| 600 | { | ||
| 515 | acpi_status status; | 601 | acpi_status status; |
| 602 | |||
| 516 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 603 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == |
| 517 | ACPI_GPE_LEVEL_TRIGGERED) { | 604 | ACPI_GPE_LEVEL_TRIGGERED) { |
| 518 | /* | 605 | /* |
| 519 | * GPE is level-triggered, we clear the GPE status bit after handling | 606 | * GPE is level-triggered, we clear the GPE status bit after |
| 520 | * the event. | 607 | * handling the event. |
| 521 | */ | 608 | */ |
| 522 | status = acpi_hw_clear_gpe(gpe_event_info); | 609 | status = acpi_hw_clear_gpe(gpe_event_info); |
| 523 | if (ACPI_FAILURE(status)) { | 610 | if (ACPI_FAILURE(status)) { |
| 524 | return_VOID; | 611 | return (status); |
| 525 | } | 612 | } |
| 526 | } | 613 | } |
| 527 | 614 | ||
| 528 | /* | 615 | /* |
| 529 | * Enable this GPE, conditionally. This means that the GPE will only be | 616 | * Enable this GPE, conditionally. This means that the GPE will |
| 530 | * physically enabled if the enable_for_run bit is set in the event_info | 617 | * only be physically enabled if the enable_for_run bit is set |
| 618 | * in the event_info. | ||
| 531 | */ | 619 | */ |
| 532 | (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); | 620 | (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); |
| 533 | 621 | return (AE_OK); | |
| 534 | return_VOID; | ||
| 535 | } | 622 | } |
| 536 | 623 | ||
| 624 | |||
| 537 | /******************************************************************************* | 625 | /******************************************************************************* |
| 538 | * | 626 | * |
| 539 | * FUNCTION: acpi_ev_gpe_dispatch | 627 | * FUNCTION: acpi_ev_gpe_dispatch |
| 540 | * | 628 | * |
| 541 | * PARAMETERS: gpe_event_info - Info for this GPE | 629 | * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 |
| 630 | * gpe_event_info - Info for this GPE | ||
| 542 | * gpe_number - Number relative to the parent GPE block | 631 | * gpe_number - Number relative to the parent GPE block |
| 543 | * | 632 | * |
| 544 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | 633 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED |
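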
| @@ -551,13 +640,22 @@ static void acpi_ev_asynch_enable_gpe(void *context) | |||
| 551 | ******************************************************************************/ | 640 | ******************************************************************************/ |
| 552 | 641 | ||
| 553 | u32 | 642 | u32 |
| 554 | acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | 643 | acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, |
| 644 | struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | ||
| 555 | { | 645 | { |
| 556 | acpi_status status; | 646 | acpi_status status; |
| 647 | u32 return_value; | ||
| 557 | 648 | ||
| 558 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); | 649 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); |
| 559 | 650 | ||
| 560 | acpi_os_gpe_count(gpe_number); | 651 | /* Invoke global event handler if present */ |
| 652 | |||
| 653 | acpi_gpe_count++; | ||
| 654 | if (acpi_gbl_global_event_handler) { | ||
| 655 | acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, | ||
| 656 | gpe_number, | ||
| 657 | acpi_gbl_global_event_handler_context); | ||
| 658 | } | ||
| 561 | 659 | ||
| 562 | /* | 660 | /* |
| 563 | * If edge-triggered, clear the GPE status bit now. Note that | 661 | * If edge-triggered, clear the GPE status bit now. Note that |
| @@ -568,59 +666,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
| 568 | status = acpi_hw_clear_gpe(gpe_event_info); | 666 | status = acpi_hw_clear_gpe(gpe_event_info); |
| 569 | if (ACPI_FAILURE(status)) { | 667 | if (ACPI_FAILURE(status)) { |
| 570 | ACPI_EXCEPTION((AE_INFO, status, | 668 | ACPI_EXCEPTION((AE_INFO, status, |
| 571 | "Unable to clear GPE[0x%2X]", | 669 | "Unable to clear GPE%02X", gpe_number)); |
| 572 | gpe_number)); | ||
| 573 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | 670 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); |
| 574 | } | 671 | } |
| 575 | } | 672 | } |
| 576 | 673 | ||
| 577 | /* | 674 | /* |
| 578 | * Dispatch the GPE to either an installed handler, or the control method | 675 | * Always disable the GPE so that it does not keep firing before |
| 579 | * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke | 676 | * any asynchronous activity completes (either from the execution |
| 580 | * it and do not attempt to run the method. If there is neither a handler | 677 | * of a GPE method or an asynchronous GPE handler.) |
| 581 | * nor a method, we disable this GPE to prevent further such pointless | 678 | * |
| 582 | * events from firing. | 679 | * If there is no handler or method to run, just disable the |
| 680 | * GPE and leave it disabled permanently to prevent further such | ||
| 681 | * pointless events from firing. | ||
| 682 | */ | ||
| 683 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
| 684 | if (ACPI_FAILURE(status)) { | ||
| 685 | ACPI_EXCEPTION((AE_INFO, status, | ||
| 686 | "Unable to disable GPE%02X", gpe_number)); | ||
| 687 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
| 688 | } | ||
| 689 | |||
| 690 | /* | ||
| 691 | * Dispatch the GPE to either an installed handler or the control | ||
| 692 | * method associated with this GPE (_Lxx or _Exx). If a handler | ||
| 693 | * exists, we invoke it and do not attempt to run the method. | ||
| 694 | * If there is neither a handler nor a method, leave the GPE | ||
| 695 | * disabled. | ||
| 583 | */ | 696 | */ |
| 584 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | 697 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { |
| 585 | case ACPI_GPE_DISPATCH_HANDLER: | 698 | case ACPI_GPE_DISPATCH_HANDLER: |
| 586 | 699 | ||
| 587 | /* | 700 | /* Invoke the installed handler (at interrupt level) */ |
| 588 | * Invoke the installed handler (at interrupt level) | ||
| 589 | * Ignore return status for now. | ||
| 590 | * TBD: leave GPE disabled on error? | ||
| 591 | */ | ||
| 592 | (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> | ||
| 593 | dispatch. | ||
| 594 | handler-> | ||
| 595 | context); | ||
| 596 | 701 | ||
| 597 | /* It is now safe to clear level-triggered events. */ | 702 | return_value = |
| 703 | gpe_event_info->dispatch.handler->address(gpe_device, | ||
| 704 | gpe_number, | ||
| 705 | gpe_event_info-> | ||
| 706 | dispatch.handler-> | ||
| 707 | context); | ||
| 598 | 708 | ||
| 599 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 709 | /* If requested, clear (if level-triggered) and reenable the GPE */ |
| 600 | ACPI_GPE_LEVEL_TRIGGERED) { | 710 | |
| 601 | status = acpi_hw_clear_gpe(gpe_event_info); | 711 | if (return_value & ACPI_REENABLE_GPE) { |
| 602 | if (ACPI_FAILURE(status)) { | 712 | (void)acpi_ev_finish_gpe(gpe_event_info); |
| 603 | ACPI_EXCEPTION((AE_INFO, status, | ||
| 604 | "Unable to clear GPE[0x%2X]", | ||
| 605 | gpe_number)); | ||
| 606 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
| 607 | } | ||
| 608 | } | 713 | } |
| 609 | break; | 714 | break; |
| 610 | 715 | ||
| 611 | case ACPI_GPE_DISPATCH_METHOD: | 716 | case ACPI_GPE_DISPATCH_METHOD: |
| 612 | 717 | case ACPI_GPE_DISPATCH_NOTIFY: | |
| 613 | /* | ||
| 614 | * Disable the GPE, so it doesn't keep firing before the method has a | ||
| 615 | * chance to run (it runs asynchronously with interrupts enabled). | ||
| 616 | */ | ||
| 617 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
| 618 | if (ACPI_FAILURE(status)) { | ||
| 619 | ACPI_EXCEPTION((AE_INFO, status, | ||
| 620 | "Unable to disable GPE[0x%2X]", | ||
| 621 | gpe_number)); | ||
| 622 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
| 623 | } | ||
| 624 | 718 | ||
| 625 | /* | 719 | /* |
| 626 | * Execute the method associated with the GPE | 720 | * Execute the method associated with the GPE |
| @@ -631,7 +725,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
| 631 | gpe_event_info); | 725 | gpe_event_info); |
| 632 | if (ACPI_FAILURE(status)) { | 726 | if (ACPI_FAILURE(status)) { |
| 633 | ACPI_EXCEPTION((AE_INFO, status, | 727 | ACPI_EXCEPTION((AE_INFO, status, |
| 634 | "Unable to queue handler for GPE[0x%2X] - event disabled", | 728 | "Unable to queue handler for GPE%2X - event disabled", |
| 635 | gpe_number)); | 729 | gpe_number)); |
| 636 | } | 730 | } |
| 637 | break; | 731 | break; |
| @@ -644,20 +738,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
| 644 | * a GPE to be enabled if it has no handler or method. | 738 | * a GPE to be enabled if it has no handler or method. |
| 645 | */ | 739 | */ |
| 646 | ACPI_ERROR((AE_INFO, | 740 | ACPI_ERROR((AE_INFO, |
| 647 | "No handler or method for GPE[0x%2X], disabling event", | 741 | "No handler or method for GPE%02X, disabling event", |
| 648 | gpe_number)); | 742 | gpe_number)); |
| 649 | 743 | ||
| 650 | /* | ||
| 651 | * Disable the GPE. The GPE will remain disabled a handler | ||
| 652 | * is installed or ACPICA is restarted. | ||
| 653 | */ | ||
| 654 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
| 655 | if (ACPI_FAILURE(status)) { | ||
| 656 | ACPI_EXCEPTION((AE_INFO, status, | ||
| 657 | "Unable to disable GPE[0x%2X]", | ||
| 658 | gpe_number)); | ||
| 659 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
| 660 | } | ||
| 661 | break; | 744 | break; |
| 662 | } | 745 | } |
| 663 | 746 | ||
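Taken together, the evgpe.c hunks above change the dispatch discipline: acpi_ev_gpe_dispatch() now disables the GPE unconditionally before dispatching, and the GPE is re-enabled only when the work is finished, through the new acpi_ev_finish_gpe(). Handlers request this by returning ACPI_REENABLE_GPE; the method and implicit-notify paths defer it to acpi_ev_asynch_enable_gpe(), which also owns the heap copy of the GPE info that acpi_ev_asynch_execute_gpe_method() allocates. A condensed sketch of that hand-off, using only calls visible in the hunks above (illustrative, not the ACPICA source; assumes the usual evgpe.c includes):

    /* The heap copy of the GPE info travels with the deferred work item and
     * is freed by whichever side last touches it. */

    static void ACPI_SYSTEM_XFACE sketch_asynch_enable(void *context)
    {
            struct acpi_gpe_event_info *copy = context;

            (void)acpi_ev_finish_gpe(copy); /* clear (if level-triggered), then
                                             * conditionally re-enable */
            ACPI_FREE(copy);                /* the callback releases the copy */
    }

    static void sketch_defer_reenable(struct acpi_gpe_event_info *copy)
    {
            acpi_status status;

            status = acpi_os_execute(OSL_NOTIFY_HANDLER,
                                     sketch_asynch_enable, copy);
            if (ACPI_FAILURE(status)) {
                    ACPI_FREE(copy);        /* queueing failed: free it now */
            }
    }

Allocating the copy (instead of the former on-stack snapshot) is what allows the re-enable work item to outlive the method-execution work item safely.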
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 020add3eee1c..9acb86958c09 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
| @@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
| 361 | 361 | ||
| 362 | gpe_block->node = gpe_device; | 362 | gpe_block->node = gpe_device; |
| 363 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); | 363 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); |
| 364 | gpe_block->initialized = FALSE; | ||
| 364 | gpe_block->register_count = register_count; | 365 | gpe_block->register_count = register_count; |
| 365 | gpe_block->block_base_number = gpe_block_base_number; | 366 | gpe_block->block_base_number = gpe_block_base_number; |
| 366 | gpe_block->initialized = FALSE; | ||
| 367 | 367 | ||
| 368 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, | 368 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, |
| 369 | sizeof(struct acpi_generic_address)); | 369 | sizeof(struct acpi_generic_address)); |
| @@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
| 386 | return_ACPI_STATUS(status); | 386 | return_ACPI_STATUS(status); |
| 387 | } | 387 | } |
| 388 | 388 | ||
| 389 | acpi_all_gpes_initialized = FALSE; | 389 | acpi_gbl_all_gpes_initialized = FALSE; |
| 390 | 390 | ||
| 391 | /* Find all GPE methods (_Lxx or_Exx) for this block */ | 391 | /* Find all GPE methods (_Lxx or_Exx) for this block */ |
| 392 | 392 | ||
| @@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
| 423 | * | 423 | * |
| 424 | * FUNCTION: acpi_ev_initialize_gpe_block | 424 | * FUNCTION: acpi_ev_initialize_gpe_block |
| 425 | * | 425 | * |
| 426 | * PARAMETERS: gpe_device - Handle to the parent GPE block | 426 | * PARAMETERS: acpi_gpe_callback |
| 427 | * gpe_block - Gpe Block info | ||
| 428 | * | 427 | * |
| 429 | * RETURN: Status | 428 | * RETURN: Status |
| 430 | * | 429 | * |
| 431 | * DESCRIPTION: Initialize and enable a GPE block. First find and run any | 430 | * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have |
| 432 | * _PRT methods associated with the block, then enable the | 431 | * associated methods. |
| 433 | * appropriate GPEs. | ||
| 434 | * Note: Assumes namespace is locked. | 432 | * Note: Assumes namespace is locked. |
| 435 | * | 433 | * |
| 436 | ******************************************************************************/ | 434 | ******************************************************************************/ |
| @@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
| 450 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); | 448 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); |
| 451 | 449 | ||
| 452 | /* | 450 | /* |
| 453 | * Ignore a null GPE block (e.g., if no GPE block 1 exists) and | 451 | * Ignore a null GPE block (e.g., if no GPE block 1 exists), and |
| 454 | * GPE blocks that have been initialized already. | 452 | * any GPE blocks that have been initialized already. |
| 455 | */ | 453 | */ |
| 456 | if (!gpe_block || gpe_block->initialized) { | 454 | if (!gpe_block || gpe_block->initialized) { |
| 457 | return_ACPI_STATUS(AE_OK); | 455 | return_ACPI_STATUS(AE_OK); |
| @@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
| 459 | 457 | ||
| 460 | /* | 458 | /* |
| 461 | * Enable all GPEs that have a corresponding method and have the | 459 | * Enable all GPEs that have a corresponding method and have the |
| 462 | * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must | 460 | * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block |
| 463 | * be enabled via the acpi_enable_gpe() interface. | 461 | * must be enabled via the acpi_enable_gpe() interface. |
| 464 | */ | 462 | */ |
| 465 | gpe_enabled_count = 0; | 463 | gpe_enabled_count = 0; |
| 466 | 464 | ||
| @@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
| 472 | gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; | 470 | gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; |
| 473 | gpe_event_info = &gpe_block->event_info[gpe_index]; | 471 | gpe_event_info = &gpe_block->event_info[gpe_index]; |
| 474 | 472 | ||
| 475 | /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ | 473 | /* |
| 476 | 474 | * Ignore GPEs that have no corresponding _Lxx/_Exx method | |
| 477 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) | 475 | * and GPEs that are used to wake the system |
| 476 | */ | ||
| 477 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
| 478 | ACPI_GPE_DISPATCH_NONE) | ||
| 479 | || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
| 480 | == ACPI_GPE_DISPATCH_HANDLER) | ||
| 478 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | 481 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { |
| 479 | continue; | 482 | continue; |
| 480 | } | 483 | } |
| 481 | 484 | ||
| 482 | status = acpi_raw_enable_gpe(gpe_event_info); | 485 | status = acpi_ev_add_gpe_reference(gpe_event_info); |
| 483 | if (ACPI_FAILURE(status)) { | 486 | if (ACPI_FAILURE(status)) { |
| 484 | ACPI_EXCEPTION((AE_INFO, status, | 487 | ACPI_EXCEPTION((AE_INFO, status, |
| 485 | "Could not enable GPE 0x%02X", | 488 | "Could not enable GPE 0x%02X", |
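The last hunk above tightens which GPEs acpi_ev_initialize_gpe_block() enables automatically and switches it to the new reference interface. The test can be restated as a small predicate (illustrative only; the constants are the ones used in the hunk):

    /* A GPE gets a runtime reference at block initialization only if it is
     * not unassigned, not already owned by an installed handler, and not
     * marked as a wake GPE. */
    static u8 sketch_should_auto_enable(struct acpi_gpe_event_info *gpe_event_info)
    {
            u8 dispatch = (u8)(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK);

            if ((dispatch == ACPI_GPE_DISPATCH_NONE) ||
                (dispatch == ACPI_GPE_DISPATCH_HANDLER) ||
                (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
                    return (FALSE);
            }

            return (TRUE);  /* otherwise take a reference via acpi_ev_add_gpe_reference() */
    }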
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 4c8dea513b66..c59dc2340593 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c | |||
| @@ -45,11 +45,27 @@ | |||
| 45 | #include "accommon.h" | 45 | #include "accommon.h" |
| 46 | #include "acevents.h" | 46 | #include "acevents.h" |
| 47 | #include "acnamesp.h" | 47 | #include "acnamesp.h" |
| 48 | #include "acinterp.h" | ||
| 49 | 48 | ||
| 50 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
| 51 | ACPI_MODULE_NAME("evgpeinit") | 50 | ACPI_MODULE_NAME("evgpeinit") |
| 52 | 51 | ||
| 52 | /* | ||
| 53 | * Note: History of _PRW support in ACPICA | ||
| 54 | * | ||
| 55 | * Originally (2000 - 2010), the GPE initialization code performed a walk of | ||
| 56 | * the entire namespace to execute the _PRW methods and detect all GPEs | ||
| 57 | * capable of waking the system. | ||
| 58 | * | ||
| 59 | * As of 10/2010, the _PRW method execution has been removed since it is | ||
| 60 | * actually unnecessary. The host OS must in fact execute all _PRW methods | ||
| 61 | * in order to identify the device/power-resource dependencies. We now put | ||
| 62 | * the onus on the host OS to identify the wake GPEs as part of this process | ||
| 63 | * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This | ||
| 64 | * not only reduces the complexity of the ACPICA initialization code, but in | ||
| 65 | * some cases (on systems with very large namespaces) it should reduce the | ||
| 66 | * kernel boot time as well. | ||
| 67 | */ | ||
| 68 | |||
| 53 | /******************************************************************************* | 69 | /******************************************************************************* |
| 54 | * | 70 | * |
| 55 | * FUNCTION: acpi_ev_gpe_initialize | 71 | * FUNCTION: acpi_ev_gpe_initialize |
| @@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
| 222 | acpi_status status = AE_OK; | 238 | acpi_status status = AE_OK; |
| 223 | 239 | ||
| 224 | /* | 240 | /* |
| 225 | * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. | 241 | * Find any _Lxx/_Exx GPE methods that have just been loaded. |
| 226 | * | 242 | * |
| 227 | * Any GPEs that correspond to new _Lxx/_Exx methods are immediately | 243 | * Any GPEs that correspond to new _Lxx/_Exx methods are immediately |
| 228 | * enabled. | 244 | * enabled. |
| @@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
| 235 | return; | 251 | return; |
| 236 | } | 252 | } |
| 237 | 253 | ||
| 254 | walk_info.count = 0; | ||
| 238 | walk_info.owner_id = table_owner_id; | 255 | walk_info.owner_id = table_owner_id; |
| 239 | walk_info.execute_by_owner_id = TRUE; | 256 | walk_info.execute_by_owner_id = TRUE; |
| 240 | walk_info.count = 0; | ||
| 241 | 257 | ||
| 242 | /* Walk the interrupt level descriptor list */ | 258 | /* Walk the interrupt level descriptor list */ |
| 243 | 259 | ||
| @@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
| 298 | * xx - is the GPE number [in HEX] | 314 | * xx - is the GPE number [in HEX] |
| 299 | * | 315 | * |
| 300 | * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods | 316 | * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods |
| 301 | * with that owner. | 317 | * with that owner. |
| 302 | * | 318 | * |
| 303 | ******************************************************************************/ | 319 | ******************************************************************************/ |
| 304 | 320 | ||
| @@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, | |||
| 415 | * Add the GPE information from above to the gpe_event_info block for | 431 | * Add the GPE information from above to the gpe_event_info block for |
| 416 | * use during dispatch of this GPE. | 432 | * use during dispatch of this GPE. |
| 417 | */ | 433 | */ |
| 434 | gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); | ||
| 418 | gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); | 435 | gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); |
| 419 | gpe_event_info->dispatch.method_node = method_node; | 436 | gpe_event_info->dispatch.method_node = method_node; |
| 420 | 437 | ||
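The new block comment above shifts wake-GPE discovery to the host OS: it must evaluate _PRW itself and then tell ACPICA about each wake GPE. A hedged sketch of that host-side step follows; the three-argument form of acpi_setup_gpe_for_wake() (wake device, GPE block device, GPE number) is an assumption here and should be checked against include/acpi/acpixf.h in this tree.

    /* ASSUMED signature: verify acpi_setup_gpe_for_wake() in acpixf.h */
    static acpi_status example_mark_wake_gpe(acpi_handle wake_device,
                                             acpi_handle gpe_device,
                                             u32 gpe_number)
    {
            /* wake_device, gpe_device and gpe_number were obtained by the
             * host from the device's _PRW package. */
            return acpi_setup_gpe_for_wake(wake_device, gpe_device, gpe_number);
    }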
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 19a0e513ea48..10e477494dcf 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
| @@ -154,6 +154,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | |||
| 154 | 154 | ||
| 155 | /******************************************************************************* | 155 | /******************************************************************************* |
| 156 | * | 156 | * |
| 157 | * FUNCTION: acpi_ev_get_gpe_device | ||
| 158 | * | ||
| 159 | * PARAMETERS: GPE_WALK_CALLBACK | ||
| 160 | * | ||
| 161 | * RETURN: Status | ||
| 162 | * | ||
| 163 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
| 164 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
| 165 | * | ||
| 166 | ******************************************************************************/ | ||
| 167 | |||
| 168 | acpi_status | ||
| 169 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
| 170 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
| 171 | { | ||
| 172 | struct acpi_gpe_device_info *info = context; | ||
| 173 | |||
| 174 | /* Increment Index by the number of GPEs in this block */ | ||
| 175 | |||
| 176 | info->next_block_base_index += gpe_block->gpe_count; | ||
| 177 | |||
| 178 | if (info->index < info->next_block_base_index) { | ||
| 179 | /* | ||
| 180 | * The GPE index is within this block, get the node. Leave the node | ||
| 181 | * NULL for the FADT-defined GPEs | ||
| 182 | */ | ||
| 183 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
| 184 | info->gpe_device = gpe_block->node; | ||
| 185 | } | ||
| 186 | |||
| 187 | info->status = AE_OK; | ||
| 188 | return (AE_CTRL_END); | ||
| 189 | } | ||
| 190 | |||
| 191 | return (AE_OK); | ||
| 192 | } | ||
| 193 | |||
| 194 | /******************************************************************************* | ||
| 195 | * | ||
| 157 | * FUNCTION: acpi_ev_get_gpe_xrupt_block | 196 | * FUNCTION: acpi_ev_get_gpe_xrupt_block |
| 158 | * | 197 | * |
| 159 | * PARAMETERS: interrupt_number - Interrupt for a GPE block | 198 | * PARAMETERS: interrupt_number - Interrupt for a GPE block |
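acpi_ev_get_gpe_device() above is a GPE_WALK_CALLBACK that translates a flat GPE index into the owning block device. A hedged sketch of how such a callback is typically driven follows; the struct acpi_gpe_device_info field names come from the hunk, while the two-argument acpi_ev_walk_gpe_list() call is an assumption.

    static acpi_status example_index_to_gpe_device(u32 index,
                                                   acpi_handle *gpe_device)
    {
            struct acpi_gpe_device_info info;
            acpi_status status;

            info.index = index;
            info.status = AE_NOT_EXIST;         /* reported if index is out of range */
            info.gpe_device = NULL;
            info.next_block_base_index = 0;

            /* Walk every GPE block; the callback stops the walk with
             * AE_CTRL_END once the block containing 'index' is found. */
            status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
            if (ACPI_FAILURE(status)) {
                    return (status);
            }

            *gpe_device = info.gpe_device;      /* NULL for the FADT-defined blocks */
            return (info.status);
    }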
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index fcaed9fb44ff..8e31bb5a973a 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
| @@ -284,41 +284,41 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | |||
| 284 | * RETURN: ACPI_INTERRUPT_HANDLED | 284 | * RETURN: ACPI_INTERRUPT_HANDLED |
| 285 | * | 285 | * |
| 286 | * DESCRIPTION: Invoked directly from the SCI handler when a global lock | 286 | * DESCRIPTION: Invoked directly from the SCI handler when a global lock |
| 287 | * release interrupt occurs. Attempt to acquire the global lock, | 287 | * release interrupt occurs. If there's a thread waiting for |
| 288 | * if successful, signal the thread waiting for the lock. | 288 | * the global lock, signal it. |
| 289 | * | 289 | * |
| 290 | * NOTE: Assumes that the semaphore can be signaled from interrupt level. If | 290 | * NOTE: Assumes that the semaphore can be signaled from interrupt level. If |
| 291 | * this is not possible for some reason, a separate thread will have to be | 291 | * this is not possible for some reason, a separate thread will have to be |
| 292 | * scheduled to do this. | 292 | * scheduled to do this. |
| 293 | * | 293 | * |
| 294 | ******************************************************************************/ | 294 | ******************************************************************************/ |
| 295 | static u8 acpi_ev_global_lock_pending; | ||
| 296 | static spinlock_t _acpi_ev_global_lock_pending_lock; | ||
| 297 | #define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock | ||
| 295 | 298 | ||
| 296 | static u32 acpi_ev_global_lock_handler(void *context) | 299 | static u32 acpi_ev_global_lock_handler(void *context) |
| 297 | { | 300 | { |
| 298 | u8 acquired = FALSE; | 301 | acpi_status status; |
| 302 | acpi_cpu_flags flags; | ||
| 299 | 303 | ||
| 300 | /* | 304 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
| 301 | * Attempt to get the lock. | ||
| 302 | * | ||
| 303 | * If we don't get it now, it will be marked pending and we will | ||
| 304 | * take another interrupt when it becomes free. | ||
| 305 | */ | ||
| 306 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
| 307 | if (acquired) { | ||
| 308 | 305 | ||
| 309 | /* Got the lock, now wake all threads waiting for it */ | 306 | if (!acpi_ev_global_lock_pending) { |
| 307 | goto out; | ||
| 308 | } | ||
| 310 | 309 | ||
| 311 | acpi_gbl_global_lock_acquired = TRUE; | 310 | /* Send a unit to the semaphore */ |
| 312 | /* Send a unit to the semaphore */ | ||
| 313 | 311 | ||
| 314 | if (ACPI_FAILURE | 312 | status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); |
| 315 | (acpi_os_signal_semaphore | 313 | if (ACPI_FAILURE(status)) { |
| 316 | (acpi_gbl_global_lock_semaphore, 1))) { | 314 | ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); |
| 317 | ACPI_ERROR((AE_INFO, | ||
| 318 | "Could not signal Global Lock semaphore")); | ||
| 319 | } | ||
| 320 | } | 315 | } |
| 321 | 316 | ||
| 317 | acpi_ev_global_lock_pending = FALSE; | ||
| 318 | |||
| 319 | out: | ||
| 320 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); | ||
| 321 | |||
| 322 | return (ACPI_INTERRUPT_HANDLED); | 322 | return (ACPI_INTERRUPT_HANDLED); |
| 323 | } | 323 | } |
| 324 | 324 | ||
| @@ -415,6 +415,7 @@ static int acpi_ev_global_lock_acquired; | |||
| 415 | 415 | ||
| 416 | acpi_status acpi_ev_acquire_global_lock(u16 timeout) | 416 | acpi_status acpi_ev_acquire_global_lock(u16 timeout) |
| 417 | { | 417 | { |
| 418 | acpi_cpu_flags flags; | ||
| 418 | acpi_status status = AE_OK; | 419 | acpi_status status = AE_OK; |
| 419 | u8 acquired = FALSE; | 420 | u8 acquired = FALSE; |
| 420 | 421 | ||
| @@ -467,32 +468,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout) | |||
| 467 | return_ACPI_STATUS(AE_OK); | 468 | return_ACPI_STATUS(AE_OK); |
| 468 | } | 469 | } |
| 469 | 470 | ||
| 470 | /* Attempt to acquire the actual hardware lock */ | 471 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
| 472 | |||
| 473 | do { | ||
| 474 | |||
| 475 | /* Attempt to acquire the actual hardware lock */ | ||
| 476 | |||
| 477 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
| 478 | if (acquired) { | ||
| 479 | acpi_gbl_global_lock_acquired = TRUE; | ||
| 480 | |||
| 481 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
| 482 | "Acquired hardware Global Lock\n")); | ||
| 483 | break; | ||
| 484 | } | ||
| 471 | 485 | ||
| 472 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | 486 | acpi_ev_global_lock_pending = TRUE; |
| 473 | if (acquired) { | ||
| 474 | 487 | ||
| 475 | /* We got the lock */ | 488 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); |
| 476 | 489 | ||
| 490 | /* | ||
| 491 | * Did not get the lock. The pending bit was set above, and we | ||
| 492 | * must wait until we get the global lock released interrupt. | ||
| 493 | */ | ||
| 477 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | 494 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, |
| 478 | "Acquired hardware Global Lock\n")); | 495 | "Waiting for hardware Global Lock\n")); |
| 479 | 496 | ||
| 480 | acpi_gbl_global_lock_acquired = TRUE; | 497 | /* |
| 481 | return_ACPI_STATUS(AE_OK); | 498 | * Wait for handshake with the global lock interrupt handler. |
| 482 | } | 499 | * This interface releases the interpreter if we must wait. |
| 500 | */ | ||
| 501 | status = acpi_ex_system_wait_semaphore( | ||
| 502 | acpi_gbl_global_lock_semaphore, | ||
| 503 | ACPI_WAIT_FOREVER); | ||
| 483 | 504 | ||
| 484 | /* | 505 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
| 485 | * Did not get the lock. The pending bit was set above, and we must now | ||
| 486 | * wait until we get the global lock released interrupt. | ||
| 487 | */ | ||
| 488 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); | ||
| 489 | 506 | ||
| 490 | /* | 507 | } while (ACPI_SUCCESS(status)); |
| 491 | * Wait for handshake with the global lock interrupt handler. | 508 | |
| 492 | * This interface releases the interpreter if we must wait. | 509 | acpi_ev_global_lock_pending = FALSE; |
| 493 | */ | 510 | |
| 494 | status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, | 511 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); |
| 495 | ACPI_WAIT_FOREVER); | ||
| 496 | 512 | ||
| 497 | return_ACPI_STATUS(status); | 513 | return_ACPI_STATUS(status); |
| 498 | } | 514 | } |
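The evmisc.c changes above replace the "try to acquire from the interrupt handler" scheme with a pending flag protected by a spinlock: the acquire path sets the flag and sleeps on the semaphore, and the release interrupt signals the semaphore only when someone is actually pending. A condensed, illustrative pairing of the two sides (the real code interleaves this with validation and uses acpi_ex_system_wait_semaphore() so the interpreter is released while waiting):

    /* Interrupt side: signal the semaphore only if an acquirer is waiting */
    static u32 sketch_global_lock_irq(void *context)
    {
            acpi_cpu_flags flags;

            flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
            if (acpi_ev_global_lock_pending) {
                    (void)acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
                    acpi_ev_global_lock_pending = FALSE;
            }
            acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
            return (ACPI_INTERRUPT_HANDLED);
    }

    /* Acquire side: try the FACS lock; if it is owned elsewhere, mark
     * pending, wait for the release interrupt, then retry */
    static acpi_status sketch_acquire_hw_global_lock(void)
    {
            acpi_status status = AE_OK;
            acpi_cpu_flags flags;
            u8 acquired = FALSE;

            flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
            do {
                    ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
                    if (acquired) {
                            acpi_gbl_global_lock_acquired = TRUE;
                            break;
                    }

                    acpi_ev_global_lock_pending = TRUE;
                    acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);

                    status = acpi_ex_system_wait_semaphore(
                                    acpi_gbl_global_lock_semaphore,
                                    ACPI_WAIT_FOREVER);

                    flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
            } while (ACPI_SUCCESS(status));

            acpi_ev_global_lock_pending = FALSE;
            acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
            return (status);
    }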
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 36af222cac65..1226689bdb1b 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
| @@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler) | |||
| 92 | 92 | ||
| 93 | ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | 93 | ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) |
| 94 | #endif /* ACPI_FUTURE_USAGE */ | 94 | #endif /* ACPI_FUTURE_USAGE */ |
| 95 | |||
| 96 | /******************************************************************************* | ||
| 97 | * | ||
| 98 | * FUNCTION: acpi_install_global_event_handler | ||
| 99 | * | ||
| 100 | * PARAMETERS: Handler - Pointer to the global event handler function | ||
| 101 | * Context - Value passed to the handler on each event | ||
| 102 | * | ||
| 103 | * RETURN: Status | ||
| 104 | * | ||
| 105 | * DESCRIPTION: Saves the pointer to the handler function. The global handler | ||
| 106 | * is invoked upon each incoming GPE and Fixed Event. It is | ||
| 107 | * invoked at interrupt level at the time of the event dispatch. | ||
| 108 | * Can be used to update event counters, etc. | ||
| 109 | * | ||
| 110 | ******************************************************************************/ | ||
| 111 | acpi_status | ||
| 112 | acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) | ||
| 113 | { | ||
| 114 | acpi_status status; | ||
| 115 | |||
| 116 | ACPI_FUNCTION_TRACE(acpi_install_global_event_handler); | ||
| 117 | |||
| 118 | /* Parameter validation */ | ||
| 119 | |||
| 120 | if (!handler) { | ||
| 121 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 122 | } | ||
| 123 | |||
| 124 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 125 | if (ACPI_FAILURE(status)) { | ||
| 126 | return_ACPI_STATUS(status); | ||
| 127 | } | ||
| 128 | |||
| 129 | /* Don't allow two handlers. */ | ||
| 130 | |||
| 131 | if (acpi_gbl_global_event_handler) { | ||
| 132 | status = AE_ALREADY_EXISTS; | ||
| 133 | goto cleanup; | ||
| 134 | } | ||
| 135 | |||
| 136 | acpi_gbl_global_event_handler = handler; | ||
| 137 | acpi_gbl_global_event_handler_context = context; | ||
| 138 | |||
| 139 | cleanup: | ||
| 140 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 141 | return_ACPI_STATUS(status); | ||
| 142 | } | ||
| 143 | |||
| 144 | ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler) | ||
| 145 | |||
| 95 | /******************************************************************************* | 146 | /******************************************************************************* |
| 96 | * | 147 | * |
| 97 | * FUNCTION: acpi_install_fixed_event_handler | 148 | * FUNCTION: acpi_install_fixed_event_handler |
| @@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler) | |||
| 671 | acpi_status | 722 | acpi_status |
| 672 | acpi_install_gpe_handler(acpi_handle gpe_device, | 723 | acpi_install_gpe_handler(acpi_handle gpe_device, |
| 673 | u32 gpe_number, | 724 | u32 gpe_number, |
| 674 | u32 type, acpi_event_handler address, void *context) | 725 | u32 type, acpi_gpe_handler address, void *context) |
| 675 | { | 726 | { |
| 676 | struct acpi_gpe_event_info *gpe_event_info; | 727 | struct acpi_gpe_event_info *gpe_event_info; |
| 677 | struct acpi_handler_info *handler; | 728 | struct acpi_gpe_handler_info *handler; |
| 678 | acpi_status status; | 729 | acpi_status status; |
| 679 | acpi_cpu_flags flags; | 730 | acpi_cpu_flags flags; |
| 680 | 731 | ||
| @@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
| 693 | 744 | ||
| 694 | /* Allocate memory for the handler object */ | 745 | /* Allocate memory for the handler object */ |
| 695 | 746 | ||
| 696 | handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); | 747 | handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); |
| 697 | if (!handler) { | 748 | if (!handler) { |
| 698 | status = AE_NO_MEMORY; | 749 | status = AE_NO_MEMORY; |
| 699 | goto unlock_and_exit; | 750 | goto unlock_and_exit; |
| @@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
| 722 | handler->address = address; | 773 | handler->address = address; |
| 723 | handler->context = context; | 774 | handler->context = context; |
| 724 | handler->method_node = gpe_event_info->dispatch.method_node; | 775 | handler->method_node = gpe_event_info->dispatch.method_node; |
| 725 | handler->orig_flags = gpe_event_info->flags & | 776 | handler->original_flags = gpe_event_info->flags & |
| 726 | (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); | 777 | (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); |
| 727 | 778 | ||
| 728 | /* | 779 | /* |
| @@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
| 731 | * disabled now to avoid spurious execution of the handler. | 782 | * disabled now to avoid spurious execution of the handler. |
| 732 | */ | 783 | */ |
| 733 | 784 | ||
| 734 | if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) | 785 | if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) |
| 735 | && gpe_event_info->runtime_count) { | 786 | && gpe_event_info->runtime_count) { |
| 736 | handler->orig_enabled = 1; | 787 | handler->originally_enabled = 1; |
| 737 | (void)acpi_raw_disable_gpe(gpe_event_info); | 788 | (void)acpi_ev_remove_gpe_reference(gpe_event_info); |
| 738 | } | 789 | } |
| 739 | 790 | ||
| 740 | /* Install the handler */ | 791 | /* Install the handler */ |
| @@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) | |||
| 777 | ******************************************************************************/ | 828 | ******************************************************************************/ |
| 778 | acpi_status | 829 | acpi_status |
| 779 | acpi_remove_gpe_handler(acpi_handle gpe_device, | 830 | acpi_remove_gpe_handler(acpi_handle gpe_device, |
| 780 | u32 gpe_number, acpi_event_handler address) | 831 | u32 gpe_number, acpi_gpe_handler address) |
| 781 | { | 832 | { |
| 782 | struct acpi_gpe_event_info *gpe_event_info; | 833 | struct acpi_gpe_event_info *gpe_event_info; |
| 783 | struct acpi_handler_info *handler; | 834 | struct acpi_gpe_handler_info *handler; |
| 784 | acpi_status status; | 835 | acpi_status status; |
| 785 | acpi_cpu_flags flags; | 836 | acpi_cpu_flags flags; |
| 786 | 837 | ||
| @@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
| 835 | gpe_event_info->dispatch.method_node = handler->method_node; | 886 | gpe_event_info->dispatch.method_node = handler->method_node; |
| 836 | gpe_event_info->flags &= | 887 | gpe_event_info->flags &= |
| 837 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); | 888 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); |
| 838 | gpe_event_info->flags |= handler->orig_flags; | 889 | gpe_event_info->flags |= handler->original_flags; |
| 839 | 890 | ||
| 840 | /* | 891 | /* |
| 841 | * If the GPE was previously associated with a method and it was | 892 | * If the GPE was previously associated with a method and it was |
| @@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
| 843 | * post-initialization configuration. | 894 | * post-initialization configuration. |
| 844 | */ | 895 | */ |
| 845 | 896 | ||
| 846 | if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) | 897 | if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) |
| 847 | && handler->orig_enabled) | 898 | && handler->originally_enabled) |
| 848 | (void)acpi_raw_enable_gpe(gpe_event_info); | 899 | (void)acpi_ev_add_gpe_reference(gpe_event_info); |
| 849 | 900 | ||
| 850 | /* Now we can free the handler object */ | 901 | /* Now we can free the handler object */ |
| 851 | 902 | ||
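The evxface.c hunks above add acpi_install_global_event_handler() and retype the GPE handler interfaces to acpi_gpe_handler, whose three-argument prototype and ACPI_REENABLE_GPE return flag are visible in the evgpe.c dispatch code earlier in this patch. A hypothetical usage sketch follows; the global handler's void return type, the ACPI_GPE_LEVEL_TRIGGERED trigger type, and all driver-side names are assumptions for illustration.

    static u64 gpe_event_count;

    /* Global hook: runs at interrupt level on every GPE/fixed event, so it
     * should only do lightweight work such as counting. */
    static void count_gpe_events(u32 event_type, acpi_handle device,
                                 u32 event_number, void *context)
    {
            u64 *count = context;

            if (event_type == ACPI_EVENT_TYPE_GPE) {
                    (*count)++;
            }
    }

    static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                              void *context)
    {
            /* ... service the event ... */

            /* The GPE was disabled before dispatch; returning
             * ACPI_REENABLE_GPE asks ACPICA to clear (if level-triggered)
             * and re-enable it via acpi_ev_finish_gpe(). */
            return (ACPI_REENABLE_GPE);
    }

    static acpi_status example_install(acpi_handle gpe_device, u32 gpe_number)
    {
            acpi_status status;

            /* Only one global handler may exist; a second install returns
             * AE_ALREADY_EXISTS (see the hunk above). */
            status = acpi_install_global_event_handler(count_gpe_events,
                                                       &gpe_event_count);
            if (ACPI_FAILURE(status)) {
                    return (status);
            }

            /* ACPI_GPE_LEVEL_TRIGGERED is assumed for this example */
            return acpi_install_gpe_handler(gpe_device, gpe_number,
                                            ACPI_GPE_LEVEL_TRIGGERED,
                                            my_gpe_handler, NULL);
    }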
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index a1dabe3fd8ae..90488c1e0f3d 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
| @@ -43,18 +43,11 @@ | |||
| 43 | 43 | ||
| 44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
| 45 | #include "accommon.h" | 45 | #include "accommon.h" |
| 46 | #include "acevents.h" | ||
| 47 | #include "acnamesp.h" | ||
| 48 | #include "actables.h" | 46 | #include "actables.h" |
| 49 | 47 | ||
| 50 | #define _COMPONENT ACPI_EVENTS | 48 | #define _COMPONENT ACPI_EVENTS |
| 51 | ACPI_MODULE_NAME("evxfevnt") | 49 | ACPI_MODULE_NAME("evxfevnt") |
| 52 | 50 | ||
| 53 | /* Local prototypes */ | ||
| 54 | static acpi_status | ||
| 55 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
| 56 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
| 57 | |||
| 58 | /******************************************************************************* | 51 | /******************************************************************************* |
| 59 | * | 52 | * |
| 60 | * FUNCTION: acpi_enable | 53 | * FUNCTION: acpi_enable |
| @@ -213,185 +206,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) | |||
| 213 | 206 | ||
| 214 | /******************************************************************************* | 207 | /******************************************************************************* |
| 215 | * | 208 | * |
| 216 | * FUNCTION: acpi_gpe_wakeup | ||
| 217 | * | ||
| 218 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 219 | * gpe_number - GPE level within the GPE block | ||
| 220 | * Action - Enable or Disable | ||
| 221 | * | ||
| 222 | * RETURN: Status | ||
| 223 | * | ||
| 224 | * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. | ||
| 225 | * | ||
| 226 | ******************************************************************************/ | ||
| 227 | acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
| 228 | { | ||
| 229 | acpi_status status = AE_OK; | ||
| 230 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 231 | struct acpi_gpe_register_info *gpe_register_info; | ||
| 232 | acpi_cpu_flags flags; | ||
| 233 | u32 register_bit; | ||
| 234 | |||
| 235 | ACPI_FUNCTION_TRACE(acpi_gpe_wakeup); | ||
| 236 | |||
| 237 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 238 | |||
| 239 | /* Ensure that we have a valid GPE number */ | ||
| 240 | |||
| 241 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 242 | if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
| 243 | status = AE_BAD_PARAMETER; | ||
| 244 | goto unlock_and_exit; | ||
| 245 | } | ||
| 246 | |||
| 247 | gpe_register_info = gpe_event_info->register_info; | ||
| 248 | if (!gpe_register_info) { | ||
| 249 | status = AE_NOT_EXIST; | ||
| 250 | goto unlock_and_exit; | ||
| 251 | } | ||
| 252 | |||
| 253 | register_bit = | ||
| 254 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
| 255 | |||
| 256 | /* Perform the action */ | ||
| 257 | |||
| 258 | switch (action) { | ||
| 259 | case ACPI_GPE_ENABLE: | ||
| 260 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, | ||
| 261 | (u8)register_bit); | ||
| 262 | break; | ||
| 263 | |||
| 264 | case ACPI_GPE_DISABLE: | ||
| 265 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
| 266 | (u8)register_bit); | ||
| 267 | break; | ||
| 268 | |||
| 269 | default: | ||
| 270 | ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); | ||
| 271 | status = AE_BAD_PARAMETER; | ||
| 272 | break; | ||
| 273 | } | ||
| 274 | |||
| 275 | unlock_and_exit: | ||
| 276 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 277 | return_ACPI_STATUS(status); | ||
| 278 | } | ||
| 279 | |||
| 280 | ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup) | ||
| 281 | |||
| 282 | /******************************************************************************* | ||
| 283 | * | ||
| 284 | * FUNCTION: acpi_enable_gpe | ||
| 285 | * | ||
| 286 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 287 | * gpe_number - GPE level within the GPE block | ||
| 288 | * | ||
| 289 | * RETURN: Status | ||
| 290 | * | ||
| 291 | * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is | ||
| 292 | * hardware-enabled. | ||
| 293 | * | ||
| 294 | ******************************************************************************/ | ||
| 295 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 296 | { | ||
| 297 | acpi_status status = AE_BAD_PARAMETER; | ||
| 298 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 299 | acpi_cpu_flags flags; | ||
| 300 | |||
| 301 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | ||
| 302 | |||
| 303 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 304 | |||
| 305 | /* Ensure that we have a valid GPE number */ | ||
| 306 | |||
| 307 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 308 | if (gpe_event_info) { | ||
| 309 | status = acpi_raw_enable_gpe(gpe_event_info); | ||
| 310 | } | ||
| 311 | |||
| 312 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 313 | return_ACPI_STATUS(status); | ||
| 314 | } | ||
| 315 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | ||
| 316 | |||
| 317 | /******************************************************************************* | ||
| 318 | * | ||
| 319 | * FUNCTION: acpi_disable_gpe | ||
| 320 | * | ||
| 321 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 322 | * gpe_number - GPE level within the GPE block | ||
| 323 | * | ||
| 324 | * RETURN: Status | ||
| 325 | * | ||
| 326 | * DESCRIPTION: Remove a reference to a GPE. When the last reference is | ||
| 327 | * removed, only then is the GPE disabled (for runtime GPEs), or | ||
| 328 | * the GPE mask bit disabled (for wake GPEs) | ||
| 329 | * | ||
| 330 | ******************************************************************************/ | ||
| 331 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 332 | { | ||
| 333 | acpi_status status = AE_BAD_PARAMETER; | ||
| 334 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 335 | acpi_cpu_flags flags; | ||
| 336 | |||
| 337 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | ||
| 338 | |||
| 339 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 340 | |||
| 341 | /* Ensure that we have a valid GPE number */ | ||
| 342 | |||
| 343 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 344 | if (gpe_event_info) { | ||
| 345 | status = acpi_raw_disable_gpe(gpe_event_info) ; | ||
| 346 | } | ||
| 347 | |||
| 348 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 349 | return_ACPI_STATUS(status); | ||
| 350 | } | ||
| 351 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | ||
| 352 | |||
| 353 | /******************************************************************************* | ||
| 354 | * | ||
| 355 | * FUNCTION: acpi_gpe_can_wake | ||
| 356 | * | ||
| 357 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 358 | * gpe_number - GPE level within the GPE block | ||
| 359 | * | ||
| 360 | * RETURN: Status | ||
| 361 | * | ||
| 362 | * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE | ||
| 363 | * has a corresponding method and is currently enabled, disable it | ||
| 364 | * (GPEs with corresponding methods are enabled unconditionally | ||
| 365 | * during initialization, but GPEs that can wake up are expected | ||
| 366 | * to be initially disabled). | ||
| 367 | * | ||
| 368 | ******************************************************************************/ | ||
| 369 | acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) | ||
| 370 | { | ||
| 371 | acpi_status status = AE_OK; | ||
| 372 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 373 | acpi_cpu_flags flags; | ||
| 374 | |||
| 375 | ACPI_FUNCTION_TRACE(acpi_gpe_can_wake); | ||
| 376 | |||
| 377 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 378 | |||
| 379 | /* Ensure that we have a valid GPE number */ | ||
| 380 | |||
| 381 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 382 | if (gpe_event_info) { | ||
| 383 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
| 384 | } else { | ||
| 385 | status = AE_BAD_PARAMETER; | ||
| 386 | } | ||
| 387 | |||
| 388 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 389 | return_ACPI_STATUS(status); | ||
| 390 | } | ||
| 391 | ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake) | ||
| 392 | |||
| 393 | /******************************************************************************* | ||
| 394 | * | ||
| 395 | * FUNCTION: acpi_disable_event | 209 | * FUNCTION: acpi_disable_event |
| 396 | * | 210 | * |
| 397 | * PARAMETERS: Event - The fixed event to be disabled | 211 | * PARAMETERS: Event - The fixed event to be disabled |
| @@ -483,44 +297,6 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event) | |||
| 483 | 297 | ||
| 484 | /******************************************************************************* | 298 | /******************************************************************************* |
| 485 | * | 299 | * |
| 486 | * FUNCTION: acpi_clear_gpe | ||
| 487 | * | ||
| 488 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 489 | * gpe_number - GPE level within the GPE block | ||
| 490 | * | ||
| 491 | * RETURN: Status | ||
| 492 | * | ||
| 493 | * DESCRIPTION: Clear an ACPI event (general purpose) | ||
| 494 | * | ||
| 495 | ******************************************************************************/ | ||
| 496 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 497 | { | ||
| 498 | acpi_status status = AE_OK; | ||
| 499 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 500 | acpi_cpu_flags flags; | ||
| 501 | |||
| 502 | ACPI_FUNCTION_TRACE(acpi_clear_gpe); | ||
| 503 | |||
| 504 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 505 | |||
| 506 | /* Ensure that we have a valid GPE number */ | ||
| 507 | |||
| 508 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 509 | if (!gpe_event_info) { | ||
| 510 | status = AE_BAD_PARAMETER; | ||
| 511 | goto unlock_and_exit; | ||
| 512 | } | ||
| 513 | |||
| 514 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
| 515 | |||
| 516 | unlock_and_exit: | ||
| 517 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 518 | return_ACPI_STATUS(status); | ||
| 519 | } | ||
| 520 | |||
| 521 | ACPI_EXPORT_SYMBOL(acpi_clear_gpe) | ||
| 522 | /******************************************************************************* | ||
| 523 | * | ||
| 524 | * FUNCTION: acpi_get_event_status | 300 | * FUNCTION: acpi_get_event_status |
| 525 | * | 301 | * |
| 526 | * PARAMETERS: Event - The fixed event | 302 | * PARAMETERS: Event - The fixed event |
| @@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) | |||
| 575 | } | 351 | } |
| 576 | 352 | ||
| 577 | ACPI_EXPORT_SYMBOL(acpi_get_event_status) | 353 | ACPI_EXPORT_SYMBOL(acpi_get_event_status) |
| 578 | |||
| 579 | /******************************************************************************* | ||
| 580 | * | ||
| 581 | * FUNCTION: acpi_get_gpe_status | ||
| 582 | * | ||
| 583 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 584 | * gpe_number - GPE level within the GPE block | ||
| 585 | * event_status - Where the current status of the event will | ||
| 586 | * be returned | ||
| 587 | * | ||
| 588 | * RETURN: Status | ||
| 589 | * | ||
| 590 | * DESCRIPTION: Get status of an event (general purpose) | ||
| 591 | * | ||
| 592 | ******************************************************************************/ | ||
| 593 | acpi_status | ||
| 594 | acpi_get_gpe_status(acpi_handle gpe_device, | ||
| 595 | u32 gpe_number, acpi_event_status *event_status) | ||
| 596 | { | ||
| 597 | acpi_status status = AE_OK; | ||
| 598 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 599 | acpi_cpu_flags flags; | ||
| 600 | |||
| 601 | ACPI_FUNCTION_TRACE(acpi_get_gpe_status); | ||
| 602 | |||
| 603 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 604 | |||
| 605 | /* Ensure that we have a valid GPE number */ | ||
| 606 | |||
| 607 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 608 | if (!gpe_event_info) { | ||
| 609 | status = AE_BAD_PARAMETER; | ||
| 610 | goto unlock_and_exit; | ||
| 611 | } | ||
| 612 | |||
| 613 | /* Obtain status on the requested GPE number */ | ||
| 614 | |||
| 615 | status = acpi_hw_get_gpe_status(gpe_event_info, event_status); | ||
| 616 | |||
| 617 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
| 618 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
| 619 | |||
| 620 | unlock_and_exit: | ||
| 621 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 622 | return_ACPI_STATUS(status); | ||
| 623 | } | ||
| 624 | |||
| 625 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | ||
| 626 | /******************************************************************************* | ||
| 627 | * | ||
| 628 | * FUNCTION: acpi_install_gpe_block | ||
| 629 | * | ||
| 630 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
| 631 | * gpe_block_address - Address and space_id | ||
| 632 | * register_count - Number of GPE register pairs in the block | ||
| 633 | * interrupt_number - H/W interrupt for the block | ||
| 634 | * | ||
| 635 | * RETURN: Status | ||
| 636 | * | ||
| 637 | * DESCRIPTION: Create and Install a block of GPE registers | ||
| 638 | * | ||
| 639 | ******************************************************************************/ | ||
| 640 | acpi_status | ||
| 641 | acpi_install_gpe_block(acpi_handle gpe_device, | ||
| 642 | struct acpi_generic_address *gpe_block_address, | ||
| 643 | u32 register_count, u32 interrupt_number) | ||
| 644 | { | ||
| 645 | acpi_status status = AE_OK; | ||
| 646 | union acpi_operand_object *obj_desc; | ||
| 647 | struct acpi_namespace_node *node; | ||
| 648 | struct acpi_gpe_block_info *gpe_block; | ||
| 649 | |||
| 650 | ACPI_FUNCTION_TRACE(acpi_install_gpe_block); | ||
| 651 | |||
| 652 | if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { | ||
| 653 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 654 | } | ||
| 655 | |||
| 656 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
| 657 | if (ACPI_FAILURE(status)) { | ||
| 658 | return (status); | ||
| 659 | } | ||
| 660 | |||
| 661 | node = acpi_ns_validate_handle(gpe_device); | ||
| 662 | if (!node) { | ||
| 663 | status = AE_BAD_PARAMETER; | ||
| 664 | goto unlock_and_exit; | ||
| 665 | } | ||
| 666 | |||
| 667 | /* | ||
| 668 | * For user-installed GPE Block Devices, the gpe_block_base_number | ||
| 669 | * is always zero | ||
| 670 | */ | ||
| 671 | status = | ||
| 672 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | ||
| 673 | interrupt_number, &gpe_block); | ||
| 674 | if (ACPI_FAILURE(status)) { | ||
| 675 | goto unlock_and_exit; | ||
| 676 | } | ||
| 677 | |||
| 678 | /* Install block in the device_object attached to the node */ | ||
| 679 | |||
| 680 | obj_desc = acpi_ns_get_attached_object(node); | ||
| 681 | if (!obj_desc) { | ||
| 682 | |||
| 683 | /* | ||
| 684 | * No object, create a new one (Device nodes do not always have | ||
| 685 | * an attached object) | ||
| 686 | */ | ||
| 687 | obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); | ||
| 688 | if (!obj_desc) { | ||
| 689 | status = AE_NO_MEMORY; | ||
| 690 | goto unlock_and_exit; | ||
| 691 | } | ||
| 692 | |||
| 693 | status = | ||
| 694 | acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); | ||
| 695 | |||
| 696 | /* Remove local reference to the object */ | ||
| 697 | |||
| 698 | acpi_ut_remove_reference(obj_desc); | ||
| 699 | |||
| 700 | if (ACPI_FAILURE(status)) { | ||
| 701 | goto unlock_and_exit; | ||
| 702 | } | ||
| 703 | } | ||
| 704 | |||
| 705 | /* Now install the GPE block in the device_object */ | ||
| 706 | |||
| 707 | obj_desc->device.gpe_block = gpe_block; | ||
| 708 | |||
| 709 | unlock_and_exit: | ||
| 710 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
| 711 | return_ACPI_STATUS(status); | ||
| 712 | } | ||
| 713 | |||
| 714 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) | ||
| 715 | |||
| 716 | /******************************************************************************* | ||
| 717 | * | ||
| 718 | * FUNCTION: acpi_remove_gpe_block | ||
| 719 | * | ||
| 720 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
| 721 | * | ||
| 722 | * RETURN: Status | ||
| 723 | * | ||
| 724 | * DESCRIPTION: Remove a previously installed block of GPE registers | ||
| 725 | * | ||
| 726 | ******************************************************************************/ | ||
| 727 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | ||
| 728 | { | ||
| 729 | union acpi_operand_object *obj_desc; | ||
| 730 | acpi_status status; | ||
| 731 | struct acpi_namespace_node *node; | ||
| 732 | |||
| 733 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); | ||
| 734 | |||
| 735 | if (!gpe_device) { | ||
| 736 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 737 | } | ||
| 738 | |||
| 739 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
| 740 | if (ACPI_FAILURE(status)) { | ||
| 741 | return (status); | ||
| 742 | } | ||
| 743 | |||
| 744 | node = acpi_ns_validate_handle(gpe_device); | ||
| 745 | if (!node) { | ||
| 746 | status = AE_BAD_PARAMETER; | ||
| 747 | goto unlock_and_exit; | ||
| 748 | } | ||
| 749 | |||
| 750 | /* Get the device_object attached to the node */ | ||
| 751 | |||
| 752 | obj_desc = acpi_ns_get_attached_object(node); | ||
| 753 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
| 754 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
| 755 | } | ||
| 756 | |||
| 757 | /* Delete the GPE block (but not the device_object) */ | ||
| 758 | |||
| 759 | status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); | ||
| 760 | if (ACPI_SUCCESS(status)) { | ||
| 761 | obj_desc->device.gpe_block = NULL; | ||
| 762 | } | ||
| 763 | |||
| 764 | unlock_and_exit: | ||
| 765 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
| 766 | return_ACPI_STATUS(status); | ||
| 767 | } | ||
| 768 | |||
| 769 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | ||
| 770 | |||
| 771 | /******************************************************************************* | ||
| 772 | * | ||
| 773 | * FUNCTION: acpi_get_gpe_device | ||
| 774 | * | ||
| 775 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
| 776 | * gpe_device - Where the parent GPE Device is returned | ||
| 777 | * | ||
| 778 | * RETURN: Status | ||
| 779 | * | ||
| 780 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
| 781 | * gpe device indicates that the gpe number is contained in one of | ||
| 782 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | ||
| 783 | * | ||
| 784 | ******************************************************************************/ | ||
| 785 | acpi_status | ||
| 786 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
| 787 | { | ||
| 788 | struct acpi_gpe_device_info info; | ||
| 789 | acpi_status status; | ||
| 790 | |||
| 791 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
| 792 | |||
| 793 | if (!gpe_device) { | ||
| 794 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 795 | } | ||
| 796 | |||
| 797 | if (index >= acpi_current_gpe_count) { | ||
| 798 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
| 799 | } | ||
| 800 | |||
| 801 | /* Setup and walk the GPE list */ | ||
| 802 | |||
| 803 | info.index = index; | ||
| 804 | info.status = AE_NOT_EXIST; | ||
| 805 | info.gpe_device = NULL; | ||
| 806 | info.next_block_base_index = 0; | ||
| 807 | |||
| 808 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
| 809 | if (ACPI_FAILURE(status)) { | ||
| 810 | return_ACPI_STATUS(status); | ||
| 811 | } | ||
| 812 | |||
| 813 | *gpe_device = info.gpe_device; | ||
| 814 | return_ACPI_STATUS(info.status); | ||
| 815 | } | ||
| 816 | |||
| 817 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
| 818 | |||
| 819 | /******************************************************************************* | ||
| 820 | * | ||
| 821 | * FUNCTION: acpi_ev_get_gpe_device | ||
| 822 | * | ||
| 823 | * PARAMETERS: GPE_WALK_CALLBACK | ||
| 824 | * | ||
| 825 | * RETURN: Status | ||
| 826 | * | ||
| 827 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
| 828 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
| 829 | * | ||
| 830 | ******************************************************************************/ | ||
| 831 | static acpi_status | ||
| 832 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
| 833 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
| 834 | { | ||
| 835 | struct acpi_gpe_device_info *info = context; | ||
| 836 | |||
| 837 | /* Increment Index by the number of GPEs in this block */ | ||
| 838 | |||
| 839 | info->next_block_base_index += gpe_block->gpe_count; | ||
| 840 | |||
| 841 | if (info->index < info->next_block_base_index) { | ||
| 842 | /* | ||
| 843 | * The GPE index is within this block, get the node. Leave the node | ||
| 844 | * NULL for the FADT-defined GPEs | ||
| 845 | */ | ||
| 846 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
| 847 | info->gpe_device = gpe_block->node; | ||
| 848 | } | ||
| 849 | |||
| 850 | info->status = AE_OK; | ||
| 851 | return (AE_CTRL_END); | ||
| 852 | } | ||
| 853 | |||
| 854 | return (AE_OK); | ||
| 855 | } | ||
| 856 | |||
| 857 | /****************************************************************************** | ||
| 858 | * | ||
| 859 | * FUNCTION: acpi_disable_all_gpes | ||
| 860 | * | ||
| 861 | * PARAMETERS: None | ||
| 862 | * | ||
| 863 | * RETURN: Status | ||
| 864 | * | ||
| 865 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
| 866 | * | ||
| 867 | ******************************************************************************/ | ||
| 868 | |||
| 869 | acpi_status acpi_disable_all_gpes(void) | ||
| 870 | { | ||
| 871 | acpi_status status; | ||
| 872 | |||
| 873 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
| 874 | |||
| 875 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 876 | if (ACPI_FAILURE(status)) { | ||
| 877 | return_ACPI_STATUS(status); | ||
| 878 | } | ||
| 879 | |||
| 880 | status = acpi_hw_disable_all_gpes(); | ||
| 881 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 882 | |||
| 883 | return_ACPI_STATUS(status); | ||
| 884 | } | ||
| 885 | |||
| 886 | /****************************************************************************** | ||
| 887 | * | ||
| 888 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
| 889 | * | ||
| 890 | * PARAMETERS: None | ||
| 891 | * | ||
| 892 | * RETURN: Status | ||
| 893 | * | ||
| 894 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
| 895 | * | ||
| 896 | ******************************************************************************/ | ||
| 897 | |||
| 898 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
| 899 | { | ||
| 900 | acpi_status status; | ||
| 901 | |||
| 902 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
| 903 | |||
| 904 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 905 | if (ACPI_FAILURE(status)) { | ||
| 906 | return_ACPI_STATUS(status); | ||
| 907 | } | ||
| 908 | |||
| 909 | status = acpi_hw_enable_all_runtime_gpes(); | ||
| 910 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 911 | |||
| 912 | return_ACPI_STATUS(status); | ||
| 913 | } | ||
| 914 | |||
| 915 | /****************************************************************************** | ||
| 916 | * | ||
| 917 | * FUNCTION: acpi_update_gpes | ||
| 918 | * | ||
| 919 | * PARAMETERS: None | ||
| 920 | * | ||
| 921 | * RETURN: None | ||
| 922 | * | ||
| 923 | * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and | ||
| 924 | * are not pointed to by any device _PRW methods indicating that | ||
| 925 | * these GPEs are generally intended for system or device wakeup | ||
| 926 | * (such GPEs have to be enabled directly when the devices whose | ||
| 927 | * _PRW methods point to them are set up for wakeup signaling). | ||
| 928 | * | ||
| 929 | ******************************************************************************/ | ||
| 930 | |||
| 931 | acpi_status acpi_update_gpes(void) | ||
| 932 | { | ||
| 933 | acpi_status status; | ||
| 934 | |||
| 935 | ACPI_FUNCTION_TRACE(acpi_update_gpes); | ||
| 936 | |||
| 937 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 938 | if (ACPI_FAILURE(status)) { | ||
| 939 | return_ACPI_STATUS(status); | ||
| 940 | } else if (acpi_all_gpes_initialized) { | ||
| 941 | goto unlock; | ||
| 942 | } | ||
| 943 | |||
| 944 | status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); | ||
| 945 | if (ACPI_SUCCESS(status)) { | ||
| 946 | acpi_all_gpes_initialized = TRUE; | ||
| 947 | } | ||
| 948 | |||
| 949 | unlock: | ||
| 950 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 951 | |||
| 952 | return_ACPI_STATUS(status); | ||
| 953 | } | ||
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c new file mode 100644 index 000000000000..416845bc9c1f --- /dev/null +++ b/drivers/acpi/acpica/evxfgpe.c | |||
| @@ -0,0 +1,669 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * | ||
| 3 | * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) | ||
| 4 | * | ||
| 5 | *****************************************************************************/ | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Copyright (C) 2000 - 2010, Intel Corp. | ||
| 9 | * All rights reserved. | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or without | ||
| 12 | * modification, are permitted provided that the following conditions | ||
| 13 | * are met: | ||
| 14 | * 1. Redistributions of source code must retain the above copyright | ||
| 15 | * notice, this list of conditions, and the following disclaimer, | ||
| 16 | * without modification. | ||
| 17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
| 18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
| 19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
| 20 | * including a substantially similar Disclaimer requirement for further | ||
| 21 | * binary redistribution. | ||
| 22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
| 23 | * of any contributors may be used to endorse or promote products derived | ||
| 24 | * from this software without specific prior written permission. | ||
| 25 | * | ||
| 26 | * Alternatively, this software may be distributed under the terms of the | ||
| 27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 28 | * Software Foundation. | ||
| 29 | * | ||
| 30 | * NO WARRANTY | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
| 34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
| 39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
| 40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGES. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #include <acpi/acpi.h> | ||
| 45 | #include "accommon.h" | ||
| 46 | #include "acevents.h" | ||
| 47 | #include "acnamesp.h" | ||
| 48 | |||
| 49 | #define _COMPONENT ACPI_EVENTS | ||
| 50 | ACPI_MODULE_NAME("evxfgpe") | ||
| 51 | |||
| 52 | /****************************************************************************** | ||
| 53 | * | ||
| 54 | * FUNCTION: acpi_update_all_gpes | ||
| 55 | * | ||
| 56 | * PARAMETERS: None | ||
| 57 | * | ||
| 58 | * RETURN: Status | ||
| 59 | * | ||
| 60 | * DESCRIPTION: Complete GPE initialization and enable all GPEs that have | ||
| 61 | * associated _Lxx or _Exx methods and are not pointed to by any | ||
| 62 | * device _PRW methods (this indicates that these GPEs are | ||
| 63 | * generally intended for system or device wakeup. Such GPEs | ||
| 64 | * have to be enabled directly when the devices whose _PRW | ||
| 65 | * methods point to them are set up for wakeup signaling.) | ||
| 66 | * | ||
| 67 | * NOTE: Should be called after any GPEs are added to the system. Primarily, | ||
| 68 | * after the system _PRW methods have been run, but also after a GPE Block | ||
| 69 | * Device has been added or if any new GPE methods have been added via a | ||
| 70 | * dynamic table load. | ||
| 71 | * | ||
| 72 | ******************************************************************************/ | ||
| 73 | |||
| 74 | acpi_status acpi_update_all_gpes(void) | ||
| 75 | { | ||
| 76 | acpi_status status; | ||
| 77 | |||
| 78 | ACPI_FUNCTION_TRACE(acpi_update_all_gpes); | ||
| 79 | |||
| 80 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 81 | if (ACPI_FAILURE(status)) { | ||
| 82 | return_ACPI_STATUS(status); | ||
| 83 | } | ||
| 84 | |||
| 85 | if (acpi_gbl_all_gpes_initialized) { | ||
| 86 | goto unlock_and_exit; | ||
| 87 | } | ||
| 88 | |||
| 89 | status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); | ||
| 90 | if (ACPI_SUCCESS(status)) { | ||
| 91 | acpi_gbl_all_gpes_initialized = TRUE; | ||
| 92 | } | ||
| 93 | |||
| 94 | unlock_and_exit: | ||
| 95 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 96 | |||
| 97 | return_ACPI_STATUS(status); | ||
| 98 | } | ||
| 99 | |||
| 100 | ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) | ||
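A minimal usage sketch for the interface above, assuming a host call site after _PRW evaluation or a dynamic table load; the wrapper function name is invented for illustration and is not part of this patch.

#include <acpi/acpi.h>

static acpi_status example_finish_gpe_setup(void)
{
        /*
         * Harmless to call more than once: the interface returns early
         * once every GPE block has been initialized (see
         * acpi_gbl_all_gpes_initialized above).
         */
        return acpi_update_all_gpes();
}
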
| 101 | |||
| 102 | /******************************************************************************* | ||
| 103 | * | ||
| 104 | * FUNCTION: acpi_enable_gpe | ||
| 105 | * | ||
| 106 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 107 | * gpe_number - GPE level within the GPE block | ||
| 108 | * | ||
| 109 | * RETURN: Status | ||
| 110 | * | ||
| 111 | * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is | ||
| 112 | * hardware-enabled. | ||
| 113 | * | ||
| 114 | ******************************************************************************/ | ||
| 115 | |||
| 116 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 117 | { | ||
| 118 | acpi_status status = AE_BAD_PARAMETER; | ||
| 119 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 120 | acpi_cpu_flags flags; | ||
| 121 | |||
| 122 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | ||
| 123 | |||
| 124 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 125 | |||
| 126 | /* Ensure that we have a valid GPE number */ | ||
| 127 | |||
| 128 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 129 | if (gpe_event_info) { | ||
| 130 | status = acpi_ev_add_gpe_reference(gpe_event_info); | ||
| 131 | } | ||
| 132 | |||
| 133 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 134 | return_ACPI_STATUS(status); | ||
| 135 | } | ||
| 136 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | ||
| 137 | |||
| 138 | /******************************************************************************* | ||
| 139 | * | ||
| 140 | * FUNCTION: acpi_disable_gpe | ||
| 141 | * | ||
| 142 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 143 | * gpe_number - GPE level within the GPE block | ||
| 144 | * | ||
| 145 | * RETURN: Status | ||
| 146 | * | ||
| 147 | * DESCRIPTION: Remove a reference to a GPE. When the last reference is | ||
| 148 | * removed, only then is the GPE disabled (for runtime GPEs), or | ||
| 149 | * the GPE mask bit disabled (for wake GPEs) | ||
| 150 | * | ||
| 151 | ******************************************************************************/ | ||
| 152 | |||
| 153 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 154 | { | ||
| 155 | acpi_status status = AE_BAD_PARAMETER; | ||
| 156 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 157 | acpi_cpu_flags flags; | ||
| 158 | |||
| 159 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | ||
| 160 | |||
| 161 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 162 | |||
| 163 | /* Ensure that we have a valid GPE number */ | ||
| 164 | |||
| 165 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 166 | if (gpe_event_info) { | ||
| 167 | status = acpi_ev_remove_gpe_reference(gpe_event_info); | ||
| 168 | } | ||
| 169 | |||
| 170 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 171 | return_ACPI_STATUS(status); | ||
| 172 | } | ||
| 173 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | ||
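The two reference-counting interfaces above pair up in caller code roughly as sketched below; the GPE number and the wrapper function are made-up examples, and NULL selects the FADT-defined GPE0/GPE1 blocks as documented in the headers above.

#include <acpi/acpi.h>

#define EXAMPLE_GPE_NUMBER 0x16         /* hypothetical GPE in the FADT blocks */

static acpi_status example_use_runtime_gpe(void)
{
        acpi_status status;

        /* First reference hardware-enables the GPE */
        status = acpi_enable_gpe(NULL, EXAMPLE_GPE_NUMBER);
        if (ACPI_FAILURE(status))
                return status;

        /* ... GPE-driven work runs here ... */

        /* Dropping the last reference disables it again */
        return acpi_disable_gpe(NULL, EXAMPLE_GPE_NUMBER);
}
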
| 174 | |||
| 175 | |||
| 176 | /******************************************************************************* | ||
| 177 | * | ||
| 178 | * FUNCTION: acpi_setup_gpe_for_wake | ||
| 179 | * | ||
| 180 | * PARAMETERS: wake_device - Device associated with the GPE (via _PRW) | ||
| 181 | * gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 182 | * gpe_number - GPE level within the GPE block | ||
| 183 | * | ||
| 184 | * RETURN: Status | ||
| 185 | * | ||
| 186 | * DESCRIPTION: Mark a GPE as having the ability to wake the system. This | ||
| 187 | * interface is intended to be used as the host executes the | ||
| 188 | * _PRW methods (Power Resources for Wake) in the system tables. | ||
| 189 | * Each _PRW appears under a Device Object (The wake_device), and | ||
| 190 | * contains the info for the wake GPE associated with the | ||
| 191 | * wake_device. | ||
| 192 | * | ||
| 193 | ******************************************************************************/ | ||
| 194 | acpi_status | ||
| 195 | acpi_setup_gpe_for_wake(acpi_handle wake_device, | ||
| 196 | acpi_handle gpe_device, u32 gpe_number) | ||
| 197 | { | ||
| 198 | acpi_status status = AE_BAD_PARAMETER; | ||
| 199 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 200 | struct acpi_namespace_node *device_node; | ||
| 201 | acpi_cpu_flags flags; | ||
| 202 | |||
| 203 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); | ||
| 204 | |||
| 205 | /* Parameter Validation */ | ||
| 206 | |||
| 207 | if (!wake_device) { | ||
| 208 | /* | ||
| 209 | * By forcing wake_device to be valid, we automatically enable the | ||
| 210 | * implicit notify feature on all hosts. | ||
| 211 | */ | ||
| 212 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 213 | } | ||
| 214 | |||
| 215 | /* Validate wake_device is of type Device */ | ||
| 216 | |||
| 217 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
| 218 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
| 219 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 220 | } | ||
| 221 | |||
| 222 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 223 | |||
| 224 | /* Ensure that we have a valid GPE number */ | ||
| 225 | |||
| 226 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 227 | if (gpe_event_info) { | ||
| 228 | /* | ||
| 229 | * If there is no method or handler for this GPE, then the | ||
| 230 | * wake_device will be notified whenever this GPE fires (aka | ||
| 231 | * "implicit notify") Note: The GPE is assumed to be | ||
| 232 | * level-triggered (for windows compatibility). | ||
| 233 | */ | ||
| 234 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
| 235 | ACPI_GPE_DISPATCH_NONE) { | ||
| 236 | gpe_event_info->flags = | ||
| 237 | (ACPI_GPE_DISPATCH_NOTIFY | | ||
| 238 | ACPI_GPE_LEVEL_TRIGGERED); | ||
| 239 | gpe_event_info->dispatch.device_node = device_node; | ||
| 240 | } | ||
| 241 | |||
| 242 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
| 243 | status = AE_OK; | ||
| 244 | } | ||
| 245 | |||
| 246 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 247 | return_ACPI_STATUS(status); | ||
| 248 | } | ||
| 249 | ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) | ||
| 250 | |||
| 251 | /******************************************************************************* | ||
| 252 | * | ||
| 253 | * FUNCTION: acpi_set_gpe_wake_mask | ||
| 254 | * | ||
| 255 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 256 | * gpe_number - GPE level within the GPE block | ||
| 257 | * Action - Enable or Disable | ||
| 258 | * | ||
| 259 | * RETURN: Status | ||
| 260 | * | ||
| 261 | * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must | ||
| 262 | * already be marked as a WAKE GPE. | ||
| 263 | * | ||
| 264 | ******************************************************************************/ | ||
| 265 | |||
| 266 | acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
| 267 | { | ||
| 268 | acpi_status status = AE_OK; | ||
| 269 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 270 | struct acpi_gpe_register_info *gpe_register_info; | ||
| 271 | acpi_cpu_flags flags; | ||
| 272 | u32 register_bit; | ||
| 273 | |||
| 274 | ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask); | ||
| 275 | |||
| 276 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 277 | |||
| 278 | /* | ||
| 279 | * Ensure that we have a valid GPE number and that this GPE is in | ||
| 280 | * fact a wake GPE | ||
| 281 | */ | ||
| 282 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 283 | if (!gpe_event_info) { | ||
| 284 | status = AE_BAD_PARAMETER; | ||
| 285 | goto unlock_and_exit; | ||
| 286 | } | ||
| 287 | |||
| 288 | if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
| 289 | status = AE_TYPE; | ||
| 290 | goto unlock_and_exit; | ||
| 291 | } | ||
| 292 | |||
| 293 | gpe_register_info = gpe_event_info->register_info; | ||
| 294 | if (!gpe_register_info) { | ||
| 295 | status = AE_NOT_EXIST; | ||
| 296 | goto unlock_and_exit; | ||
| 297 | } | ||
| 298 | |||
| 299 | register_bit = | ||
| 300 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
| 301 | |||
| 302 | /* Perform the action */ | ||
| 303 | |||
| 304 | switch (action) { | ||
| 305 | case ACPI_GPE_ENABLE: | ||
| 306 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, | ||
| 307 | (u8)register_bit); | ||
| 308 | break; | ||
| 309 | |||
| 310 | case ACPI_GPE_DISABLE: | ||
| 311 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
| 312 | (u8)register_bit); | ||
| 313 | break; | ||
| 314 | |||
| 315 | default: | ||
| 316 | ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); | ||
| 317 | status = AE_BAD_PARAMETER; | ||
| 318 | break; | ||
| 319 | } | ||
| 320 | |||
| 321 | unlock_and_exit: | ||
| 322 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 323 | return_ACPI_STATUS(status); | ||
| 324 | } | ||
| 325 | |||
| 326 | ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask) | ||
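Taken together, acpi_setup_gpe_for_wake() and acpi_set_gpe_wake_mask() give the host the wake flow sketched below; the handles, the wrapper name, and the split between _PRW parsing and suspend preparation are assumptions for illustration.

#include <acpi/acpi.h>

static acpi_status example_arm_wake_gpe(acpi_handle wake_device,
                                        acpi_handle gpe_device, u32 gpe_number)
{
        acpi_status status;

        /*
         * While walking _PRW: mark the GPE wake-capable and, if it has no
         * handler or method, set up the implicit notify to wake_device.
         */
        status = acpi_setup_gpe_for_wake(wake_device, gpe_device, gpe_number);
        if (ACPI_FAILURE(status))
                return status;

        /* Later, when the device is armed for wakeup, set its wake mask bit */
        return acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_ENABLE);
}
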
| 327 | |||
| 328 | /******************************************************************************* | ||
| 329 | * | ||
| 330 | * FUNCTION: acpi_clear_gpe | ||
| 331 | * | ||
| 332 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 333 | * gpe_number - GPE level within the GPE block | ||
| 334 | * | ||
| 335 | * RETURN: Status | ||
| 336 | * | ||
| 337 | * DESCRIPTION: Clear an ACPI event (general purpose) | ||
| 338 | * | ||
| 339 | ******************************************************************************/ | ||
| 340 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
| 341 | { | ||
| 342 | acpi_status status = AE_OK; | ||
| 343 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 344 | acpi_cpu_flags flags; | ||
| 345 | |||
| 346 | ACPI_FUNCTION_TRACE(acpi_clear_gpe); | ||
| 347 | |||
| 348 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 349 | |||
| 350 | /* Ensure that we have a valid GPE number */ | ||
| 351 | |||
| 352 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 353 | if (!gpe_event_info) { | ||
| 354 | status = AE_BAD_PARAMETER; | ||
| 355 | goto unlock_and_exit; | ||
| 356 | } | ||
| 357 | |||
| 358 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
| 359 | |||
| 360 | unlock_and_exit: | ||
| 361 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 362 | return_ACPI_STATUS(status); | ||
| 363 | } | ||
| 364 | |||
| 365 | ACPI_EXPORT_SYMBOL(acpi_clear_gpe) | ||
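A short sketch of how acpi_clear_gpe() is typically paired with the enable path, e.g. to discard a stale status bit before re-arming a GPE; the wrapper name is illustrative.

#include <acpi/acpi.h>

static acpi_status example_rearm_gpe(acpi_handle gpe_device, u32 gpe_number)
{
        acpi_status status;

        /* Drop any status bit left over from before the GPE was masked */
        status = acpi_clear_gpe(gpe_device, gpe_number);
        if (ACPI_FAILURE(status))
                return status;

        /* Then take a runtime reference, which re-enables the GPE */
        return acpi_enable_gpe(gpe_device, gpe_number);
}
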
| 366 | |||
| 367 | /******************************************************************************* | ||
| 368 | * | ||
| 369 | * FUNCTION: acpi_get_gpe_status | ||
| 370 | * | ||
| 371 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
| 372 | * gpe_number - GPE level within the GPE block | ||
| 373 | * event_status - Where the current status of the event will | ||
| 374 | * be returned | ||
| 375 | * | ||
| 376 | * RETURN: Status | ||
| 377 | * | ||
| 378 | * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled) | ||
| 379 | * | ||
| 380 | ******************************************************************************/ | ||
| 381 | acpi_status | ||
| 382 | acpi_get_gpe_status(acpi_handle gpe_device, | ||
| 383 | u32 gpe_number, acpi_event_status *event_status) | ||
| 384 | { | ||
| 385 | acpi_status status = AE_OK; | ||
| 386 | struct acpi_gpe_event_info *gpe_event_info; | ||
| 387 | acpi_cpu_flags flags; | ||
| 388 | |||
| 389 | ACPI_FUNCTION_TRACE(acpi_get_gpe_status); | ||
| 390 | |||
| 391 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
| 392 | |||
| 393 | /* Ensure that we have a valid GPE number */ | ||
| 394 | |||
| 395 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
| 396 | if (!gpe_event_info) { | ||
| 397 | status = AE_BAD_PARAMETER; | ||
| 398 | goto unlock_and_exit; | ||
| 399 | } | ||
| 400 | |||
| 401 | /* Obtain status on the requested GPE number */ | ||
| 402 | |||
| 403 | status = acpi_hw_get_gpe_status(gpe_event_info, event_status); | ||
| 404 | |||
| 405 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
| 406 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
| 407 | |||
| 408 | unlock_and_exit: | ||
| 409 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
| 410 | return_ACPI_STATUS(status); | ||
| 411 | } | ||
| 412 | |||
| 413 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | ||
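acpi_get_gpe_status() is mostly a diagnostic helper; the sketch below, with an invented caller, shows the extra flag that this file ORs into the hardware status.

#include <acpi/acpi.h>

static void example_report_gpe(acpi_handle gpe_device, u32 gpe_number)
{
        acpi_event_status event_status = 0;

        if (ACPI_FAILURE(acpi_get_gpe_status(gpe_device, gpe_number,
                                             &event_status)))
                return;

        /*
         * ACPI_EVENT_FLAG_HANDLE is set above whenever a handler or an
         * _Lxx/_Exx method is attached to this GPE.
         */
        if (event_status & ACPI_EVENT_FLAG_HANDLE)
                printk("example: GPE 0x%x has a handler or GPE method\n",
                       gpe_number);
}
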
| 414 | |||
| 415 | /****************************************************************************** | ||
| 416 | * | ||
| 417 | * FUNCTION: acpi_disable_all_gpes | ||
| 418 | * | ||
| 419 | * PARAMETERS: None | ||
| 420 | * | ||
| 421 | * RETURN: Status | ||
| 422 | * | ||
| 423 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
| 424 | * | ||
| 425 | ******************************************************************************/ | ||
| 426 | |||
| 427 | acpi_status acpi_disable_all_gpes(void) | ||
| 428 | { | ||
| 429 | acpi_status status; | ||
| 430 | |||
| 431 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
| 432 | |||
| 433 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 434 | if (ACPI_FAILURE(status)) { | ||
| 435 | return_ACPI_STATUS(status); | ||
| 436 | } | ||
| 437 | |||
| 438 | status = acpi_hw_disable_all_gpes(); | ||
| 439 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 440 | |||
| 441 | return_ACPI_STATUS(status); | ||
| 442 | } | ||
| 443 | |||
| 444 | ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes) | ||
| 445 | |||
| 446 | /****************************************************************************** | ||
| 447 | * | ||
| 448 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
| 449 | * | ||
| 450 | * PARAMETERS: None | ||
| 451 | * | ||
| 452 | * RETURN: Status | ||
| 453 | * | ||
| 454 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
| 455 | * | ||
| 456 | ******************************************************************************/ | ||
| 457 | |||
| 458 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
| 459 | { | ||
| 460 | acpi_status status; | ||
| 461 | |||
| 462 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
| 463 | |||
| 464 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 465 | if (ACPI_FAILURE(status)) { | ||
| 466 | return_ACPI_STATUS(status); | ||
| 467 | } | ||
| 468 | |||
| 469 | status = acpi_hw_enable_all_runtime_gpes(); | ||
| 470 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 471 | |||
| 472 | return_ACPI_STATUS(status); | ||
| 473 | } | ||
| 474 | |||
| 475 | ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) | ||
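The two block-wide interfaces above are meant to bracket sleep transitions; the ordering below is a hedged sketch of a typical host sequence, not a literal copy of the kernel's sleep code.

#include <acpi/acpi.h>

static acpi_status example_sleep_transition(void)
{
        acpi_status status;

        /*
         * Going to sleep: mask and clear everything. Wake-capable GPEs are
         * re-enabled separately from their wakeup enable masks.
         */
        status = acpi_disable_all_gpes();
        if (ACPI_FAILURE(status))
                return status;

        /* ... the platform enters and leaves the sleep state ... */

        /* Back at runtime: restore the runtime enable masks */
        return acpi_enable_all_runtime_gpes();
}
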
| 476 | |||
| 477 | /******************************************************************************* | ||
| 478 | * | ||
| 479 | * FUNCTION: acpi_install_gpe_block | ||
| 480 | * | ||
| 481 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
| 482 | * gpe_block_address - Address and space_id | ||
| 483 | * register_count - Number of GPE register pairs in the block | ||
| 484 | * interrupt_number - H/W interrupt for the block | ||
| 485 | * | ||
| 486 | * RETURN: Status | ||
| 487 | * | ||
| 488 | * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not | ||
| 489 | * enabled here. | ||
| 490 | * | ||
| 491 | ******************************************************************************/ | ||
| 492 | acpi_status | ||
| 493 | acpi_install_gpe_block(acpi_handle gpe_device, | ||
| 494 | struct acpi_generic_address *gpe_block_address, | ||
| 495 | u32 register_count, u32 interrupt_number) | ||
| 496 | { | ||
| 497 | acpi_status status; | ||
| 498 | union acpi_operand_object *obj_desc; | ||
| 499 | struct acpi_namespace_node *node; | ||
| 500 | struct acpi_gpe_block_info *gpe_block; | ||
| 501 | |||
| 502 | ACPI_FUNCTION_TRACE(acpi_install_gpe_block); | ||
| 503 | |||
| 504 | if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { | ||
| 505 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 506 | } | ||
| 507 | |||
| 508 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
| 509 | if (ACPI_FAILURE(status)) { | ||
| 510 | return (status); | ||
| 511 | } | ||
| 512 | |||
| 513 | node = acpi_ns_validate_handle(gpe_device); | ||
| 514 | if (!node) { | ||
| 515 | status = AE_BAD_PARAMETER; | ||
| 516 | goto unlock_and_exit; | ||
| 517 | } | ||
| 518 | |||
| 519 | /* | ||
| 520 | * For user-installed GPE Block Devices, the gpe_block_base_number | ||
| 521 | * is always zero | ||
| 522 | */ | ||
| 523 | status = | ||
| 524 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | ||
| 525 | interrupt_number, &gpe_block); | ||
| 526 | if (ACPI_FAILURE(status)) { | ||
| 527 | goto unlock_and_exit; | ||
| 528 | } | ||
| 529 | |||
| 530 | /* Install block in the device_object attached to the node */ | ||
| 531 | |||
| 532 | obj_desc = acpi_ns_get_attached_object(node); | ||
| 533 | if (!obj_desc) { | ||
| 534 | |||
| 535 | /* | ||
| 536 | * No object, create a new one (Device nodes do not always have | ||
| 537 | * an attached object) | ||
| 538 | */ | ||
| 539 | obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); | ||
| 540 | if (!obj_desc) { | ||
| 541 | status = AE_NO_MEMORY; | ||
| 542 | goto unlock_and_exit; | ||
| 543 | } | ||
| 544 | |||
| 545 | status = | ||
| 546 | acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); | ||
| 547 | |||
| 548 | /* Remove local reference to the object */ | ||
| 549 | |||
| 550 | acpi_ut_remove_reference(obj_desc); | ||
| 551 | |||
| 552 | if (ACPI_FAILURE(status)) { | ||
| 553 | goto unlock_and_exit; | ||
| 554 | } | ||
| 555 | } | ||
| 556 | |||
| 557 | /* Now install the GPE block in the device_object */ | ||
| 558 | |||
| 559 | obj_desc->device.gpe_block = gpe_block; | ||
| 560 | |||
| 561 | unlock_and_exit: | ||
| 562 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
| 563 | return_ACPI_STATUS(status); | ||
| 564 | } | ||
| 565 | |||
| 566 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) | ||
| 567 | |||
| 568 | /******************************************************************************* | ||
| 569 | * | ||
| 570 | * FUNCTION: acpi_remove_gpe_block | ||
| 571 | * | ||
| 572 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
| 573 | * | ||
| 574 | * RETURN: Status | ||
| 575 | * | ||
| 576 | * DESCRIPTION: Remove a previously installed block of GPE registers | ||
| 577 | * | ||
| 578 | ******************************************************************************/ | ||
| 579 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | ||
| 580 | { | ||
| 581 | union acpi_operand_object *obj_desc; | ||
| 582 | acpi_status status; | ||
| 583 | struct acpi_namespace_node *node; | ||
| 584 | |||
| 585 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); | ||
| 586 | |||
| 587 | if (!gpe_device) { | ||
| 588 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 589 | } | ||
| 590 | |||
| 591 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
| 592 | if (ACPI_FAILURE(status)) { | ||
| 593 | return (status); | ||
| 594 | } | ||
| 595 | |||
| 596 | node = acpi_ns_validate_handle(gpe_device); | ||
| 597 | if (!node) { | ||
| 598 | status = AE_BAD_PARAMETER; | ||
| 599 | goto unlock_and_exit; | ||
| 600 | } | ||
| 601 | |||
| 602 | /* Get the device_object attached to the node */ | ||
| 603 | |||
| 604 | obj_desc = acpi_ns_get_attached_object(node); | ||
| 605 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
| 606 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
| 607 | } | ||
| 608 | |||
| 609 | /* Delete the GPE block (but not the device_object) */ | ||
| 610 | |||
| 611 | status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); | ||
| 612 | if (ACPI_SUCCESS(status)) { | ||
| 613 | obj_desc->device.gpe_block = NULL; | ||
| 614 | } | ||
| 615 | |||
| 616 | unlock_and_exit: | ||
| 617 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
| 618 | return_ACPI_STATUS(status); | ||
| 619 | } | ||
| 620 | |||
| 621 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | ||
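For GPE Block Devices, the install/remove pair above is used roughly as follows; the address space, base address, register count, and interrupt number are all invented values for the sketch.

#include <acpi/acpi.h>

static acpi_status example_gpe_block(acpi_handle gpe_block_device)
{
        struct acpi_generic_address block_address = {
                .space_id = ACPI_ADR_SPACE_SYSTEM_IO,   /* assumed space */
                .address = 0x1000,                      /* hypothetical base */
        };
        acpi_status status;

        /* Two GPE register pairs, routed to a hypothetical interrupt line */
        status = acpi_install_gpe_block(gpe_block_device, &block_address,
                                        2, 9);
        if (ACPI_FAILURE(status))
                return status;

        /* ... the block's GPEs can now be enabled and dispatched ... */

        return acpi_remove_gpe_block(gpe_block_device);
}
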
| 622 | |||
| 623 | /******************************************************************************* | ||
| 624 | * | ||
| 625 | * FUNCTION: acpi_get_gpe_device | ||
| 626 | * | ||
| 627 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
| 628 | * gpe_device - Where the parent GPE Device is returned | ||
| 629 | * | ||
| 630 | * RETURN: Status | ||
| 631 | * | ||
| 632 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
| 633 | * gpe device indicates that the gpe number is contained in one of | ||
| 634 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | ||
| 635 | * | ||
| 636 | ******************************************************************************/ | ||
| 637 | acpi_status | ||
| 638 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
| 639 | { | ||
| 640 | struct acpi_gpe_device_info info; | ||
| 641 | acpi_status status; | ||
| 642 | |||
| 643 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
| 644 | |||
| 645 | if (!gpe_device) { | ||
| 646 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
| 647 | } | ||
| 648 | |||
| 649 | if (index >= acpi_current_gpe_count) { | ||
| 650 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
| 651 | } | ||
| 652 | |||
| 653 | /* Setup and walk the GPE list */ | ||
| 654 | |||
| 655 | info.index = index; | ||
| 656 | info.status = AE_NOT_EXIST; | ||
| 657 | info.gpe_device = NULL; | ||
| 658 | info.next_block_base_index = 0; | ||
| 659 | |||
| 660 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
| 661 | if (ACPI_FAILURE(status)) { | ||
| 662 | return_ACPI_STATUS(status); | ||
| 663 | } | ||
| 664 | |||
| 665 | *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device); | ||
| 666 | return_ACPI_STATUS(info.status); | ||
| 667 | } | ||
| 668 | |||
| 669 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
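Finally, acpi_get_gpe_device() lets callers walk every system GPE index back to its parent block, as in this illustrative loop.

#include <acpi/acpi.h>

static void example_walk_gpe_indexes(void)
{
        acpi_handle gpe_device;
        u32 index;

        for (index = 0; index < acpi_current_gpe_count; index++) {
                if (ACPI_FAILURE(acpi_get_gpe_device(index, &gpe_device)))
                        continue;

                /*
                 * NULL here means the GPE lives in a FADT-defined block;
                 * otherwise it is the handle of the GPE Block Device.
                 */
        }
}
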
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 14750db2a1b8..85c3cbd4304d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
| @@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
| 62 | * PARAMETERS: gpe_event_info - Info block for the GPE | 62 | * PARAMETERS: gpe_event_info - Info block for the GPE |
| 63 | * gpe_register_info - Info block for the GPE register | 63 | * gpe_register_info - Info block for the GPE register |
| 64 | * | 64 | * |
| 65 | * RETURN: Status | 65 | * RETURN: Register mask with a one in the GPE bit position |
| 66 | * | 66 | * |
| 67 | * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given | 67 | * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the |
| 68 | * GPE set. | 68 | * correct position for the input GPE. |
| 69 | * | 69 | * |
| 70 | ******************************************************************************/ | 70 | ******************************************************************************/ |
| 71 | 71 | ||
| @@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, | |||
| 85 | * | 85 | * |
| 86 | * RETURN: Status | 86 | * RETURN: Status |
| 87 | * | 87 | * |
| 88 | * DESCRIPTION: Enable or disable a single GPE in its enable register. | 88 | * DESCRIPTION: Enable or disable a single GPE in the parent enable register. |
| 89 | * | 89 | * |
| 90 | ******************************************************************************/ | 90 | ******************************************************************************/ |
| 91 | 91 | ||
| 92 | acpi_status | 92 | acpi_status |
| 93 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | 93 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) |
| 94 | { | 94 | { |
| 95 | struct acpi_gpe_register_info *gpe_register_info; | 95 | struct acpi_gpe_register_info *gpe_register_info; |
| 96 | acpi_status status; | 96 | acpi_status status; |
| @@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | |||
| 113 | return (status); | 113 | return (status); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | /* Set ot clear just the bit that corresponds to this GPE */ | 116 | /* Set or clear just the bit that corresponds to this GPE */ |
| 117 | 117 | ||
| 118 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, | 118 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, |
| 119 | gpe_register_info); | 119 | gpe_register_info); |
| 120 | switch (action) { | 120 | switch (action) { |
| 121 | case ACPI_GPE_COND_ENABLE: | 121 | case ACPI_GPE_CONDITIONAL_ENABLE: |
| 122 | if (!(register_bit & gpe_register_info->enable_for_run)) | 122 | |
| 123 | /* Only enable if the enable_for_run bit is set */ | ||
| 124 | |||
| 125 | if (!(register_bit & gpe_register_info->enable_for_run)) { | ||
| 123 | return (AE_BAD_PARAMETER); | 126 | return (AE_BAD_PARAMETER); |
| 127 | } | ||
| 128 | |||
| 129 | /*lint -fallthrough */ | ||
| 124 | 130 | ||
| 125 | case ACPI_GPE_ENABLE: | 131 | case ACPI_GPE_ENABLE: |
| 126 | ACPI_SET_BIT(enable_mask, register_bit); | 132 | ACPI_SET_BIT(enable_mask, register_bit); |
| @@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | |||
| 131 | break; | 137 | break; |
| 132 | 138 | ||
| 133 | default: | 139 | default: |
| 134 | ACPI_ERROR((AE_INFO, "Invalid action\n")); | 140 | ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); |
| 135 | return (AE_BAD_PARAMETER); | 141 | return (AE_BAD_PARAMETER); |
| 136 | } | 142 | } |
| 137 | 143 | ||
| @@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) | |||
| 168 | return (AE_NOT_EXIST); | 174 | return (AE_NOT_EXIST); |
| 169 | } | 175 | } |
| 170 | 176 | ||
| 171 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, | ||
| 172 | gpe_register_info); | ||
| 173 | |||
| 174 | /* | 177 | /* |
| 175 | * Write a one to the appropriate bit in the status register to | 178 | * Write a one to the appropriate bit in the status register to |
| 176 | * clear this GPE. | 179 | * clear this GPE. |
| 177 | */ | 180 | */ |
| 181 | register_bit = | ||
| 182 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
| 183 | |||
| 178 | status = acpi_hw_write(register_bit, | 184 | status = acpi_hw_write(register_bit, |
| 179 | &gpe_register_info->status_address); | 185 | &gpe_register_info->status_address); |
| 180 | 186 | ||
| @@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, | |||
| 201 | u32 in_byte; | 207 | u32 in_byte; |
| 202 | u32 register_bit; | 208 | u32 register_bit; |
| 203 | struct acpi_gpe_register_info *gpe_register_info; | 209 | struct acpi_gpe_register_info *gpe_register_info; |
| 204 | acpi_status status; | ||
| 205 | acpi_event_status local_event_status = 0; | 210 | acpi_event_status local_event_status = 0; |
| 211 | acpi_status status; | ||
| 206 | 212 | ||
| 207 | ACPI_FUNCTION_ENTRY(); | 213 | ACPI_FUNCTION_ENTRY(); |
| 208 | 214 | ||
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index e87bc6760be6..508537f884ac 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
| @@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void) | |||
| 768 | acpi_gbl_gpe_fadt_blocks[0] = NULL; | 768 | acpi_gbl_gpe_fadt_blocks[0] = NULL; |
| 769 | acpi_gbl_gpe_fadt_blocks[1] = NULL; | 769 | acpi_gbl_gpe_fadt_blocks[1] = NULL; |
| 770 | acpi_current_gpe_count = 0; | 770 | acpi_current_gpe_count = 0; |
| 771 | acpi_all_gpes_initialized = FALSE; | 771 | acpi_gbl_all_gpes_initialized = FALSE; |
| 772 | 772 | ||
| 773 | /* Global handlers */ | 773 | /* Global handlers */ |
| 774 | 774 | ||
| @@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void) | |||
| 778 | acpi_gbl_init_handler = NULL; | 778 | acpi_gbl_init_handler = NULL; |
| 779 | acpi_gbl_table_handler = NULL; | 779 | acpi_gbl_table_handler = NULL; |
| 780 | acpi_gbl_interface_handler = NULL; | 780 | acpi_gbl_interface_handler = NULL; |
| 781 | acpi_gbl_global_event_handler = NULL; | ||
| 781 | 782 | ||
| 782 | /* Global Lock support */ | 783 | /* Global Lock support */ |
| 783 | 784 | ||
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 18df1e940276..ef0581f2094d 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h | |||
| @@ -109,6 +109,8 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus) | |||
| 109 | return sizeof(*estatus) + estatus->data_length; | 109 | return sizeof(*estatus) + estatus->data_length; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | void apei_estatus_print(const char *pfx, | ||
| 113 | const struct acpi_hest_generic_status *estatus); | ||
| 112 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); | 114 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); |
| 113 | int apei_estatus_check(const struct acpi_hest_generic_status *estatus); | 115 | int apei_estatus_check(const struct acpi_hest_generic_status *estatus); |
| 114 | #endif | 116 | #endif |
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index f4cf2fc4c8c1..31464a006d76 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c | |||
| @@ -46,6 +46,317 @@ u64 cper_next_record_id(void) | |||
| 46 | } | 46 | } |
| 47 | EXPORT_SYMBOL_GPL(cper_next_record_id); | 47 | EXPORT_SYMBOL_GPL(cper_next_record_id); |
| 48 | 48 | ||
| 49 | static const char *cper_severity_strs[] = { | ||
| 50 | "recoverable", | ||
| 51 | "fatal", | ||
| 52 | "corrected", | ||
| 53 | "info", | ||
| 54 | }; | ||
| 55 | |||
| 56 | static const char *cper_severity_str(unsigned int severity) | ||
| 57 | { | ||
| 58 | return severity < ARRAY_SIZE(cper_severity_strs) ? | ||
| 59 | cper_severity_strs[severity] : "unknown"; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * cper_print_bits - print strings for set bits | ||
| 64 | * @pfx: prefix for each line, including log level and prefix string | ||
| 65 | * @bits: bit mask | ||
| 66 | * @strs: string array, indexed by bit position | ||
| 67 | * @strs_size: size of the string array: @strs | ||
| 68 | * | ||
| 69 | * For each set bit in @bits, print the corresponding string in @strs. | ||
| 70 | * If the output length is longer than 80 characters, multiple lines will be | ||
| 71 | * printed, with @pfx printed at the beginning of each line. | ||
| 72 | */ | ||
| 73 | static void cper_print_bits(const char *pfx, unsigned int bits, | ||
| 74 | const char *strs[], unsigned int strs_size) | ||
| 75 | { | ||
| 76 | int i, len = 0; | ||
| 77 | const char *str; | ||
| 78 | char buf[84]; | ||
| 79 | |||
| 80 | for (i = 0; i < strs_size; i++) { | ||
| 81 | if (!(bits & (1U << i))) | ||
| 82 | continue; | ||
| 83 | str = strs[i]; | ||
| 84 | if (len && len + strlen(str) + 2 > 80) { | ||
| 85 | printk("%s\n", buf); | ||
| 86 | len = 0; | ||
| 87 | } | ||
| 88 | if (!len) | ||
| 89 | len = snprintf(buf, sizeof(buf), "%s%s", pfx, str); | ||
| 90 | else | ||
| 91 | len += snprintf(buf+len, sizeof(buf)-len, ", %s", str); | ||
| 92 | } | ||
| 93 | if (len) | ||
| 94 | printk("%s\n", buf); | ||
| 95 | } | ||
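To make the helper's output format concrete, here is a self-contained sketch that would sit in this file after the helper; the flag names and prefix are invented, while the real callers below pass the CPER string tables.

static const char *example_flag_strs[] = {
        "restartable", "precise IP", "overflow", "corrected",
};

static void example_print_flags(void)
{
        /* Bits 0 and 3 set: prints one line, "APEI example: restartable, corrected" */
        cper_print_bits("APEI example: ", 0x09, example_flag_strs,
                        ARRAY_SIZE(example_flag_strs));
}
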
| 96 | |||
| 97 | static const char *cper_proc_type_strs[] = { | ||
| 98 | "IA32/X64", | ||
| 99 | "IA64", | ||
| 100 | }; | ||
| 101 | |||
| 102 | static const char *cper_proc_isa_strs[] = { | ||
| 103 | "IA32", | ||
| 104 | "IA64", | ||
| 105 | "X64", | ||
| 106 | }; | ||
| 107 | |||
| 108 | static const char *cper_proc_error_type_strs[] = { | ||
| 109 | "cache error", | ||
| 110 | "TLB error", | ||
| 111 | "bus error", | ||
| 112 | "micro-architectural error", | ||
| 113 | }; | ||
| 114 | |||
| 115 | static const char *cper_proc_op_strs[] = { | ||
| 116 | "unknown or generic", | ||
| 117 | "data read", | ||
| 118 | "data write", | ||
| 119 | "instruction execution", | ||
| 120 | }; | ||
| 121 | |||
| 122 | static const char *cper_proc_flag_strs[] = { | ||
| 123 | "restartable", | ||
| 124 | "precise IP", | ||
| 125 | "overflow", | ||
| 126 | "corrected", | ||
| 127 | }; | ||
| 128 | |||
| 129 | static void cper_print_proc_generic(const char *pfx, | ||
| 130 | const struct cper_sec_proc_generic *proc) | ||
| 131 | { | ||
| 132 | if (proc->validation_bits & CPER_PROC_VALID_TYPE) | ||
| 133 | printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type, | ||
| 134 | proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ? | ||
| 135 | cper_proc_type_strs[proc->proc_type] : "unknown"); | ||
| 136 | if (proc->validation_bits & CPER_PROC_VALID_ISA) | ||
| 137 | printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa, | ||
| 138 | proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ? | ||
| 139 | cper_proc_isa_strs[proc->proc_isa] : "unknown"); | ||
| 140 | if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) { | ||
| 141 | printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type); | ||
| 142 | cper_print_bits(pfx, proc->proc_error_type, | ||
| 143 | cper_proc_error_type_strs, | ||
| 144 | ARRAY_SIZE(cper_proc_error_type_strs)); | ||
| 145 | } | ||
| 146 | if (proc->validation_bits & CPER_PROC_VALID_OPERATION) | ||
| 147 | printk("%s""operation: %d, %s\n", pfx, proc->operation, | ||
| 148 | proc->operation < ARRAY_SIZE(cper_proc_op_strs) ? | ||
| 149 | cper_proc_op_strs[proc->operation] : "unknown"); | ||
| 150 | if (proc->validation_bits & CPER_PROC_VALID_FLAGS) { | ||
| 151 | printk("%s""flags: 0x%02x\n", pfx, proc->flags); | ||
| 152 | cper_print_bits(pfx, proc->flags, cper_proc_flag_strs, | ||
| 153 | ARRAY_SIZE(cper_proc_flag_strs)); | ||
| 154 | } | ||
| 155 | if (proc->validation_bits & CPER_PROC_VALID_LEVEL) | ||
| 156 | printk("%s""level: %d\n", pfx, proc->level); | ||
| 157 | if (proc->validation_bits & CPER_PROC_VALID_VERSION) | ||
| 158 | printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version); | ||
| 159 | if (proc->validation_bits & CPER_PROC_VALID_ID) | ||
| 160 | printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id); | ||
| 161 | if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS) | ||
| 162 | printk("%s""target_address: 0x%016llx\n", | ||
| 163 | pfx, proc->target_addr); | ||
| 164 | if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) | ||
| 165 | printk("%s""requestor_id: 0x%016llx\n", | ||
| 166 | pfx, proc->requestor_id); | ||
| 167 | if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID) | ||
| 168 | printk("%s""responder_id: 0x%016llx\n", | ||
| 169 | pfx, proc->responder_id); | ||
| 170 | if (proc->validation_bits & CPER_PROC_VALID_IP) | ||
| 171 | printk("%s""IP: 0x%016llx\n", pfx, proc->ip); | ||
| 172 | } | ||
| 173 | |||
| 174 | static const char *cper_mem_err_type_strs[] = { | ||
| 175 | "unknown", | ||
| 176 | "no error", | ||
| 177 | "single-bit ECC", | ||
| 178 | "multi-bit ECC", | ||
| 179 | "single-symbol chipkill ECC", | ||
| 180 | "multi-symbol chipkill ECC", | ||
| 181 | "master abort", | ||
| 182 | "target abort", | ||
| 183 | "parity error", | ||
| 184 | "watchdog timeout", | ||
| 185 | "invalid address", | ||
| 186 | "mirror Broken", | ||
| 187 | "memory sparing", | ||
| 188 | "scrub corrected error", | ||
| 189 | "scrub uncorrected error", | ||
| 190 | }; | ||
| 191 | |||
| 192 | static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) | ||
| 193 | { | ||
| 194 | if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) | ||
| 195 | printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); | ||
| 196 | if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) | ||
| 197 | printk("%s""physical_address: 0x%016llx\n", | ||
| 198 | pfx, mem->physical_addr); | ||
| 199 | if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) | ||
| 200 | printk("%s""physical_address_mask: 0x%016llx\n", | ||
| 201 | pfx, mem->physical_addr_mask); | ||
| 202 | if (mem->validation_bits & CPER_MEM_VALID_NODE) | ||
| 203 | printk("%s""node: %d\n", pfx, mem->node); | ||
| 204 | if (mem->validation_bits & CPER_MEM_VALID_CARD) | ||
| 205 | printk("%s""card: %d\n", pfx, mem->card); | ||
| 206 | if (mem->validation_bits & CPER_MEM_VALID_MODULE) | ||
| 207 | printk("%s""module: %d\n", pfx, mem->module); | ||
| 208 | if (mem->validation_bits & CPER_MEM_VALID_BANK) | ||
| 209 | printk("%s""bank: %d\n", pfx, mem->bank); | ||
| 210 | if (mem->validation_bits & CPER_MEM_VALID_DEVICE) | ||
| 211 | printk("%s""device: %d\n", pfx, mem->device); | ||
| 212 | if (mem->validation_bits & CPER_MEM_VALID_ROW) | ||
| 213 | printk("%s""row: %d\n", pfx, mem->row); | ||
| 214 | if (mem->validation_bits & CPER_MEM_VALID_COLUMN) | ||
| 215 | printk("%s""column: %d\n", pfx, mem->column); | ||
| 216 | if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION) | ||
| 217 | printk("%s""bit_position: %d\n", pfx, mem->bit_pos); | ||
| 218 | if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID) | ||
| 219 | printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id); | ||
| 220 | if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID) | ||
| 221 | printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id); | ||
| 222 | if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) | ||
| 223 | printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id); | ||
| 224 | if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) { | ||
| 225 | u8 etype = mem->error_type; | ||
| 226 | printk("%s""error_type: %d, %s\n", pfx, etype, | ||
| 227 | etype < ARRAY_SIZE(cper_mem_err_type_strs) ? | ||
| 228 | cper_mem_err_type_strs[etype] : "unknown"); | ||
| 229 | } | ||
| 230 | } | ||
| 231 | |||
| 232 | static const char *cper_pcie_port_type_strs[] = { | ||
| 233 | "PCIe end point", | ||
| 234 | "legacy PCI end point", | ||
| 235 | "unknown", | ||
| 236 | "unknown", | ||
| 237 | "root port", | ||
| 238 | "upstream switch port", | ||
| 239 | "downstream switch port", | ||
| 240 | "PCIe to PCI/PCI-X bridge", | ||
| 241 | "PCI/PCI-X to PCIe bridge", | ||
| 242 | "root complex integrated endpoint device", | ||
| 243 | "root complex event collector", | ||
| 244 | }; | ||
| 245 | |||
| 246 | static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie) | ||
| 247 | { | ||
| 248 | if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) | ||
| 249 | printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, | ||
| 250 | pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? | ||
| 251 | cper_pcie_port_type_strs[pcie->port_type] : "unknown"); | ||
| 252 | if (pcie->validation_bits & CPER_PCIE_VALID_VERSION) | ||
| 253 | printk("%s""version: %d.%d\n", pfx, | ||
| 254 | pcie->version.major, pcie->version.minor); | ||
| 255 | if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS) | ||
| 256 | printk("%s""command: 0x%04x, status: 0x%04x\n", pfx, | ||
| 257 | pcie->command, pcie->status); | ||
| 258 | if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) { | ||
| 259 | const __u8 *p; | ||
| 260 | printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx, | ||
| 261 | pcie->device_id.segment, pcie->device_id.bus, | ||
| 262 | pcie->device_id.device, pcie->device_id.function); | ||
| 263 | printk("%s""slot: %d\n", pfx, | ||
| 264 | pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT); | ||
| 265 | printk("%s""secondary_bus: 0x%02x\n", pfx, | ||
| 266 | pcie->device_id.secondary_bus); | ||
| 267 | printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, | ||
| 268 | pcie->device_id.vendor_id, pcie->device_id.device_id); | ||
| 269 | p = pcie->device_id.class_code; | ||
| 270 | printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); | ||
| 271 | } | ||
| 272 | if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) | ||
| 273 | printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, | ||
| 274 | pcie->serial_number.lower, pcie->serial_number.upper); | ||
| 275 | if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS) | ||
| 276 | printk( | ||
| 277 | "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", | ||
| 278 | pfx, pcie->bridge.secondary_status, pcie->bridge.control); | ||
| 279 | } | ||
| 280 | |||
| 281 | static const char *apei_estatus_section_flag_strs[] = { | ||
| 282 | "primary", | ||
| 283 | "containment warning", | ||
| 284 | "reset", | ||
| 285 | "threshold exceeded", | ||
| 286 | "resource not accessible", | ||
| 287 | "latent error", | ||
| 288 | }; | ||
| 289 | |||
| 290 | static void apei_estatus_print_section( | ||
| 291 | const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no) | ||
| 292 | { | ||
| 293 | uuid_le *sec_type = (uuid_le *)gdata->section_type; | ||
| 294 | __u16 severity; | ||
| 295 | |||
| 296 | severity = gdata->error_severity; | ||
| 297 | printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity, | ||
| 298 | cper_severity_str(severity)); | ||
| 299 | printk("%s""flags: 0x%02x\n", pfx, gdata->flags); | ||
| 300 | cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs, | ||
| 301 | ARRAY_SIZE(apei_estatus_section_flag_strs)); | ||
| 302 | if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) | ||
| 303 | printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id); | ||
| 304 | if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) | ||
| 305 | printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); | ||
| 306 | |||
| 307 | if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) { | ||
| 308 | struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1); | ||
| 309 | printk("%s""section_type: general processor error\n", pfx); | ||
| 310 | if (gdata->error_data_length >= sizeof(*proc_err)) | ||
| 311 | cper_print_proc_generic(pfx, proc_err); | ||
| 312 | else | ||
| 313 | goto err_section_too_small; | ||
| 314 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { | ||
| 315 | struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); | ||
| 316 | printk("%s""section_type: memory error\n", pfx); | ||
| 317 | if (gdata->error_data_length >= sizeof(*mem_err)) | ||
| 318 | cper_print_mem(pfx, mem_err); | ||
| 319 | else | ||
| 320 | goto err_section_too_small; | ||
| 321 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { | ||
| 322 | struct cper_sec_pcie *pcie = (void *)(gdata + 1); | ||
| 323 | printk("%s""section_type: PCIe error\n", pfx); | ||
| 324 | if (gdata->error_data_length >= sizeof(*pcie)) | ||
| 325 | cper_print_pcie(pfx, pcie); | ||
| 326 | else | ||
| 327 | goto err_section_too_small; | ||
| 328 | } else | ||
| 329 | printk("%s""section type: unknown, %pUl\n", pfx, sec_type); | ||
| 330 | |||
| 331 | return; | ||
| 332 | |||
| 333 | err_section_too_small: | ||
| 334 | pr_err(FW_WARN "error section length is too small\n"); | ||
| 335 | } | ||
| 336 | |||
| 337 | void apei_estatus_print(const char *pfx, | ||
| 338 | const struct acpi_hest_generic_status *estatus) | ||
| 339 | { | ||
| 340 | struct acpi_hest_generic_data *gdata; | ||
| 341 | unsigned int data_len, gedata_len; | ||
| 342 | int sec_no = 0; | ||
| 343 | __u16 severity; | ||
| 344 | |||
| 345 | printk("%s""APEI generic hardware error status\n", pfx); | ||
| 346 | severity = estatus->error_severity; | ||
| 347 | printk("%s""severity: %d, %s\n", pfx, severity, | ||
| 348 | cper_severity_str(severity)); | ||
| 349 | data_len = estatus->data_length; | ||
| 350 | gdata = (struct acpi_hest_generic_data *)(estatus + 1); | ||
| 351 | while (data_len > sizeof(*gdata)) { | ||
| 352 | gedata_len = gdata->error_data_length; | ||
| 353 | apei_estatus_print_section(pfx, gdata, sec_no); | ||
| 354 | data_len -= gedata_len + sizeof(*gdata); | ||
| 355 | sec_no++; | ||
| 356 | } | ||
| 357 | } | ||
| 358 | EXPORT_SYMBOL_GPL(apei_estatus_print); | ||
| 359 | |||
| 49 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) | 360 | int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) |
| 50 | { | 361 | { |
| 51 | if (estatus->data_length && | 362 | if (estatus->data_length && |
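A note on the decoders above: each CPER field is printed only when its validation bit is set, so information the firmware did not provide is skipped rather than printed as zero. Below is a rough, self-contained C sketch of that pattern; the struct, macro names and bit values are invented for illustration and are not the kernel's CPER definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical section layout -- names and bit values are made up
     * for this sketch, not taken from the kernel headers. */
    #define DEMO_VALID_NODE 0x1
    #define DEMO_VALID_BANK 0x2

    struct demo_mem_err {
        uint64_t validation_bits;
        uint16_t node;
        uint16_t bank;
    };

    /* Print a field only when firmware marked it valid, the same shape
     * as cper_print_mem() above. */
    static void demo_print_mem(const char *pfx, const struct demo_mem_err *m)
    {
        if (m->validation_bits & DEMO_VALID_NODE)
            printf("%snode: %d\n", pfx, m->node);
        if (m->validation_bits & DEMO_VALID_BANK)
            printf("%sbank: %d\n", pfx, m->bank);
    }

    int main(void)
    {
        struct demo_mem_err m = { .validation_bits = DEMO_VALID_NODE, .node = 3 };

        demo_print_mem("", &m);    /* prints only "node: 3" */
        return 0;
    }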
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0d505e59214d..d1d484d4a06a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
| @@ -12,10 +12,6 @@ | |||
| 12 | * For more information about Generic Hardware Error Source, please | 12 | * For more information about Generic Hardware Error Source, please |
| 13 | * refer to ACPI Specification version 4.0, section 17.3.2.6 | 13 | * refer to ACPI Specification version 4.0, section 17.3.2.6 |
| 14 | * | 14 | * |
| 15 | * Now, only SCI notification type and memory errors are | ||
| 16 | * supported. More notification type and hardware error type will be | ||
| 17 | * added later. | ||
| 18 | * | ||
| 19 | * Copyright 2010 Intel Corp. | 15 | * Copyright 2010 Intel Corp. |
| 20 | * Author: Huang Ying <ying.huang@intel.com> | 16 | * Author: Huang Ying <ying.huang@intel.com> |
| 21 | * | 17 | * |
| @@ -39,14 +35,18 @@ | |||
| 39 | #include <linux/acpi.h> | 35 | #include <linux/acpi.h> |
| 40 | #include <linux/io.h> | 36 | #include <linux/io.h> |
| 41 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
| 38 | #include <linux/timer.h> | ||
| 42 | #include <linux/cper.h> | 39 | #include <linux/cper.h> |
| 43 | #include <linux/kdebug.h> | 40 | #include <linux/kdebug.h> |
| 44 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
| 45 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> |
| 43 | #include <linux/ratelimit.h> | ||
| 44 | #include <linux/vmalloc.h> | ||
| 46 | #include <acpi/apei.h> | 45 | #include <acpi/apei.h> |
| 47 | #include <acpi/atomicio.h> | 46 | #include <acpi/atomicio.h> |
| 48 | #include <acpi/hed.h> | 47 | #include <acpi/hed.h> |
| 49 | #include <asm/mce.h> | 48 | #include <asm/mce.h> |
| 49 | #include <asm/tlbflush.h> | ||
| 50 | 50 | ||
| 51 | #include "apei-internal.h" | 51 | #include "apei-internal.h" |
| 52 | 52 | ||
| @@ -55,42 +55,131 @@ | |||
| 55 | #define GHES_ESTATUS_MAX_SIZE 65536 | 55 | #define GHES_ESTATUS_MAX_SIZE 65536 |
| 56 | 56 | ||
| 57 | /* | 57 | /* |
| 58 | * One struct ghes is created for each generic hardware error | 58 | * One struct ghes is created for each generic hardware error source. |
| 59 | * source. | ||
| 60 | * | ||
| 61 | * It provides the context for APEI hardware error timer/IRQ/SCI/NMI | 59 | * It provides the context for APEI hardware error timer/IRQ/SCI/NMI |
| 62 | * handler. Handler for one generic hardware error source is only | 60 | * handler. |
| 63 | * triggered after the previous one is done. So handler can uses | ||
| 64 | * struct ghes without locking. | ||
| 65 | * | 61 | * |
| 66 | * estatus: memory buffer for error status block, allocated during | 62 | * estatus: memory buffer for error status block, allocated during |
| 67 | * HEST parsing. | 63 | * HEST parsing. |
| 68 | */ | 64 | */ |
| 69 | #define GHES_TO_CLEAR 0x0001 | 65 | #define GHES_TO_CLEAR 0x0001 |
| 66 | #define GHES_EXITING 0x0002 | ||
| 70 | 67 | ||
| 71 | struct ghes { | 68 | struct ghes { |
| 72 | struct acpi_hest_generic *generic; | 69 | struct acpi_hest_generic *generic; |
| 73 | struct acpi_hest_generic_status *estatus; | 70 | struct acpi_hest_generic_status *estatus; |
| 74 | struct list_head list; | ||
| 75 | u64 buffer_paddr; | 71 | u64 buffer_paddr; |
| 76 | unsigned long flags; | 72 | unsigned long flags; |
| 73 | union { | ||
| 74 | struct list_head list; | ||
| 75 | struct timer_list timer; | ||
| 76 | unsigned int irq; | ||
| 77 | }; | ||
| 77 | }; | 78 | }; |
| 78 | 79 | ||
| 80 | static int ghes_panic_timeout __read_mostly = 30; | ||
| 81 | |||
| 79 | /* | 82 | /* |
| 80 | * Error source lists, one list for each notification method. The | 83 | * All error sources notified with SCI share one notifier function, |
| 81 | * members in lists are struct ghes. | 84 | * so they need to be linked and checked one by one. The same applies |
| 85 | * to NMI. | ||
| 82 | * | 86 | * |
| 83 | * The list members are only added in HEST parsing and deleted during | 87 | * RCU is used for these lists, so ghes_list_mutex is only used for |
| 84 | * module_exit, that is, single-threaded. So no lock is needed for | 88 | * list changing, not for traversing. |
| 85 | * that. | ||
| 86 | * | ||
| 87 | * But the mutual exclusion is needed between members adding/deleting | ||
| 88 | * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is | ||
| 89 | * used for that. | ||
| 90 | */ | 89 | */ |
| 91 | static LIST_HEAD(ghes_sci); | 90 | static LIST_HEAD(ghes_sci); |
| 91 | static LIST_HEAD(ghes_nmi); | ||
| 92 | static DEFINE_MUTEX(ghes_list_mutex); | 92 | static DEFINE_MUTEX(ghes_list_mutex); |
| 93 | 93 | ||
| 94 | /* | ||
| 95 | * NMI may be triggered on any CPU, so ghes_nmi_lock is used for | ||
| 96 | * mutual exclusion. | ||
| 97 | */ | ||
| 98 | static DEFINE_RAW_SPINLOCK(ghes_nmi_lock); | ||
| 99 | |||
| 100 | /* | ||
| 101 | * The memory area used to transfer hardware error information from | ||
| 102 | * BIOS to Linux can be determined only in the NMI, IRQ or timer | ||
| 103 | * handler, and the regular ioremap cannot be used in atomic context, | ||
| 104 | * so a special atomic version of ioremap is implemented here. | ||
| 105 | */ | ||
| 106 | |||
| 107 | /* | ||
| 108 | * Two virtual pages are used, one for NMI context, the other for | ||
| 109 | * IRQ/PROCESS context | ||
| 110 | */ | ||
| 111 | #define GHES_IOREMAP_PAGES 2 | ||
| 112 | #define GHES_IOREMAP_NMI_PAGE(base) (base) | ||
| 113 | #define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE) | ||
| 114 | |||
| 115 | /* virtual memory area for atomic ioremap */ | ||
| 116 | static struct vm_struct *ghes_ioremap_area; | ||
| 117 | /* | ||
| 118 | * These two spinlocks prevent the atomic ioremap virtual memory | ||
| 119 | * area from being mapped simultaneously. | ||
| 120 | */ | ||
| 121 | static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); | ||
| 122 | static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); | ||
| 123 | |||
| 124 | static int ghes_ioremap_init(void) | ||
| 125 | { | ||
| 126 | ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, | ||
| 127 | VM_IOREMAP, VMALLOC_START, VMALLOC_END); | ||
| 128 | if (!ghes_ioremap_area) { | ||
| 129 | pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); | ||
| 130 | return -ENOMEM; | ||
| 131 | } | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static void ghes_ioremap_exit(void) | ||
| 137 | { | ||
| 138 | free_vm_area(ghes_ioremap_area); | ||
| 139 | } | ||
| 140 | |||
| 141 | static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) | ||
| 142 | { | ||
| 143 | unsigned long vaddr; | ||
| 144 | |||
| 145 | vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); | ||
| 146 | ioremap_page_range(vaddr, vaddr + PAGE_SIZE, | ||
| 147 | pfn << PAGE_SHIFT, PAGE_KERNEL); | ||
| 148 | |||
| 149 | return (void __iomem *)vaddr; | ||
| 150 | } | ||
| 151 | |||
| 152 | static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) | ||
| 153 | { | ||
| 154 | unsigned long vaddr; | ||
| 155 | |||
| 156 | vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); | ||
| 157 | ioremap_page_range(vaddr, vaddr + PAGE_SIZE, | ||
| 158 | pfn << PAGE_SHIFT, PAGE_KERNEL); | ||
| 159 | |||
| 160 | return (void __iomem *)vaddr; | ||
| 161 | } | ||
| 162 | |||
| 163 | static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) | ||
| 164 | { | ||
| 165 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | ||
| 166 | void *base = ghes_ioremap_area->addr; | ||
| 167 | |||
| 168 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); | ||
| 169 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | ||
| 170 | __flush_tlb_one(vaddr); | ||
| 171 | } | ||
| 172 | |||
| 173 | static void ghes_iounmap_irq(void __iomem *vaddr_ptr) | ||
| 174 | { | ||
| 175 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | ||
| 176 | void *base = ghes_ioremap_area->addr; | ||
| 177 | |||
| 178 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); | ||
| 179 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | ||
| 180 | __flush_tlb_one(vaddr); | ||
| 181 | } | ||
| 182 | |||
| 94 | static struct ghes *ghes_new(struct acpi_hest_generic *generic) | 183 | static struct ghes *ghes_new(struct acpi_hest_generic *generic) |
| 95 | { | 184 | { |
| 96 | struct ghes *ghes; | 185 | struct ghes *ghes; |
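The atomic ioremap introduced above reserves the virtual address range once at init and then repeatedly maps a single page into that fixed slot under a lock, one slot per context. A hedged userspace analogue of that design, using POSIX mmap with MAP_FIXED over a pre-reserved window (all names are invented for the sketch, which mirrors only the shape of the kernel code):

    #define _DEFAULT_SOURCE
    #include <pthread.h>
    #include <sys/mman.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <fcntl.h>

    /* One pre-reserved page-sized window plus a lock, standing in for
     * one GHES_IOREMAP_*_PAGE slot and its ghes_ioremap_lock_*. */
    static void *window;
    static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;

    static int window_init(void)
    {
        /* Reserve the address range once, like __get_vm_area() at init. */
        window = mmap(NULL, (size_t)getpagesize(), PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return window == MAP_FAILED ? -1 : 0;
    }

    static void *window_map(int fd, off_t page_offset)
    {
        void *p;

        pthread_mutex_lock(&window_lock);
        /* MAP_FIXED reuses the reserved address, much as
         * ioremap_page_range() reuses the pre-allocated vm area. */
        p = mmap(window, (size_t)getpagesize(), PROT_READ,
                 MAP_PRIVATE | MAP_FIXED, fd, page_offset);
        if (p == MAP_FAILED) {
            pthread_mutex_unlock(&window_lock);
            return NULL;
        }
        return p;
    }

    static void window_unmap(void)
    {
        /* Tear the page back down but keep the reservation. */
        mmap(window, (size_t)getpagesize(), PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        pthread_mutex_unlock(&window_lock);
    }

    int main(void)
    {
        int fd = open("/dev/zero", O_RDONLY);

        if (fd < 0 || window_init())
            return 1;
        if (window_map(fd, 0))    /* map one page into the fixed slot... */
            window_unmap();       /* ...then drop it and release the lock */
        close(fd);
        return 0;
    }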
| @@ -101,7 +190,6 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) | |||
| 101 | if (!ghes) | 190 | if (!ghes) |
| 102 | return ERR_PTR(-ENOMEM); | 191 | return ERR_PTR(-ENOMEM); |
| 103 | ghes->generic = generic; | 192 | ghes->generic = generic; |
| 104 | INIT_LIST_HEAD(&ghes->list); | ||
| 105 | rc = acpi_pre_map_gar(&generic->error_status_address); | 193 | rc = acpi_pre_map_gar(&generic->error_status_address); |
| 106 | if (rc) | 194 | if (rc) |
| 107 | goto err_free; | 195 | goto err_free; |
| @@ -158,22 +246,41 @@ static inline int ghes_severity(int severity) | |||
| 158 | } | 246 | } |
| 159 | } | 247 | } |
| 160 | 248 | ||
| 161 | /* SCI handler run in work queue, so ioremap can be used here */ | 249 | static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, |
| 162 | static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, | 250 | int from_phys) |
| 163 | int from_phys) | ||
| 164 | { | 251 | { |
| 165 | void *vaddr; | 252 | void __iomem *vaddr; |
| 166 | 253 | unsigned long flags = 0; | |
| 167 | vaddr = ioremap_cache(paddr, len); | 254 | int in_nmi = in_nmi(); |
| 168 | if (!vaddr) | 255 | u64 offset; |
| 169 | return -ENOMEM; | 256 | u32 trunk; |
| 170 | if (from_phys) | 257 | |
| 171 | memcpy(buffer, vaddr, len); | 258 | while (len > 0) { |
| 172 | else | 259 | offset = paddr - (paddr & PAGE_MASK); |
| 173 | memcpy(vaddr, buffer, len); | 260 | if (in_nmi) { |
| 174 | iounmap(vaddr); | 261 | raw_spin_lock(&ghes_ioremap_lock_nmi); |
| 175 | 262 | vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); | |
| 176 | return 0; | 263 | } else { |
| 264 | spin_lock_irqsave(&ghes_ioremap_lock_irq, flags); | ||
| 265 | vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); | ||
| 266 | } | ||
| 267 | trunk = PAGE_SIZE - offset; | ||
| 268 | trunk = min(trunk, len); | ||
| 269 | if (from_phys) | ||
| 270 | memcpy_fromio(buffer, vaddr + offset, trunk); | ||
| 271 | else | ||
| 272 | memcpy_toio(vaddr + offset, buffer, trunk); | ||
| 273 | len -= trunk; | ||
| 274 | paddr += trunk; | ||
| 275 | buffer += trunk; | ||
| 276 | if (in_nmi) { | ||
| 277 | ghes_iounmap_nmi(vaddr); | ||
| 278 | raw_spin_unlock(&ghes_ioremap_lock_nmi); | ||
| 279 | } else { | ||
| 280 | ghes_iounmap_irq(vaddr); | ||
| 281 | spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); | ||
| 282 | } | ||
| 283 | } | ||
| 177 | } | 284 | } |
| 178 | 285 | ||
| 179 | static int ghes_read_estatus(struct ghes *ghes, int silent) | 286 | static int ghes_read_estatus(struct ghes *ghes, int silent) |
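ghes_copy_tofrom_phys() above copies through a single mapped page at a time, so each step is capped at the distance to the next page boundary. A minimal userspace sketch of just that chunking arithmetic follows; the plain memcpy stands in for memcpy_fromio(), and PAGE_SIZE and the helper names are illustrative only.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

    /* Copy "len" bytes starting at an arbitrary address, never crossing
     * a page boundary in one step -- the same loop shape as above. */
    static void copy_chunked(uint8_t *dst, const uint8_t *src_base,
                             uint64_t src_addr, uint32_t len)
    {
        while (len > 0) {
            uint64_t offset = src_addr - (src_addr & PAGE_MASK);
            uint32_t trunk = PAGE_SIZE - offset;

            if (trunk > len)
                trunk = len;
            /* In the kernel this is where the single page at src_addr
             * would be mapped and memcpy_fromio()'d. */
            memcpy(dst, src_base + src_addr, trunk);
            len -= trunk;
            src_addr += trunk;
            dst += trunk;
        }
    }

    int main(void)
    {
        static uint8_t phys[3 * PAGE_SIZE], buf[PAGE_SIZE + 123];

        for (unsigned i = 0; i < sizeof(phys); i++)
            phys[i] = (uint8_t)i;
        /* Start 50 bytes before a page boundary to force several chunks. */
        copy_chunked(buf, phys, PAGE_SIZE - 50, sizeof(buf));
        printf("copied %zu bytes in page-sized chunks\n", sizeof(buf));
        return 0;
    }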
| @@ -194,10 +301,8 @@ static int ghes_read_estatus(struct ghes *ghes, int silent) | |||
| 194 | if (!buf_paddr) | 301 | if (!buf_paddr) |
| 195 | return -ENOENT; | 302 | return -ENOENT; |
| 196 | 303 | ||
| 197 | rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, | 304 | ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, |
| 198 | sizeof(*ghes->estatus), 1); | 305 | sizeof(*ghes->estatus), 1); |
| 199 | if (rc) | ||
| 200 | return rc; | ||
| 201 | if (!ghes->estatus->block_status) | 306 | if (!ghes->estatus->block_status) |
| 202 | return -ENOENT; | 307 | return -ENOENT; |
| 203 | 308 | ||
| @@ -212,17 +317,15 @@ static int ghes_read_estatus(struct ghes *ghes, int silent) | |||
| 212 | goto err_read_block; | 317 | goto err_read_block; |
| 213 | if (apei_estatus_check_header(ghes->estatus)) | 318 | if (apei_estatus_check_header(ghes->estatus)) |
| 214 | goto err_read_block; | 319 | goto err_read_block; |
| 215 | rc = ghes_copy_tofrom_phys(ghes->estatus + 1, | 320 | ghes_copy_tofrom_phys(ghes->estatus + 1, |
| 216 | buf_paddr + sizeof(*ghes->estatus), | 321 | buf_paddr + sizeof(*ghes->estatus), |
| 217 | len - sizeof(*ghes->estatus), 1); | 322 | len - sizeof(*ghes->estatus), 1); |
| 218 | if (rc) | ||
| 219 | return rc; | ||
| 220 | if (apei_estatus_check(ghes->estatus)) | 323 | if (apei_estatus_check(ghes->estatus)) |
| 221 | goto err_read_block; | 324 | goto err_read_block; |
| 222 | rc = 0; | 325 | rc = 0; |
| 223 | 326 | ||
| 224 | err_read_block: | 327 | err_read_block: |
| 225 | if (rc && !silent) | 328 | if (rc && !silent && printk_ratelimit()) |
| 226 | pr_warning(FW_WARN GHES_PFX | 329 | pr_warning(FW_WARN GHES_PFX |
| 227 | "Failed to read error status block!\n"); | 330 | "Failed to read error status block!\n"); |
| 228 | return rc; | 331 | return rc; |
| @@ -255,11 +358,26 @@ static void ghes_do_proc(struct ghes *ghes) | |||
| 255 | } | 358 | } |
| 256 | #endif | 359 | #endif |
| 257 | } | 360 | } |
| 361 | } | ||
| 258 | 362 | ||
| 259 | if (!processed && printk_ratelimit()) | 363 | static void ghes_print_estatus(const char *pfx, struct ghes *ghes) |
| 260 | pr_warning(GHES_PFX | 364 | { |
| 261 | "Unknown error record from generic hardware error source: %d\n", | 365 | /* Not more than 2 messages every 5 seconds */ |
| 262 | ghes->generic->header.source_id); | 366 | static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2); |
| 367 | |||
| 368 | if (pfx == NULL) { | ||
| 369 | if (ghes_severity(ghes->estatus->error_severity) <= | ||
| 370 | GHES_SEV_CORRECTED) | ||
| 371 | pfx = KERN_WARNING HW_ERR; | ||
| 372 | else | ||
| 373 | pfx = KERN_ERR HW_ERR; | ||
| 374 | } | ||
| 375 | if (__ratelimit(&ratelimit)) { | ||
| 376 | printk( | ||
| 377 | "%s""Hardware error from APEI Generic Hardware Error Source: %d\n", | ||
| 378 | pfx, ghes->generic->header.source_id); | ||
| 379 | apei_estatus_print(pfx, ghes->estatus); | ||
| 380 | } | ||
| 263 | } | 381 | } |
| 264 | 382 | ||
| 265 | static int ghes_proc(struct ghes *ghes) | 383 | static int ghes_proc(struct ghes *ghes) |
| @@ -269,6 +387,7 @@ static int ghes_proc(struct ghes *ghes) | |||
| 269 | rc = ghes_read_estatus(ghes, 0); | 387 | rc = ghes_read_estatus(ghes, 0); |
| 270 | if (rc) | 388 | if (rc) |
| 271 | goto out; | 389 | goto out; |
| 390 | ghes_print_estatus(NULL, ghes); | ||
| 272 | ghes_do_proc(ghes); | 391 | ghes_do_proc(ghes); |
| 273 | 392 | ||
| 274 | out: | 393 | out: |
| @@ -276,6 +395,42 @@ out: | |||
| 276 | return 0; | 395 | return 0; |
| 277 | } | 396 | } |
| 278 | 397 | ||
| 398 | static void ghes_add_timer(struct ghes *ghes) | ||
| 399 | { | ||
| 400 | struct acpi_hest_generic *g = ghes->generic; | ||
| 401 | unsigned long expire; | ||
| 402 | |||
| 403 | if (!g->notify.poll_interval) { | ||
| 404 | pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", | ||
| 405 | g->header.source_id); | ||
| 406 | return; | ||
| 407 | } | ||
| 408 | expire = jiffies + msecs_to_jiffies(g->notify.poll_interval); | ||
| 409 | ghes->timer.expires = round_jiffies_relative(expire); | ||
| 410 | add_timer(&ghes->timer); | ||
| 411 | } | ||
| 412 | |||
| 413 | static void ghes_poll_func(unsigned long data) | ||
| 414 | { | ||
| 415 | struct ghes *ghes = (void *)data; | ||
| 416 | |||
| 417 | ghes_proc(ghes); | ||
| 418 | if (!(ghes->flags & GHES_EXITING)) | ||
| 419 | ghes_add_timer(ghes); | ||
| 420 | } | ||
| 421 | |||
| 422 | static irqreturn_t ghes_irq_func(int irq, void *data) | ||
| 423 | { | ||
| 424 | struct ghes *ghes = data; | ||
| 425 | int rc; | ||
| 426 | |||
| 427 | rc = ghes_proc(ghes); | ||
| 428 | if (rc) | ||
| 429 | return IRQ_NONE; | ||
| 430 | |||
| 431 | return IRQ_HANDLED; | ||
| 432 | } | ||
| 433 | |||
| 279 | static int ghes_notify_sci(struct notifier_block *this, | 434 | static int ghes_notify_sci(struct notifier_block *this, |
| 280 | unsigned long event, void *data) | 435 | unsigned long event, void *data) |
| 281 | { | 436 | { |
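The polled notification path above is a timer that re-arms itself after each run until GHES_EXITING is set, after which ghes_remove() waits for the last run with del_timer_sync(). A loose userspace analogue of that shutdown handshake, sketched with a thread and an atomic flag (all names here are invented for the sketch):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static atomic_bool exiting;    /* plays the role of GHES_EXITING */

    static void poll_once(void)
    {
        printf("polled\n");        /* stand-in for ghes_proc() */
    }

    static void *poll_thread(void *arg)
    {
        struct timespec interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        (void)arg;
        while (!atomic_load(&exiting)) {
            poll_once();
            nanosleep(&interval, NULL);   /* stand-in for re-arming the timer */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, poll_thread, NULL);
        sleep(1);
        atomic_store(&exiting, true);     /* like setting GHES_EXITING... */
        pthread_join(t, NULL);            /* ...before del_timer_sync() */
        return 0;
    }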
| @@ -292,10 +447,63 @@ static int ghes_notify_sci(struct notifier_block *this, | |||
| 292 | return ret; | 447 | return ret; |
| 293 | } | 448 | } |
| 294 | 449 | ||
| 450 | static int ghes_notify_nmi(struct notifier_block *this, | ||
| 451 | unsigned long cmd, void *data) | ||
| 452 | { | ||
| 453 | struct ghes *ghes, *ghes_global = NULL; | ||
| 454 | int sev, sev_global = -1; | ||
| 455 | int ret = NOTIFY_DONE; | ||
| 456 | |||
| 457 | if (cmd != DIE_NMI) | ||
| 458 | return ret; | ||
| 459 | |||
| 460 | raw_spin_lock(&ghes_nmi_lock); | ||
| 461 | list_for_each_entry_rcu(ghes, &ghes_nmi, list) { | ||
| 462 | if (ghes_read_estatus(ghes, 1)) { | ||
| 463 | ghes_clear_estatus(ghes); | ||
| 464 | continue; | ||
| 465 | } | ||
| 466 | sev = ghes_severity(ghes->estatus->error_severity); | ||
| 467 | if (sev > sev_global) { | ||
| 468 | sev_global = sev; | ||
| 469 | ghes_global = ghes; | ||
| 470 | } | ||
| 471 | ret = NOTIFY_STOP; | ||
| 472 | } | ||
| 473 | |||
| 474 | if (ret == NOTIFY_DONE) | ||
| 475 | goto out; | ||
| 476 | |||
| 477 | if (sev_global >= GHES_SEV_PANIC) { | ||
| 478 | oops_begin(); | ||
| 479 | ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global); | ||
| 480 | /* reboot to log the error! */ | ||
| 481 | if (panic_timeout == 0) | ||
| 482 | panic_timeout = ghes_panic_timeout; | ||
| 483 | panic("Fatal hardware error!"); | ||
| 484 | } | ||
| 485 | |||
| 486 | list_for_each_entry_rcu(ghes, &ghes_nmi, list) { | ||
| 487 | if (!(ghes->flags & GHES_TO_CLEAR)) | ||
| 488 | continue; | ||
| 489 | /* Do not print estatus because printk is not NMI safe */ | ||
| 490 | ghes_do_proc(ghes); | ||
| 491 | ghes_clear_estatus(ghes); | ||
| 492 | } | ||
| 493 | |||
| 494 | out: | ||
| 495 | raw_spin_unlock(&ghes_nmi_lock); | ||
| 496 | return ret; | ||
| 497 | } | ||
| 498 | |||
| 295 | static struct notifier_block ghes_notifier_sci = { | 499 | static struct notifier_block ghes_notifier_sci = { |
| 296 | .notifier_call = ghes_notify_sci, | 500 | .notifier_call = ghes_notify_sci, |
| 297 | }; | 501 | }; |
| 298 | 502 | ||
| 503 | static struct notifier_block ghes_notifier_nmi = { | ||
| 504 | .notifier_call = ghes_notify_nmi, | ||
| 505 | }; | ||
| 506 | |||
| 299 | static int __devinit ghes_probe(struct platform_device *ghes_dev) | 507 | static int __devinit ghes_probe(struct platform_device *ghes_dev) |
| 300 | { | 508 | { |
| 301 | struct acpi_hest_generic *generic; | 509 | struct acpi_hest_generic *generic; |
| @@ -306,18 +514,27 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) | |||
| 306 | if (!generic->enabled) | 514 | if (!generic->enabled) |
| 307 | return -ENODEV; | 515 | return -ENODEV; |
| 308 | 516 | ||
| 309 | if (generic->error_block_length < | 517 | switch (generic->notify.type) { |
| 310 | sizeof(struct acpi_hest_generic_status)) { | 518 | case ACPI_HEST_NOTIFY_POLLED: |
| 311 | pr_warning(FW_BUG GHES_PFX | 519 | case ACPI_HEST_NOTIFY_EXTERNAL: |
| 312 | "Invalid error block length: %u for generic hardware error source: %d\n", | 520 | case ACPI_HEST_NOTIFY_SCI: |
| 313 | generic->error_block_length, | 521 | case ACPI_HEST_NOTIFY_NMI: |
| 522 | break; | ||
| 523 | case ACPI_HEST_NOTIFY_LOCAL: | ||
| 524 | pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", | ||
| 314 | generic->header.source_id); | 525 | generic->header.source_id); |
| 315 | goto err; | 526 | goto err; |
| 527 | default: | ||
| 528 | pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", | ||
| 529 | generic->notify.type, generic->header.source_id); | ||
| 530 | goto err; | ||
| 316 | } | 531 | } |
| 317 | if (generic->records_to_preallocate == 0) { | 532 | |
| 318 | pr_warning(FW_BUG GHES_PFX | 533 | rc = -EIO; |
| 319 | "Invalid records to preallocate: %u for generic hardware error source: %d\n", | 534 | if (generic->error_block_length < |
| 320 | generic->records_to_preallocate, | 535 | sizeof(struct acpi_hest_generic_status)) { |
| 536 | pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", | ||
| 537 | generic->error_block_length, | ||
| 321 | generic->header.source_id); | 538 | generic->header.source_id); |
| 322 | goto err; | 539 | goto err; |
| 323 | } | 540 | } |
| @@ -327,38 +544,43 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) | |||
| 327 | ghes = NULL; | 544 | ghes = NULL; |
| 328 | goto err; | 545 | goto err; |
| 329 | } | 546 | } |
| 330 | if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) { | 547 | switch (generic->notify.type) { |
| 548 | case ACPI_HEST_NOTIFY_POLLED: | ||
| 549 | ghes->timer.function = ghes_poll_func; | ||
| 550 | ghes->timer.data = (unsigned long)ghes; | ||
| 551 | init_timer_deferrable(&ghes->timer); | ||
| 552 | ghes_add_timer(ghes); | ||
| 553 | break; | ||
| 554 | case ACPI_HEST_NOTIFY_EXTERNAL: | ||
| 555 | /* External interrupt vector is GSI */ | ||
| 556 | if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { | ||
| 557 | pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", | ||
| 558 | generic->header.source_id); | ||
| 559 | goto err; | ||
| 560 | } | ||
| 561 | if (request_irq(ghes->irq, ghes_irq_func, | ||
| 562 | 0, "GHES IRQ", ghes)) { | ||
| 563 | pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", | ||
| 564 | generic->header.source_id); | ||
| 565 | goto err; | ||
| 566 | } | ||
| 567 | break; | ||
| 568 | case ACPI_HEST_NOTIFY_SCI: | ||
| 331 | mutex_lock(&ghes_list_mutex); | 569 | mutex_lock(&ghes_list_mutex); |
| 332 | if (list_empty(&ghes_sci)) | 570 | if (list_empty(&ghes_sci)) |
| 333 | register_acpi_hed_notifier(&ghes_notifier_sci); | 571 | register_acpi_hed_notifier(&ghes_notifier_sci); |
| 334 | list_add_rcu(&ghes->list, &ghes_sci); | 572 | list_add_rcu(&ghes->list, &ghes_sci); |
| 335 | mutex_unlock(&ghes_list_mutex); | 573 | mutex_unlock(&ghes_list_mutex); |
| 336 | } else { | 574 | break; |
| 337 | unsigned char *notify = NULL; | 575 | case ACPI_HEST_NOTIFY_NMI: |
| 338 | 576 | mutex_lock(&ghes_list_mutex); | |
| 339 | switch (generic->notify.type) { | 577 | if (list_empty(&ghes_nmi)) |
| 340 | case ACPI_HEST_NOTIFY_POLLED: | 578 | register_die_notifier(&ghes_notifier_nmi); |
| 341 | notify = "POLL"; | 579 | list_add_rcu(&ghes->list, &ghes_nmi); |
| 342 | break; | 580 | mutex_unlock(&ghes_list_mutex); |
| 343 | case ACPI_HEST_NOTIFY_EXTERNAL: | 581 | break; |
| 344 | case ACPI_HEST_NOTIFY_LOCAL: | 582 | default: |
| 345 | notify = "IRQ"; | 583 | BUG(); |
| 346 | break; | ||
| 347 | case ACPI_HEST_NOTIFY_NMI: | ||
| 348 | notify = "NMI"; | ||
| 349 | break; | ||
| 350 | } | ||
| 351 | if (notify) { | ||
| 352 | pr_warning(GHES_PFX | ||
| 353 | "Generic hardware error source: %d notified via %s is not supported!\n", | ||
| 354 | generic->header.source_id, notify); | ||
| 355 | } else { | ||
| 356 | pr_warning(FW_WARN GHES_PFX | ||
| 357 | "Unknown notification type: %u for generic hardware error source: %d\n", | ||
| 358 | generic->notify.type, generic->header.source_id); | ||
| 359 | } | ||
| 360 | rc = -ENODEV; | ||
| 361 | goto err; | ||
| 362 | } | 584 | } |
| 363 | platform_set_drvdata(ghes_dev, ghes); | 585 | platform_set_drvdata(ghes_dev, ghes); |
| 364 | 586 | ||
| @@ -379,7 +601,14 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) | |||
| 379 | ghes = platform_get_drvdata(ghes_dev); | 601 | ghes = platform_get_drvdata(ghes_dev); |
| 380 | generic = ghes->generic; | 602 | generic = ghes->generic; |
| 381 | 603 | ||
| 604 | ghes->flags |= GHES_EXITING; | ||
| 382 | switch (generic->notify.type) { | 605 | switch (generic->notify.type) { |
| 606 | case ACPI_HEST_NOTIFY_POLLED: | ||
| 607 | del_timer_sync(&ghes->timer); | ||
| 608 | break; | ||
| 609 | case ACPI_HEST_NOTIFY_EXTERNAL: | ||
| 610 | free_irq(ghes->irq, ghes); | ||
| 611 | break; | ||
| 383 | case ACPI_HEST_NOTIFY_SCI: | 612 | case ACPI_HEST_NOTIFY_SCI: |
| 384 | mutex_lock(&ghes_list_mutex); | 613 | mutex_lock(&ghes_list_mutex); |
| 385 | list_del_rcu(&ghes->list); | 614 | list_del_rcu(&ghes->list); |
| @@ -387,12 +616,23 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) | |||
| 387 | unregister_acpi_hed_notifier(&ghes_notifier_sci); | 616 | unregister_acpi_hed_notifier(&ghes_notifier_sci); |
| 388 | mutex_unlock(&ghes_list_mutex); | 617 | mutex_unlock(&ghes_list_mutex); |
| 389 | break; | 618 | break; |
| 619 | case ACPI_HEST_NOTIFY_NMI: | ||
| 620 | mutex_lock(&ghes_list_mutex); | ||
| 621 | list_del_rcu(&ghes->list); | ||
| 622 | if (list_empty(&ghes_nmi)) | ||
| 623 | unregister_die_notifier(&ghes_notifier_nmi); | ||
| 624 | mutex_unlock(&ghes_list_mutex); | ||
| 625 | /* | ||
| 626 | * To synchronize with NMI handler, ghes can only be | ||
| 627 | * freed after NMI handler finishes. | ||
| 628 | */ | ||
| 629 | synchronize_rcu(); | ||
| 630 | break; | ||
| 390 | default: | 631 | default: |
| 391 | BUG(); | 632 | BUG(); |
| 392 | break; | 633 | break; |
| 393 | } | 634 | } |
| 394 | 635 | ||
| 395 | synchronize_rcu(); | ||
| 396 | ghes_fini(ghes); | 636 | ghes_fini(ghes); |
| 397 | kfree(ghes); | 637 | kfree(ghes); |
| 398 | 638 | ||
| @@ -412,6 +652,8 @@ static struct platform_driver ghes_platform_driver = { | |||
| 412 | 652 | ||
| 413 | static int __init ghes_init(void) | 653 | static int __init ghes_init(void) |
| 414 | { | 654 | { |
| 655 | int rc; | ||
| 656 | |||
| 415 | if (acpi_disabled) | 657 | if (acpi_disabled) |
| 416 | return -ENODEV; | 658 | return -ENODEV; |
| 417 | 659 | ||
| @@ -420,12 +662,25 @@ static int __init ghes_init(void) | |||
| 420 | return -EINVAL; | 662 | return -EINVAL; |
| 421 | } | 663 | } |
| 422 | 664 | ||
| 423 | return platform_driver_register(&ghes_platform_driver); | 665 | rc = ghes_ioremap_init(); |
| 666 | if (rc) | ||
| 667 | goto err; | ||
| 668 | |||
| 669 | rc = platform_driver_register(&ghes_platform_driver); | ||
| 670 | if (rc) | ||
| 671 | goto err_ioremap_exit; | ||
| 672 | |||
| 673 | return 0; | ||
| 674 | err_ioremap_exit: | ||
| 675 | ghes_ioremap_exit(); | ||
| 676 | err: | ||
| 677 | return rc; | ||
| 424 | } | 678 | } |
| 425 | 679 | ||
| 426 | static void __exit ghes_exit(void) | 680 | static void __exit ghes_exit(void) |
| 427 | { | 681 | { |
| 428 | platform_driver_unregister(&ghes_platform_driver); | 682 | platform_driver_unregister(&ghes_platform_driver); |
| 683 | ghes_ioremap_exit(); | ||
| 429 | } | 684 | } |
| 430 | 685 | ||
| 431 | module_init(ghes_init); | 686 | module_init(ghes_init); |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 95649d373071..68bc227e7c4c 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
| @@ -631,6 +631,17 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
| 631 | return result; | 631 | return result; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | static void acpi_battery_refresh(struct acpi_battery *battery) | ||
| 635 | { | ||
| 636 | if (!battery->bat.dev) | ||
| 637 | return; | ||
| 638 | |||
| 639 | acpi_battery_get_info(battery); | ||
| 640 | /* The battery may have changed its reporting units. */ | ||
| 641 | sysfs_remove_battery(battery); | ||
| 642 | sysfs_add_battery(battery); | ||
| 643 | } | ||
| 644 | |||
| 634 | /* -------------------------------------------------------------------------- | 645 | /* -------------------------------------------------------------------------- |
| 635 | FS Interface (/proc) | 646 | FS Interface (/proc) |
| 636 | -------------------------------------------------------------------------- */ | 647 | -------------------------------------------------------------------------- */ |
| @@ -868,6 +879,8 @@ static int acpi_battery_add_fs(struct acpi_device *device) | |||
| 868 | struct proc_dir_entry *entry = NULL; | 879 | struct proc_dir_entry *entry = NULL; |
| 869 | int i; | 880 | int i; |
| 870 | 881 | ||
| 882 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," | ||
| 883 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
| 871 | if (!acpi_device_dir(device)) { | 884 | if (!acpi_device_dir(device)) { |
| 872 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | 885 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), |
| 873 | acpi_battery_dir); | 886 | acpi_battery_dir); |
| @@ -914,6 +927,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) | |||
| 914 | if (!battery) | 927 | if (!battery) |
| 915 | return; | 928 | return; |
| 916 | old = battery->bat.dev; | 929 | old = battery->bat.dev; |
| 930 | if (event == ACPI_BATTERY_NOTIFY_INFO) | ||
| 931 | acpi_battery_refresh(battery); | ||
| 917 | acpi_battery_update(battery); | 932 | acpi_battery_update(battery); |
| 918 | acpi_bus_generate_proc_event(device, event, | 933 | acpi_bus_generate_proc_event(device, event, |
| 919 | acpi_battery_present(battery)); | 934 | acpi_battery_present(battery)); |
| @@ -983,6 +998,7 @@ static int acpi_battery_resume(struct acpi_device *device) | |||
| 983 | if (!device) | 998 | if (!device) |
| 984 | return -EINVAL; | 999 | return -EINVAL; |
| 985 | battery = acpi_driver_data(device); | 1000 | battery = acpi_driver_data(device); |
| 1001 | acpi_battery_refresh(battery); | ||
| 986 | battery->update_time = 0; | 1002 | battery->update_time = 0; |
| 987 | acpi_battery_update(battery); | 1003 | acpi_battery_update(battery); |
| 988 | return 0; | 1004 | return 0; |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d68bd61072bb..7ced61f39492 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir); | |||
| 52 | 52 | ||
| 53 | #define STRUCT_TO_INT(s) (*((int*)&s)) | 53 | #define STRUCT_TO_INT(s) (*((int*)&s)) |
| 54 | 54 | ||
| 55 | static int set_power_nocheck(const struct dmi_system_id *id) | ||
| 56 | { | ||
| 57 | printk(KERN_NOTICE PREFIX "%s detected - " | ||
| 58 | "disable power check in power transition\n", id->ident); | ||
| 59 | acpi_power_nocheck = 1; | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = { | ||
| 63 | { | ||
| 64 | set_power_nocheck, "HP Pavilion 05", { | ||
| 65 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), | ||
| 66 | DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"), | ||
| 67 | DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL}, | ||
| 68 | {}, | ||
| 69 | }; | ||
| 70 | |||
| 71 | 55 | ||
| 72 | #ifdef CONFIG_X86 | 56 | #ifdef CONFIG_X86 |
| 73 | static int set_copy_dsdt(const struct dmi_system_id *id) | 57 | static int set_copy_dsdt(const struct dmi_system_id *id) |
| @@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); | |||
| 196 | Power Management | 180 | Power Management |
| 197 | -------------------------------------------------------------------------- */ | 181 | -------------------------------------------------------------------------- */ |
| 198 | 182 | ||
| 199 | int acpi_bus_get_power(acpi_handle handle, int *state) | 183 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) |
| 200 | { | 184 | { |
| 201 | int result = 0; | 185 | int result = 0; |
| 202 | acpi_status status = 0; | 186 | acpi_status status = 0; |
| 203 | struct acpi_device *device = NULL; | ||
| 204 | unsigned long long psc = 0; | 187 | unsigned long long psc = 0; |
| 205 | 188 | ||
| 206 | 189 | if (!device || !state) | |
| 207 | result = acpi_bus_get_device(handle, &device); | 190 | return -EINVAL; |
| 208 | if (result) | ||
| 209 | return result; | ||
| 210 | 191 | ||
| 211 | *state = ACPI_STATE_UNKNOWN; | 192 | *state = ACPI_STATE_UNKNOWN; |
| 212 | 193 | ||
| 213 | if (!device->flags.power_manageable) { | 194 | if (device->flags.power_manageable) { |
| 214 | /* TBD: Non-recursive algorithm for walking up hierarchy */ | ||
| 215 | if (device->parent) | ||
| 216 | *state = device->parent->power.state; | ||
| 217 | else | ||
| 218 | *state = ACPI_STATE_D0; | ||
| 219 | } else { | ||
| 220 | /* | 195 | /* |
| 221 | * Get the device's power state either directly (via _PSC) or | 196 | * Get the device's power state either directly (via _PSC) or |
| 222 | * indirectly (via power resources). | 197 | * indirectly (via power resources). |
| 223 | */ | 198 | */ |
| 224 | if (device->power.flags.power_resources) { | 199 | if (device->power.flags.power_resources) { |
| 225 | result = acpi_power_get_inferred_state(device); | 200 | result = acpi_power_get_inferred_state(device, state); |
| 226 | if (result) | 201 | if (result) |
| 227 | return result; | 202 | return result; |
| 228 | } else if (device->power.flags.explicit_get) { | 203 | } else if (device->power.flags.explicit_get) { |
| @@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state) | |||
| 230 | NULL, &psc); | 205 | NULL, &psc); |
| 231 | if (ACPI_FAILURE(status)) | 206 | if (ACPI_FAILURE(status)) |
| 232 | return -ENODEV; | 207 | return -ENODEV; |
| 233 | device->power.state = (int)psc; | 208 | *state = (int)psc; |
| 234 | } | 209 | } |
| 235 | 210 | } else { | |
| 236 | *state = device->power.state; | 211 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
| 212 | *state = device->parent ? | ||
| 213 | device->parent->power.state : ACPI_STATE_D0; | ||
| 237 | } | 214 | } |
| 238 | 215 | ||
| 239 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", | 216 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", |
| 240 | device->pnp.bus_id, device->power.state)); | 217 | device->pnp.bus_id, *state)); |
| 241 | 218 | ||
| 242 | return 0; | 219 | return 0; |
| 243 | } | 220 | } |
| 244 | 221 | ||
| 245 | EXPORT_SYMBOL(acpi_bus_get_power); | ||
| 246 | 222 | ||
| 247 | int acpi_bus_set_power(acpi_handle handle, int state) | 223 | static int __acpi_bus_set_power(struct acpi_device *device, int state) |
| 248 | { | 224 | { |
| 249 | int result = 0; | 225 | int result = 0; |
| 250 | acpi_status status = AE_OK; | 226 | acpi_status status = AE_OK; |
| 251 | struct acpi_device *device = NULL; | ||
| 252 | char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; | 227 | char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; |
| 253 | 228 | ||
| 254 | 229 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | |
| 255 | result = acpi_bus_get_device(handle, &device); | ||
| 256 | if (result) | ||
| 257 | return result; | ||
| 258 | |||
| 259 | if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | ||
| 260 | return -EINVAL; | 230 | return -EINVAL; |
| 261 | 231 | ||
| 262 | /* Make sure this is a valid target state */ | 232 | /* Make sure this is a valid target state */ |
| 263 | 233 | ||
| 264 | if (!device->flags.power_manageable) { | 234 | if (state == device->power.state) { |
| 265 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", | ||
| 266 | kobject_name(&device->dev.kobj))); | ||
| 267 | return -ENODEV; | ||
| 268 | } | ||
| 269 | /* | ||
| 270 | * Get device's current power state | ||
| 271 | */ | ||
| 272 | if (!acpi_power_nocheck) { | ||
| 273 | /* | ||
| 274 | * Maybe the incorrect power state is returned on the bogus | ||
| 275 | * bios, which is different with the real power state. | ||
| 276 | * For example: the bios returns D0 state and the real power | ||
| 277 | * state is D3. OS expects to set the device to D0 state. In | ||
| 278 | * such case if OS uses the power state returned by the BIOS, | ||
| 279 | * the device can't be transisted to the correct power state. | ||
| 280 | * So if the acpi_power_nocheck is set, it is unnecessary to | ||
| 281 | * get the power state by calling acpi_bus_get_power. | ||
| 282 | */ | ||
| 283 | acpi_bus_get_power(device->handle, &device->power.state); | ||
| 284 | } | ||
| 285 | if ((state == device->power.state) && !device->flags.force_power_state) { | ||
| 286 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", | 235 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", |
| 287 | state)); | 236 | state)); |
| 288 | return 0; | 237 | return 0; |
| @@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state) | |||
| 351 | return result; | 300 | return result; |
| 352 | } | 301 | } |
| 353 | 302 | ||
| 303 | |||
| 304 | int acpi_bus_set_power(acpi_handle handle, int state) | ||
| 305 | { | ||
| 306 | struct acpi_device *device; | ||
| 307 | int result; | ||
| 308 | |||
| 309 | result = acpi_bus_get_device(handle, &device); | ||
| 310 | if (result) | ||
| 311 | return result; | ||
| 312 | |||
| 313 | if (!device->flags.power_manageable) { | ||
| 314 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
| 315 | "Device [%s] is not power manageable\n", | ||
| 316 | dev_name(&device->dev))); | ||
| 317 | return -ENODEV; | ||
| 318 | } | ||
| 319 | |||
| 320 | return __acpi_bus_set_power(device, state); | ||
| 321 | } | ||
| 354 | EXPORT_SYMBOL(acpi_bus_set_power); | 322 | EXPORT_SYMBOL(acpi_bus_set_power); |
| 355 | 323 | ||
| 324 | |||
| 325 | int acpi_bus_init_power(struct acpi_device *device) | ||
| 326 | { | ||
| 327 | int state; | ||
| 328 | int result; | ||
| 329 | |||
| 330 | if (!device) | ||
| 331 | return -EINVAL; | ||
| 332 | |||
| 333 | device->power.state = ACPI_STATE_UNKNOWN; | ||
| 334 | |||
| 335 | result = __acpi_bus_get_power(device, &state); | ||
| 336 | if (result) | ||
| 337 | return result; | ||
| 338 | |||
| 339 | if (device->power.flags.power_resources) | ||
| 340 | result = acpi_power_on_resources(device, state); | ||
| 341 | |||
| 342 | if (!result) | ||
| 343 | device->power.state = state; | ||
| 344 | |||
| 345 | return result; | ||
| 346 | } | ||
| 347 | |||
| 348 | |||
| 349 | int acpi_bus_update_power(acpi_handle handle, int *state_p) | ||
| 350 | { | ||
| 351 | struct acpi_device *device; | ||
| 352 | int state; | ||
| 353 | int result; | ||
| 354 | |||
| 355 | result = acpi_bus_get_device(handle, &device); | ||
| 356 | if (result) | ||
| 357 | return result; | ||
| 358 | |||
| 359 | result = __acpi_bus_get_power(device, &state); | ||
| 360 | if (result) | ||
| 361 | return result; | ||
| 362 | |||
| 363 | result = __acpi_bus_set_power(device, state); | ||
| 364 | if (!result && state_p) | ||
| 365 | *state_p = state; | ||
| 366 | |||
| 367 | return result; | ||
| 368 | } | ||
| 369 | EXPORT_SYMBOL_GPL(acpi_bus_update_power); | ||
| 370 | |||
| 371 | |||
| 356 | bool acpi_bus_power_manageable(acpi_handle handle) | 372 | bool acpi_bus_power_manageable(acpi_handle handle) |
| 357 | { | 373 | { |
| 358 | struct acpi_device *device; | 374 | struct acpi_device *device; |
| @@ -1023,15 +1039,8 @@ static int __init acpi_init(void) | |||
| 1023 | if (acpi_disabled) | 1039 | if (acpi_disabled) |
| 1024 | return result; | 1040 | return result; |
| 1025 | 1041 | ||
| 1026 | /* | ||
| 1027 | * If the laptop falls into the DMI check table, the power state check | ||
| 1028 | * will be disabled in the course of device power transition. | ||
| 1029 | */ | ||
| 1030 | dmi_check_system(power_nocheck_dmi_table); | ||
| 1031 | |||
| 1032 | acpi_scan_init(); | 1042 | acpi_scan_init(); |
| 1033 | acpi_ec_init(); | 1043 | acpi_ec_init(); |
| 1034 | acpi_power_init(); | ||
| 1035 | acpi_debugfs_init(); | 1044 | acpi_debugfs_init(); |
| 1036 | acpi_sleep_proc_init(); | 1045 | acpi_sleep_proc_init(); |
| 1037 | acpi_wakeup_device_init(); | 1046 | acpi_wakeup_device_init(); |
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 71ef9cd0735f..76bbb78a5ad9 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
| @@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device) | |||
| 279 | input_report_switch(button->input, SW_LID, !state); | 279 | input_report_switch(button->input, SW_LID, !state); |
| 280 | input_sync(button->input); | 280 | input_sync(button->input); |
| 281 | 281 | ||
| 282 | if (state) | ||
| 283 | pm_wakeup_event(&device->dev, 0); | ||
| 284 | |||
| 282 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); | 285 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); |
| 283 | if (ret == NOTIFY_DONE) | 286 | if (ret == NOTIFY_DONE) |
| 284 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, | 287 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, |
| @@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) | |||
| 314 | input_sync(input); | 317 | input_sync(input); |
| 315 | input_report_key(input, keycode, 0); | 318 | input_report_key(input, keycode, 0); |
| 316 | input_sync(input); | 319 | input_sync(input); |
| 320 | |||
| 321 | pm_wakeup_event(&device->dev, 0); | ||
| 317 | } | 322 | } |
| 318 | 323 | ||
| 319 | acpi_bus_generate_proc_event(device, event, ++button->pushed); | 324 | acpi_bus_generate_proc_event(device, event, ++button->pushed); |
| @@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device) | |||
| 426 | acpi_enable_gpe(device->wakeup.gpe_device, | 431 | acpi_enable_gpe(device->wakeup.gpe_device, |
| 427 | device->wakeup.gpe_number); | 432 | device->wakeup.gpe_number); |
| 428 | device->wakeup.run_wake_count++; | 433 | device->wakeup.run_wake_count++; |
| 429 | device->wakeup.state.enabled = 1; | 434 | device_set_wakeup_enable(&device->dev, true); |
| 430 | } | 435 | } |
| 431 | 436 | ||
| 432 | printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); | 437 | printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); |
| @@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type) | |||
| 449 | acpi_disable_gpe(device->wakeup.gpe_device, | 454 | acpi_disable_gpe(device->wakeup.gpe_device, |
| 450 | device->wakeup.gpe_number); | 455 | device->wakeup.gpe_number); |
| 451 | device->wakeup.run_wake_count--; | 456 | device->wakeup.run_wake_count--; |
| 452 | device->wakeup.state.enabled = 0; | 457 | device_set_wakeup_enable(&device->dev, false); |
| 453 | } | 458 | } |
| 454 | 459 | ||
| 455 | acpi_button_remove_fs(device); | 460 | acpi_button_remove_fs(device); |
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 81514a4918cc..1864ad3cf895 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
| @@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) | |||
| 725 | complete_dock(ds); | 725 | complete_dock(ds); |
| 726 | dock_event(ds, event, DOCK_EVENT); | 726 | dock_event(ds, event, DOCK_EVENT); |
| 727 | dock_lock(ds, 1); | 727 | dock_lock(ds, 1); |
| 728 | acpi_update_gpes(); | 728 | acpi_update_all_gpes(); |
| 729 | break; | 729 | break; |
| 730 | } | 730 | } |
| 731 | if (dock_present(ds) || dock_in_progress(ds)) | 731 | if (dock_present(ds) || dock_in_progress(ds)) |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 302b31ed31f1..fa848c4116a8 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) | |||
| 606 | return 0; | 606 | return 0; |
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | static u32 acpi_ec_gpe_handler(void *data) | 609 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, |
| 610 | u32 gpe_number, void *data) | ||
| 610 | { | 611 | { |
| 611 | struct acpi_ec *ec = data; | 612 | struct acpi_ec *ec = data; |
| 612 | 613 | ||
| @@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data) | |||
| 618 | wake_up(&ec->wait); | 619 | wake_up(&ec->wait); |
| 619 | ec_check_sci(ec, acpi_ec_read_status(ec)); | 620 | ec_check_sci(ec, acpi_ec_read_status(ec)); |
| 620 | } | 621 | } |
| 621 | return ACPI_INTERRUPT_HANDLED; | 622 | return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; |
| 622 | } | 623 | } |
| 623 | 624 | ||
| 624 | /* -------------------------------------------------------------------------- | 625 | /* -------------------------------------------------------------------------- |
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 60049080c869..467479f07c1f 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
| @@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long | |||
| 86 | if (!device) | 86 | if (!device) |
| 87 | return -EINVAL; | 87 | return -EINVAL; |
| 88 | 88 | ||
| 89 | result = acpi_bus_get_power(device->handle, &acpi_state); | 89 | result = acpi_bus_update_power(device->handle, &acpi_state); |
| 90 | if (result) | 90 | if (result) |
| 91 | return result; | 91 | return result; |
| 92 | 92 | ||
| @@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = { | |||
| 123 | static int acpi_fan_add(struct acpi_device *device) | 123 | static int acpi_fan_add(struct acpi_device *device) |
| 124 | { | 124 | { |
| 125 | int result = 0; | 125 | int result = 0; |
| 126 | int state = 0; | ||
| 127 | struct thermal_cooling_device *cdev; | 126 | struct thermal_cooling_device *cdev; |
| 128 | 127 | ||
| 129 | if (!device) | 128 | if (!device) |
| @@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device) | |||
| 132 | strcpy(acpi_device_name(device), "Fan"); | 131 | strcpy(acpi_device_name(device), "Fan"); |
| 133 | strcpy(acpi_device_class(device), ACPI_FAN_CLASS); | 132 | strcpy(acpi_device_class(device), ACPI_FAN_CLASS); |
| 134 | 133 | ||
| 135 | result = acpi_bus_get_power(device->handle, &state); | 134 | result = acpi_bus_update_power(device->handle, NULL); |
| 136 | if (result) { | 135 | if (result) { |
| 137 | printk(KERN_ERR PREFIX "Reading power state\n"); | 136 | printk(KERN_ERR PREFIX "Setting initial power state\n"); |
| 138 | goto end; | 137 | goto end; |
| 139 | } | 138 | } |
| 140 | 139 | ||
| 141 | device->flags.force_power_state = 1; | ||
| 142 | acpi_bus_set_power(device->handle, state); | ||
| 143 | device->flags.force_power_state = 0; | ||
| 144 | |||
| 145 | cdev = thermal_cooling_device_register("Fan", device, | 140 | cdev = thermal_cooling_device_register("Fan", device, |
| 146 | &fan_cooling_ops); | 141 | &fan_cooling_ops); |
| 147 | if (IS_ERR(cdev)) { | 142 | if (IS_ERR(cdev)) { |
| @@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) | |||
| 200 | 195 | ||
| 201 | static int acpi_fan_resume(struct acpi_device *device) | 196 | static int acpi_fan_resume(struct acpi_device *device) |
| 202 | { | 197 | { |
| 203 | int result = 0; | 198 | int result; |
| 204 | int power_state = 0; | ||
| 205 | 199 | ||
| 206 | if (!device) | 200 | if (!device) |
| 207 | return -EINVAL; | 201 | return -EINVAL; |
| 208 | 202 | ||
| 209 | result = acpi_bus_get_power(device->handle, &power_state); | 203 | result = acpi_bus_update_power(device->handle, NULL); |
| 210 | if (result) { | 204 | if (result) |
| 211 | printk(KERN_ERR PREFIX | 205 | printk(KERN_ERR PREFIX "Error updating fan power state\n"); |
| 212 | "Error reading fan power state\n"); | ||
| 213 | return result; | ||
| 214 | } | ||
| 215 | |||
| 216 | device->flags.force_power_state = 1; | ||
| 217 | acpi_bus_set_power(device->handle, power_state); | ||
| 218 | device->flags.force_power_state = 0; | ||
| 219 | 206 | ||
| 220 | return result; | 207 | return result; |
| 221 | } | 208 | } |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 78b0164c35b2..7c47ed55e528 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
| @@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
| 167 | "firmware_node"); | 167 | "firmware_node"); |
| 168 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 168 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
| 169 | "physical_node"); | 169 | "physical_node"); |
| 170 | if (acpi_dev->wakeup.flags.valid) { | 170 | if (acpi_dev->wakeup.flags.valid) |
| 171 | device_set_wakeup_capable(dev, true); | 171 | device_set_wakeup_capable(dev, true); |
| 172 | device_set_wakeup_enable(dev, | ||
| 173 | acpi_dev->wakeup.state.enabled); | ||
| 174 | } | ||
| 175 | } | 172 | } |
| 176 | 173 | ||
| 177 | return 0; | 174 | return 0; |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a212bfeddf8c..b1cc81a0431b 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; } | |||
| 41 | int acpi_power_init(void); | 41 | int acpi_power_init(void); |
| 42 | int acpi_device_sleep_wake(struct acpi_device *dev, | 42 | int acpi_device_sleep_wake(struct acpi_device *dev, |
| 43 | int enable, int sleep_state, int dev_state); | 43 | int enable, int sleep_state, int dev_state); |
| 44 | int acpi_power_get_inferred_state(struct acpi_device *device); | 44 | int acpi_power_get_inferred_state(struct acpi_device *device, int *state); |
| 45 | int acpi_power_on_resources(struct acpi_device *device, int state); | ||
| 45 | int acpi_power_transition(struct acpi_device *device, int state); | 46 | int acpi_power_transition(struct acpi_device *device, int state); |
| 46 | extern int acpi_power_nocheck; | 47 | int acpi_bus_init_power(struct acpi_device *device); |
| 47 | 48 | ||
| 48 | int acpi_wakeup_device_init(void); | 49 | int acpi_wakeup_device_init(void); |
| 49 | void acpi_early_processor_set_pdc(void); | 50 | void acpi_early_processor_set_pdc(void); |
| @@ -82,8 +83,16 @@ extern int acpi_sleep_init(void); | |||
| 82 | 83 | ||
| 83 | #ifdef CONFIG_ACPI_SLEEP | 84 | #ifdef CONFIG_ACPI_SLEEP |
| 84 | int acpi_sleep_proc_init(void); | 85 | int acpi_sleep_proc_init(void); |
| 86 | int suspend_nvs_alloc(void); | ||
| 87 | void suspend_nvs_free(void); | ||
| 88 | int suspend_nvs_save(void); | ||
| 89 | void suspend_nvs_restore(void); | ||
| 85 | #else | 90 | #else |
| 86 | static inline int acpi_sleep_proc_init(void) { return 0; } | 91 | static inline int acpi_sleep_proc_init(void) { return 0; } |
| 92 | static inline int suspend_nvs_alloc(void) { return 0; } | ||
| 93 | static inline void suspend_nvs_free(void) {} | ||
| 94 | static inline int suspend_nvs_save(void) { return 0; } | ||
| 95 | static inline void suspend_nvs_restore(void) {} | ||
| 87 | #endif | 96 | #endif |
| 88 | 97 | ||
| 89 | #endif /* _ACPI_INTERNAL_H_ */ | 98 | #endif /* _ACPI_INTERNAL_H_ */ |
diff --git a/kernel/power/nvs.c b/drivers/acpi/nvs.c index 1836db60bbb6..54b6ab8040a6 100644 --- a/kernel/power/nvs.c +++ b/drivers/acpi/nvs.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory | 2 | * nvs.c - Routines for saving and restoring ACPI NVS memory region |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | 4 | * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. |
| 5 | * | 5 | * |
| 6 | * This file is released under the GPLv2. | 6 | * This file is released under the GPLv2. |
| 7 | */ | 7 | */ |
| @@ -11,7 +11,8 @@ | |||
| 11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
| 12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
| 13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
| 14 | #include <linux/suspend.h> | 14 | #include <linux/acpi.h> |
| 15 | #include <acpi/acpiosxf.h> | ||
| 15 | 16 | ||
| 16 | /* | 17 | /* |
| 17 | * Platforms, like ACPI, may want us to save some memory used by them during | 18 | * Platforms, like ACPI, may want us to save some memory used by them during |
| @@ -79,7 +80,7 @@ void suspend_nvs_free(void) | |||
| 79 | free_page((unsigned long)entry->data); | 80 | free_page((unsigned long)entry->data); |
| 80 | entry->data = NULL; | 81 | entry->data = NULL; |
| 81 | if (entry->kaddr) { | 82 | if (entry->kaddr) { |
| 82 | iounmap(entry->kaddr); | 83 | acpi_os_unmap_memory(entry->kaddr, entry->size); |
| 83 | entry->kaddr = NULL; | 84 | entry->kaddr = NULL; |
| 84 | } | 85 | } |
| 85 | } | 86 | } |
| @@ -105,7 +106,7 @@ int suspend_nvs_alloc(void) | |||
| 105 | /** | 106 | /** |
| 106 | * suspend_nvs_save - save NVS memory regions | 107 | * suspend_nvs_save - save NVS memory regions |
| 107 | */ | 108 | */ |
| 108 | void suspend_nvs_save(void) | 109 | int suspend_nvs_save(void) |
| 109 | { | 110 | { |
| 110 | struct nvs_page *entry; | 111 | struct nvs_page *entry; |
| 111 | 112 | ||
| @@ -113,9 +114,16 @@ void suspend_nvs_save(void) | |||
| 113 | 114 | ||
| 114 | list_for_each_entry(entry, &nvs_list, node) | 115 | list_for_each_entry(entry, &nvs_list, node) |
| 115 | if (entry->data) { | 116 | if (entry->data) { |
| 116 | entry->kaddr = ioremap(entry->phys_start, entry->size); | 117 | entry->kaddr = acpi_os_map_memory(entry->phys_start, |
| 118 | entry->size); | ||
| 119 | if (!entry->kaddr) { | ||
| 120 | suspend_nvs_free(); | ||
| 121 | return -ENOMEM; | ||
| 122 | } | ||
| 117 | memcpy(entry->data, entry->kaddr, entry->size); | 123 | memcpy(entry->data, entry->kaddr, entry->size); |
| 118 | } | 124 | } |
| 125 | |||
| 126 | return 0; | ||
| 119 | } | 127 | } |
| 120 | 128 | ||
| 121 | /** | 129 | /** |
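The save path above now goes through the ACPI OSL mapping helpers instead of a bare ioremap(), so a mapping failure can be reported as -ENOMEM. A sketch of that map/copy/unmap pattern in isolation (example_copy_region and its arguments are hypothetical):

    static int example_copy_region(void *dst, acpi_physical_address phys,
                                   acpi_size size)
    {
        void *kaddr = acpi_os_map_memory(phys, size);   /* may fail */

        if (!kaddr)
            return -ENOMEM;

        memcpy(dst, kaddr, size);               /* pull the region contents */
        acpi_os_unmap_memory(kaddr, size);      /* always drop the mapping */
        return 0;
    }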
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 055d7b701fff..e2dd6de5d50c 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
| @@ -320,7 +320,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
| 320 | 320 | ||
| 321 | pg_off = round_down(phys, PAGE_SIZE); | 321 | pg_off = round_down(phys, PAGE_SIZE); |
| 322 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; | 322 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; |
| 323 | virt = ioremap(pg_off, pg_sz); | 323 | virt = ioremap_cache(pg_off, pg_sz); |
| 324 | if (!virt) { | 324 | if (!virt) { |
| 325 | kfree(map); | 325 | kfree(map); |
| 326 | return NULL; | 326 | return NULL; |
| @@ -642,7 +642,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
| 642 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 642 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
| 643 | rcu_read_unlock(); | 643 | rcu_read_unlock(); |
| 644 | if (!virt_addr) { | 644 | if (!virt_addr) { |
| 645 | virt_addr = ioremap(phys_addr, size); | 645 | virt_addr = ioremap_cache(phys_addr, size); |
| 646 | unmap = 1; | 646 | unmap = 1; |
| 647 | } | 647 | } |
| 648 | if (!value) | 648 | if (!value) |
| @@ -678,7 +678,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | |||
| 678 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 678 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
| 679 | rcu_read_unlock(); | 679 | rcu_read_unlock(); |
| 680 | if (!virt_addr) { | 680 | if (!virt_addr) { |
| 681 | virt_addr = ioremap(phys_addr, size); | 681 | virt_addr = ioremap_cache(phys_addr, size); |
| 682 | unmap = 1; | 682 | unmap = 1; |
| 683 | } | 683 | } |
| 684 | 684 | ||
| @@ -1233,8 +1233,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); | |||
| 1233 | int acpi_check_resource_conflict(const struct resource *res) | 1233 | int acpi_check_resource_conflict(const struct resource *res) |
| 1234 | { | 1234 | { |
| 1235 | struct acpi_res_list *res_list_elem; | 1235 | struct acpi_res_list *res_list_elem; |
| 1236 | int ioport; | 1236 | int ioport = 0, clash = 0; |
| 1237 | int clash = 0; | ||
| 1238 | 1237 | ||
| 1239 | if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) | 1238 | if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) |
| 1240 | return 0; | 1239 | return 0; |
| @@ -1264,9 +1263,13 @@ int acpi_check_resource_conflict(const struct resource *res) | |||
| 1264 | if (clash) { | 1263 | if (clash) { |
| 1265 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { | 1264 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { |
| 1266 | printk(KERN_WARNING "ACPI: resource %s %pR" | 1265 | printk(KERN_WARNING "ACPI: resource %s %pR" |
| 1267 | " conflicts with ACPI region %s %pR\n", | 1266 | " conflicts with ACPI region %s " |
| 1267 | "[%s 0x%zx-0x%zx]\n", | ||
| 1268 | res->name, res, res_list_elem->name, | 1268 | res->name, res, res_list_elem->name, |
| 1269 | res_list_elem); | 1269 | (res_list_elem->resource_type == |
| 1270 | ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem", | ||
| 1271 | (size_t) res_list_elem->start, | ||
| 1272 | (size_t) res_list_elem->end); | ||
| 1270 | if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) | 1273 | if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) |
| 1271 | printk(KERN_NOTICE "ACPI: This conflict may" | 1274 | printk(KERN_NOTICE "ACPI: This conflict may" |
| 1272 | " cause random problems and system" | 1275 | " cause random problems and system" |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 4c9c2fb5d98f..9ac2a9fa90ff 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
| @@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power"); | |||
| 56 | #define ACPI_POWER_RESOURCE_STATE_ON 0x01 | 56 | #define ACPI_POWER_RESOURCE_STATE_ON 0x01 |
| 57 | #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF | 57 | #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF |
| 58 | 58 | ||
| 59 | int acpi_power_nocheck; | ||
| 60 | module_param_named(power_nocheck, acpi_power_nocheck, bool, 000); | ||
| 61 | |||
| 62 | static int acpi_power_add(struct acpi_device *device); | 59 | static int acpi_power_add(struct acpi_device *device); |
| 63 | static int acpi_power_remove(struct acpi_device *device, int type); | 60 | static int acpi_power_remove(struct acpi_device *device, int type); |
| 64 | static int acpi_power_resume(struct acpi_device *device); | 61 | static int acpi_power_resume(struct acpi_device *device); |
| @@ -148,9 +145,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state) | |||
| 148 | 145 | ||
| 149 | static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) | 146 | static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) |
| 150 | { | 147 | { |
| 151 | int result = 0, state1; | 148 | int cur_state; |
| 152 | u32 i = 0; | 149 | int i = 0; |
| 153 | |||
| 154 | 150 | ||
| 155 | if (!list || !state) | 151 | if (!list || !state) |
| 156 | return -EINVAL; | 152 | return -EINVAL; |
| @@ -158,25 +154,33 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) | |||
| 158 | /* The state of the list is 'on' IFF all resources are 'on'. */ | 154 | /* The state of the list is 'on' IFF all resources are 'on'. */ |
| 159 | 155 | ||
| 160 | for (i = 0; i < list->count; i++) { | 156 | for (i = 0; i < list->count; i++) { |
| 161 | /* | 157 | struct acpi_power_resource *resource; |
| 162 | * The state of the power resource can be obtained by | 158 | acpi_handle handle = list->handles[i]; |
| 163 | * using the ACPI handle. In such case it is unnecessary to | 159 | int result; |
| 164 | * get the Power resource first and then get its state again. | 160 | |
| 165 | */ | 161 | result = acpi_power_get_context(handle, &resource); |
| 166 | result = acpi_power_get_state(list->handles[i], &state1); | ||
| 167 | if (result) | 162 | if (result) |
| 168 | return result; | 163 | return result; |
| 169 | 164 | ||
| 170 | *state = state1; | 165 | mutex_lock(&resource->resource_lock); |
| 171 | 166 | ||
| 172 | if (*state != ACPI_POWER_RESOURCE_STATE_ON) | 167 | result = acpi_power_get_state(handle, &cur_state); |
| 168 | |||
| 169 | mutex_unlock(&resource->resource_lock); | ||
| 170 | |||
| 171 | if (result) | ||
| 172 | return result; | ||
| 173 | |||
| 174 | if (cur_state != ACPI_POWER_RESOURCE_STATE_ON) | ||
| 173 | break; | 175 | break; |
| 174 | } | 176 | } |
| 175 | 177 | ||
| 176 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", | 178 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", |
| 177 | *state ? "on" : "off")); | 179 | cur_state ? "on" : "off")); |
| 178 | 180 | ||
| 179 | return result; | 181 | *state = cur_state; |
| 182 | |||
| 183 | return 0; | ||
| 180 | } | 184 | } |
| 181 | 185 | ||
| 182 | static int __acpi_power_on(struct acpi_power_resource *resource) | 186 | static int __acpi_power_on(struct acpi_power_resource *resource) |
| @@ -222,7 +226,7 @@ static int acpi_power_on(acpi_handle handle) | |||
| 222 | return result; | 226 | return result; |
| 223 | } | 227 | } |
| 224 | 228 | ||
| 225 | static int acpi_power_off_device(acpi_handle handle) | 229 | static int acpi_power_off(acpi_handle handle) |
| 226 | { | 230 | { |
| 227 | int result = 0; | 231 | int result = 0; |
| 228 | acpi_status status = AE_OK; | 232 | acpi_status status = AE_OK; |
| @@ -266,6 +270,35 @@ static int acpi_power_off_device(acpi_handle handle) | |||
| 266 | return result; | 270 | return result; |
| 267 | } | 271 | } |
| 268 | 272 | ||
| 273 | static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) | ||
| 274 | { | ||
| 275 | int i; | ||
| 276 | |||
| 277 | for (i = num_res - 1; i >= 0 ; i--) | ||
| 278 | acpi_power_off(list->handles[i]); | ||
| 279 | } | ||
| 280 | |||
| 281 | static void acpi_power_off_list(struct acpi_handle_list *list) | ||
| 282 | { | ||
| 283 | __acpi_power_off_list(list, list->count); | ||
| 284 | } | ||
| 285 | |||
| 286 | static int acpi_power_on_list(struct acpi_handle_list *list) | ||
| 287 | { | ||
| 288 | int result = 0; | ||
| 289 | int i; | ||
| 290 | |||
| 291 | for (i = 0; i < list->count; i++) { | ||
| 292 | result = acpi_power_on(list->handles[i]); | ||
| 293 | if (result) { | ||
| 294 | __acpi_power_off_list(list, i); | ||
| 295 | break; | ||
| 296 | } | ||
| 297 | } | ||
| 298 | |||
| 299 | return result; | ||
| 300 | } | ||
| 301 | |||
| 269 | /** | 302 | /** |
| 270 | * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in | 303 | * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in |
| 271 | * ACPI 3.0) _PSW (Power State Wake) | 304 | * ACPI 3.0) _PSW (Power State Wake) |
| @@ -404,8 +437,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) | |||
| 404 | 437 | ||
| 405 | /* Close power resource */ | 438 | /* Close power resource */ |
| 406 | for (i = 0; i < dev->wakeup.resources.count; i++) { | 439 | for (i = 0; i < dev->wakeup.resources.count; i++) { |
| 407 | int ret = acpi_power_off_device( | 440 | int ret = acpi_power_off(dev->wakeup.resources.handles[i]); |
| 408 | dev->wakeup.resources.handles[i]); | ||
| 409 | if (ret) { | 441 | if (ret) { |
| 410 | printk(KERN_ERR PREFIX "Transition power state\n"); | 442 | printk(KERN_ERR PREFIX "Transition power state\n"); |
| 411 | dev->wakeup.flags.valid = 0; | 443 | dev->wakeup.flags.valid = 0; |
| @@ -423,19 +455,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) | |||
| 423 | Device Power Management | 455 | Device Power Management |
| 424 | -------------------------------------------------------------------------- */ | 456 | -------------------------------------------------------------------------- */ |
| 425 | 457 | ||
| 426 | int acpi_power_get_inferred_state(struct acpi_device *device) | 458 | int acpi_power_get_inferred_state(struct acpi_device *device, int *state) |
| 427 | { | 459 | { |
| 428 | int result = 0; | 460 | int result = 0; |
| 429 | struct acpi_handle_list *list = NULL; | 461 | struct acpi_handle_list *list = NULL; |
| 430 | int list_state = 0; | 462 | int list_state = 0; |
| 431 | int i = 0; | 463 | int i = 0; |
| 432 | 464 | ||
| 433 | 465 | if (!device || !state) | |
| 434 | if (!device) | ||
| 435 | return -EINVAL; | 466 | return -EINVAL; |
| 436 | 467 | ||
| 437 | device->power.state = ACPI_STATE_UNKNOWN; | ||
| 438 | |||
| 439 | /* | 468 | /* |
| 440 | * We know a device's inferred power state when all the resources | 469 | * We know a device's inferred power state when all the resources |
| 441 | * required for a given D-state are 'on'. | 470 | * required for a given D-state are 'on'. |
| @@ -450,22 +479,26 @@ int acpi_power_get_inferred_state(struct acpi_device *device) | |||
| 450 | return result; | 479 | return result; |
| 451 | 480 | ||
| 452 | if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { | 481 | if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { |
| 453 | device->power.state = i; | 482 | *state = i; |
| 454 | return 0; | 483 | return 0; |
| 455 | } | 484 | } |
| 456 | } | 485 | } |
| 457 | 486 | ||
| 458 | device->power.state = ACPI_STATE_D3; | 487 | *state = ACPI_STATE_D3; |
| 459 | |||
| 460 | return 0; | 488 | return 0; |
| 461 | } | 489 | } |
| 462 | 490 | ||
| 491 | int acpi_power_on_resources(struct acpi_device *device, int state) | ||
| 492 | { | ||
| 493 | if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) | ||
| 494 | return -EINVAL; | ||
| 495 | |||
| 496 | return acpi_power_on_list(&device->power.states[state].resources); | ||
| 497 | } | ||
| 498 | |||
| 463 | int acpi_power_transition(struct acpi_device *device, int state) | 499 | int acpi_power_transition(struct acpi_device *device, int state) |
| 464 | { | 500 | { |
| 465 | int result = 0; | 501 | int result; |
| 466 | struct acpi_handle_list *cl = NULL; /* Current Resources */ | ||
| 467 | struct acpi_handle_list *tl = NULL; /* Target Resources */ | ||
| 468 | int i = 0; | ||
| 469 | 502 | ||
| 470 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | 503 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) |
| 471 | return -EINVAL; | 504 | return -EINVAL; |
| @@ -477,37 +510,20 @@ int acpi_power_transition(struct acpi_device *device, int state) | |||
| 477 | || (device->power.state > ACPI_STATE_D3)) | 510 | || (device->power.state > ACPI_STATE_D3)) |
| 478 | return -ENODEV; | 511 | return -ENODEV; |
| 479 | 512 | ||
| 480 | cl = &device->power.states[device->power.state].resources; | ||
| 481 | tl = &device->power.states[state].resources; | ||
| 482 | |||
| 483 | /* TBD: Resources must be ordered. */ | 513 | /* TBD: Resources must be ordered. */ |
| 484 | 514 | ||
| 485 | /* | 515 | /* |
| 486 | * First we reference all power resources required in the target list | 516 | * First we reference all power resources required in the target list |
| 487 | * (e.g. so the device doesn't lose power while transitioning). | 517 | * (e.g. so the device doesn't lose power while transitioning). Then, |
| 518 | * we dereference all power resources used in the current list. | ||
| 488 | */ | 519 | */ |
| 489 | for (i = 0; i < tl->count; i++) { | 520 | result = acpi_power_on_list(&device->power.states[state].resources); |
| 490 | result = acpi_power_on(tl->handles[i]); | 521 | if (!result) |
| 491 | if (result) | 522 | acpi_power_off_list( |
| 492 | goto end; | 523 | &device->power.states[device->power.state].resources); |
| 493 | } | ||
| 494 | 524 | ||
| 495 | /* | 525 | /* We shouldn't change the state unless the above operations succeed. */ |
| 496 | * Then we dereference all power resources used in the current list. | 526 | device->power.state = result ? ACPI_STATE_UNKNOWN : state; |
| 497 | */ | ||
| 498 | for (i = 0; i < cl->count; i++) { | ||
| 499 | result = acpi_power_off_device(cl->handles[i]); | ||
| 500 | if (result) | ||
| 501 | goto end; | ||
| 502 | } | ||
| 503 | |||
| 504 | end: | ||
| 505 | if (result) | ||
| 506 | device->power.state = ACPI_STATE_UNKNOWN; | ||
| 507 | else { | ||
| 508 | /* We shouldn't change the state till all above operations succeed */ | ||
| 509 | device->power.state = state; | ||
| 510 | } | ||
| 511 | 527 | ||
| 512 | return result; | 528 | return result; |
| 513 | } | 529 | } |
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index afad67769db6..f5f986991b52 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
| @@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
| 311 | dev->pnp.bus_id, | 311 | dev->pnp.bus_id, |
| 312 | (u32) dev->wakeup.sleep_state, | 312 | (u32) dev->wakeup.sleep_state, |
| 313 | dev->wakeup.flags.run_wake ? '*' : ' ', | 313 | dev->wakeup.flags.run_wake ? '*' : ' ', |
| 314 | dev->wakeup.state.enabled ? "enabled" : "disabled"); | 314 | (device_may_wakeup(&dev->dev) |
| 315 | || (ldev && device_may_wakeup(ldev))) ? | ||
| 316 | "enabled" : "disabled"); | ||
| 315 | if (ldev) | 317 | if (ldev) |
| 316 | seq_printf(seq, "%s:%s", | 318 | seq_printf(seq, "%s:%s", |
| 317 | ldev->bus ? ldev->bus->name : "no-bus", | 319 | ldev->bus ? ldev->bus->name : "no-bus", |
| @@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) | |||
| 328 | { | 330 | { |
| 329 | struct device *dev = acpi_get_physical_device(adev->handle); | 331 | struct device *dev = acpi_get_physical_device(adev->handle); |
| 330 | 332 | ||
| 331 | if (dev && device_can_wakeup(dev)) | 333 | if (dev && device_can_wakeup(dev)) { |
| 332 | device_set_wakeup_enable(dev, adev->wakeup.state.enabled); | 334 | bool enable = !device_may_wakeup(dev); |
| 335 | device_set_wakeup_enable(dev, enable); | ||
| 336 | } | ||
| 333 | } | 337 | } |
| 334 | 338 | ||
| 335 | static ssize_t | 339 | static ssize_t |
| @@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file, | |||
| 341 | char strbuf[5]; | 345 | char strbuf[5]; |
| 342 | char str[5] = ""; | 346 | char str[5] = ""; |
| 343 | unsigned int len = count; | 347 | unsigned int len = count; |
| 344 | struct acpi_device *found_dev = NULL; | ||
| 345 | 348 | ||
| 346 | if (len > 4) | 349 | if (len > 4) |
| 347 | len = 4; | 350 | len = 4; |
| @@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file, | |||
| 361 | continue; | 364 | continue; |
| 362 | 365 | ||
| 363 | if (!strncmp(dev->pnp.bus_id, str, 4)) { | 366 | if (!strncmp(dev->pnp.bus_id, str, 4)) { |
| 364 | dev->wakeup.state.enabled = | 367 | if (device_can_wakeup(&dev->dev)) { |
| 365 | dev->wakeup.state.enabled ? 0 : 1; | 368 | bool enable = !device_may_wakeup(&dev->dev); |
| 366 | found_dev = dev; | 369 | device_set_wakeup_enable(&dev->dev, enable); |
| 367 | break; | 370 | } else { |
| 368 | } | ||
| 369 | } | ||
| 370 | if (found_dev) { | ||
| 371 | physical_device_enable_wakeup(found_dev); | ||
| 372 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | ||
| 373 | struct acpi_device *dev = container_of(node, | ||
| 374 | struct | ||
| 375 | acpi_device, | ||
| 376 | wakeup_list); | ||
| 377 | |||
| 378 | if ((dev != found_dev) && | ||
| 379 | (dev->wakeup.gpe_number == | ||
| 380 | found_dev->wakeup.gpe_number) | ||
| 381 | && (dev->wakeup.gpe_device == | ||
| 382 | found_dev->wakeup.gpe_device)) { | ||
| 383 | printk(KERN_WARNING | ||
| 384 | "ACPI: '%s' and '%s' have the same GPE, " | ||
| 385 | "can't disable/enable one separately\n", | ||
| 386 | dev->pnp.bus_id, found_dev->pnp.bus_id); | ||
| 387 | dev->wakeup.state.enabled = | ||
| 388 | found_dev->wakeup.state.enabled; | ||
| 389 | physical_device_enable_wakeup(dev); | 371 | physical_device_enable_wakeup(dev); |
| 390 | } | 372 | } |
| 373 | break; | ||
| 391 | } | 374 | } |
| 392 | } | 375 | } |
| 393 | mutex_unlock(&acpi_device_lock); | 376 | mutex_unlock(&acpi_device_lock); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 85e48047d7b0..360a74e6add0 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
| @@ -40,10 +40,6 @@ | |||
| 40 | #include <linux/pm.h> | 40 | #include <linux/pm.h> |
| 41 | #include <linux/cpufreq.h> | 41 | #include <linux/cpufreq.h> |
| 42 | #include <linux/cpu.h> | 42 | #include <linux/cpu.h> |
| 43 | #ifdef CONFIG_ACPI_PROCFS | ||
| 44 | #include <linux/proc_fs.h> | ||
| 45 | #include <linux/seq_file.h> | ||
| 46 | #endif | ||
| 47 | #include <linux/dmi.h> | 43 | #include <linux/dmi.h> |
| 48 | #include <linux/moduleparam.h> | 44 | #include <linux/moduleparam.h> |
| 49 | #include <linux/cpuidle.h> | 45 | #include <linux/cpuidle.h> |
| @@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr) | |||
| 246 | return result; | 242 | return result; |
| 247 | } | 243 | } |
| 248 | 244 | ||
| 249 | #ifdef CONFIG_ACPI_PROCFS | ||
| 250 | static struct proc_dir_entry *acpi_processor_dir = NULL; | ||
| 251 | |||
| 252 | static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) | ||
| 253 | { | ||
| 254 | struct proc_dir_entry *entry = NULL; | ||
| 255 | |||
| 256 | |||
| 257 | if (!acpi_device_dir(device)) { | ||
| 258 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | ||
| 259 | acpi_processor_dir); | ||
| 260 | if (!acpi_device_dir(device)) | ||
| 261 | return -ENODEV; | ||
| 262 | } | ||
| 263 | |||
| 264 | /* 'throttling' [R/W] */ | ||
| 265 | entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, | ||
| 266 | S_IFREG | S_IRUGO | S_IWUSR, | ||
| 267 | acpi_device_dir(device), | ||
| 268 | &acpi_processor_throttling_fops, | ||
| 269 | acpi_driver_data(device)); | ||
| 270 | if (!entry) | ||
| 271 | return -EIO; | ||
| 272 | return 0; | ||
| 273 | } | ||
| 274 | static int acpi_processor_remove_fs(struct acpi_device *device) | ||
| 275 | { | ||
| 276 | |||
| 277 | if (acpi_device_dir(device)) { | ||
| 278 | remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, | ||
| 279 | acpi_device_dir(device)); | ||
| 280 | remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); | ||
| 281 | acpi_device_dir(device) = NULL; | ||
| 282 | } | ||
| 283 | |||
| 284 | return 0; | ||
| 285 | } | ||
| 286 | #else | ||
| 287 | static inline int acpi_processor_add_fs(struct acpi_device *device) | ||
| 288 | { | ||
| 289 | return 0; | ||
| 290 | } | ||
| 291 | static inline int acpi_processor_remove_fs(struct acpi_device *device) | ||
| 292 | { | ||
| 293 | return 0; | ||
| 294 | } | ||
| 295 | #endif | ||
| 296 | /* -------------------------------------------------------------------------- | 245 | /* -------------------------------------------------------------------------- |
| 297 | Driver Interface | 246 | Driver Interface |
| 298 | -------------------------------------------------------------------------- */ | 247 | -------------------------------------------------------------------------- */ |
| @@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
| 478 | if (action == CPU_ONLINE && pr) { | 427 | if (action == CPU_ONLINE && pr) { |
| 479 | acpi_processor_ppc_has_changed(pr, 0); | 428 | acpi_processor_ppc_has_changed(pr, 0); |
| 480 | acpi_processor_cst_has_changed(pr); | 429 | acpi_processor_cst_has_changed(pr); |
| 430 | acpi_processor_reevaluate_tstate(pr, action); | ||
| 481 | acpi_processor_tstate_has_changed(pr); | 431 | acpi_processor_tstate_has_changed(pr); |
| 482 | } | 432 | } |
| 433 | if (action == CPU_DEAD && pr) { | ||
| 434 | /* invalidate the flag.throttling after one CPU is offline */ | ||
| 435 | acpi_processor_reevaluate_tstate(pr, action); | ||
| 436 | } | ||
| 483 | return NOTIFY_OK; | 437 | return NOTIFY_OK; |
| 484 | } | 438 | } |
| 485 | 439 | ||
| @@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
| 537 | 491 | ||
| 538 | per_cpu(processors, pr->id) = pr; | 492 | per_cpu(processors, pr->id) = pr; |
| 539 | 493 | ||
| 540 | result = acpi_processor_add_fs(device); | ||
| 541 | if (result) | ||
| 542 | goto err_free_cpumask; | ||
| 543 | |||
| 544 | sysdev = get_cpu_sysdev(pr->id); | 494 | sysdev = get_cpu_sysdev(pr->id); |
| 545 | if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { | 495 | if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { |
| 546 | result = -EFAULT; | 496 | result = -EFAULT; |
| 547 | goto err_remove_fs; | 497 | goto err_free_cpumask; |
| 548 | } | 498 | } |
| 549 | 499 | ||
| 550 | #ifdef CONFIG_CPU_FREQ | 500 | #ifdef CONFIG_CPU_FREQ |
| @@ -590,8 +540,6 @@ err_thermal_unregister: | |||
| 590 | thermal_cooling_device_unregister(pr->cdev); | 540 | thermal_cooling_device_unregister(pr->cdev); |
| 591 | err_power_exit: | 541 | err_power_exit: |
| 592 | acpi_processor_power_exit(pr, device); | 542 | acpi_processor_power_exit(pr, device); |
| 593 | err_remove_fs: | ||
| 594 | acpi_processor_remove_fs(device); | ||
| 595 | err_free_cpumask: | 543 | err_free_cpumask: |
| 596 | free_cpumask_var(pr->throttling.shared_cpu_map); | 544 | free_cpumask_var(pr->throttling.shared_cpu_map); |
| 597 | 545 | ||
| @@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type) | |||
| 620 | 568 | ||
| 621 | sysfs_remove_link(&device->dev.kobj, "sysdev"); | 569 | sysfs_remove_link(&device->dev.kobj, "sysdev"); |
| 622 | 570 | ||
| 623 | acpi_processor_remove_fs(device); | ||
| 624 | |||
| 625 | if (pr->cdev) { | 571 | if (pr->cdev) { |
| 626 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); | 572 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); |
| 627 | sysfs_remove_link(&pr->cdev->device.kobj, "device"); | 573 | sysfs_remove_link(&pr->cdev->device.kobj, "device"); |
| @@ -854,12 +800,6 @@ static int __init acpi_processor_init(void) | |||
| 854 | 800 | ||
| 855 | memset(&errata, 0, sizeof(errata)); | 801 | memset(&errata, 0, sizeof(errata)); |
| 856 | 802 | ||
| 857 | #ifdef CONFIG_ACPI_PROCFS | ||
| 858 | acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
| 859 | if (!acpi_processor_dir) | ||
| 860 | return -ENOMEM; | ||
| 861 | #endif | ||
| 862 | |||
| 863 | if (!cpuidle_register_driver(&acpi_idle_driver)) { | 803 | if (!cpuidle_register_driver(&acpi_idle_driver)) { |
| 864 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | 804 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", |
| 865 | acpi_idle_driver.name); | 805 | acpi_idle_driver.name); |
| @@ -885,10 +825,6 @@ static int __init acpi_processor_init(void) | |||
| 885 | out_cpuidle: | 825 | out_cpuidle: |
| 886 | cpuidle_unregister_driver(&acpi_idle_driver); | 826 | cpuidle_unregister_driver(&acpi_idle_driver); |
| 887 | 827 | ||
| 888 | #ifdef CONFIG_ACPI_PROCFS | ||
| 889 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
| 890 | #endif | ||
| 891 | |||
| 892 | return result; | 828 | return result; |
| 893 | } | 829 | } |
| 894 | 830 | ||
| @@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void) | |||
| 907 | 843 | ||
| 908 | cpuidle_unregister_driver(&acpi_idle_driver); | 844 | cpuidle_unregister_driver(&acpi_idle_driver); |
| 909 | 845 | ||
| 910 | #ifdef CONFIG_ACPI_PROCFS | ||
| 911 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
| 912 | #endif | ||
| 913 | |||
| 914 | return; | 846 | return; |
| 915 | } | 847 | } |
| 916 | 848 | ||
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ff3632717c51..fa84e9744330 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
| @@ -32,10 +32,6 @@ | |||
| 32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
| 33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
| 34 | #include <linux/cpufreq.h> | 34 | #include <linux/cpufreq.h> |
| 35 | #ifdef CONFIG_ACPI_PROCFS | ||
| 36 | #include <linux/proc_fs.h> | ||
| 37 | #include <linux/seq_file.h> | ||
| 38 | #endif | ||
| 39 | 35 | ||
| 40 | #include <asm/io.h> | 36 | #include <asm/io.h> |
| 41 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
| @@ -370,6 +366,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) | |||
| 370 | } | 366 | } |
| 371 | 367 | ||
| 372 | /* | 368 | /* |
| 369 | * This function is used to reevaluate whether the T-state is valid | ||
| 370 | * after one CPU is onlined/offlined. | ||
| 371 | * Note that it will not reevaluate the following properties of | ||
| 372 | * the T-states: | ||
| 373 | * 1. the control method, | ||
| 374 | * 2. the number of supported T-states, | ||
| 375 | * 3. the _TSD domain. | ||
| 376 | */ | ||
| 377 | void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, | ||
| 378 | unsigned long action) | ||
| 379 | { | ||
| 380 | int result = 0; | ||
| 381 | |||
| 382 | if (action == CPU_DEAD) { | ||
| 383 | /* When a CPU goes offline, its T-state throttling | ||
| 384 | * is invalidated. | ||
| 385 | */ | ||
| 386 | pr->flags.throttling = 0; | ||
| 387 | return; | ||
| 388 | } | ||
| 389 | /* Recheck whether the T-states are still valid for the CPU | ||
| 390 | * that has just come online. | ||
| 391 | */ | ||
| 392 | if (!pr->throttling.state_count) { | ||
| 393 | /* If the T-state count is zero, throttling is | ||
| 394 | * invalidated. | ||
| 395 | */ | ||
| 396 | pr->flags.throttling = 0; | ||
| 397 | return; | ||
| 398 | } | ||
| 399 | pr->flags.throttling = 1; | ||
| 400 | |||
| 401 | /* Disable throttling (if enabled). We'll let subsequent | ||
| 402 | * policy (e.g. thermal) decide to lower performance if it | ||
| 403 | * so chooses, but for now we'll crank up the speed. | ||
| 404 | */ | ||
| 405 | |||
| 406 | result = acpi_processor_get_throttling(pr); | ||
| 407 | if (result) | ||
| 408 | goto end; | ||
| 409 | |||
| 410 | if (pr->throttling.state) { | ||
| 411 | result = acpi_processor_set_throttling(pr, 0, false); | ||
| 412 | if (result) | ||
| 413 | goto end; | ||
| 414 | } | ||
| 415 | |||
| 416 | end: | ||
| 417 | if (result) | ||
| 418 | pr->flags.throttling = 0; | ||
| 419 | } | ||
| 420 | /* | ||
| 373 | * _PTC - Processor Throttling Control (and status) register location | 421 | * _PTC - Processor Throttling Control (and status) register location |
| 374 | */ | 422 | */ |
| 375 | static int acpi_processor_get_throttling_control(struct acpi_processor *pr) | 423 | static int acpi_processor_get_throttling_control(struct acpi_processor *pr) |
| @@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) | |||
| 876 | */ | 924 | */ |
| 877 | cpumask_copy(saved_mask, ¤t->cpus_allowed); | 925 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
| 878 | /* FIXME: use work_on_cpu() */ | 926 | /* FIXME: use work_on_cpu() */ |
| 879 | set_cpus_allowed_ptr(current, cpumask_of(pr->id)); | 927 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
| 928 | /* Can't migrate to the target pr->id CPU. Exit */ | ||
| 929 | free_cpumask_var(saved_mask); | ||
| 930 | return -ENODEV; | ||
| 931 | } | ||
| 880 | ret = pr->throttling.acpi_processor_get_throttling(pr); | 932 | ret = pr->throttling.acpi_processor_get_throttling(pr); |
| 881 | /* restore the previous state */ | 933 | /* restore the previous state */ |
| 882 | set_cpus_allowed_ptr(current, saved_mask); | 934 | set_cpus_allowed_ptr(current, saved_mask); |
| @@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
| 1051 | return -ENOMEM; | 1103 | return -ENOMEM; |
| 1052 | } | 1104 | } |
| 1053 | 1105 | ||
| 1106 | if (cpu_is_offline(pr->id)) { | ||
| 1107 | /* | ||
| 1108 | * The CPU pointed to by pr->id is offline; there is no need | ||
| 1109 | * to change the throttling state any more. | ||
| 1110 | */ | ||
| 1111 | return -ENODEV; | ||
| 1112 | } | ||
| 1113 | |||
| 1054 | cpumask_copy(saved_mask, ¤t->cpus_allowed); | 1114 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
| 1055 | t_state.target_state = state; | 1115 | t_state.target_state = state; |
| 1056 | p_throttling = &(pr->throttling); | 1116 | p_throttling = &(pr->throttling); |
| @@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
| 1074 | */ | 1134 | */ |
| 1075 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { | 1135 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { |
| 1076 | /* FIXME: use work_on_cpu() */ | 1136 | /* FIXME: use work_on_cpu() */ |
| 1077 | set_cpus_allowed_ptr(current, cpumask_of(pr->id)); | 1137 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
| 1138 | /* Can't migrate to the pr->id CPU. Exit */ | ||
| 1139 | ret = -ENODEV; | ||
| 1140 | goto exit; | ||
| 1141 | } | ||
| 1078 | ret = p_throttling->acpi_processor_set_throttling(pr, | 1142 | ret = p_throttling->acpi_processor_set_throttling(pr, |
| 1079 | t_state.target_state, force); | 1143 | t_state.target_state, force); |
| 1080 | } else { | 1144 | } else { |
| @@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
| 1106 | } | 1170 | } |
| 1107 | t_state.cpu = i; | 1171 | t_state.cpu = i; |
| 1108 | /* FIXME: use work_on_cpu() */ | 1172 | /* FIXME: use work_on_cpu() */ |
| 1109 | set_cpus_allowed_ptr(current, cpumask_of(i)); | 1173 | if (set_cpus_allowed_ptr(current, cpumask_of(i))) |
| 1174 | continue; | ||
| 1110 | ret = match_pr->throttling. | 1175 | ret = match_pr->throttling. |
| 1111 | acpi_processor_set_throttling( | 1176 | acpi_processor_set_throttling( |
| 1112 | match_pr, t_state.target_state, force); | 1177 | match_pr, t_state.target_state, force); |
| @@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
| 1126 | /* restore the previous state */ | 1191 | /* restore the previous state */ |
| 1127 | /* FIXME: use work_on_cpu() */ | 1192 | /* FIXME: use work_on_cpu() */ |
| 1128 | set_cpus_allowed_ptr(current, saved_mask); | 1193 | set_cpus_allowed_ptr(current, saved_mask); |
| 1194 | exit: | ||
| 1129 | free_cpumask_var(online_throttling_cpus); | 1195 | free_cpumask_var(online_throttling_cpus); |
| 1130 | free_cpumask_var(saved_mask); | 1196 | free_cpumask_var(saved_mask); |
| 1131 | return ret; | 1197 | return ret; |
| @@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) | |||
| 1216 | return result; | 1282 | return result; |
| 1217 | } | 1283 | } |
| 1218 | 1284 | ||
| 1219 | #ifdef CONFIG_ACPI_PROCFS | ||
| 1220 | /* proc interface */ | ||
| 1221 | static int acpi_processor_throttling_seq_show(struct seq_file *seq, | ||
| 1222 | void *offset) | ||
| 1223 | { | ||
| 1224 | struct acpi_processor *pr = seq->private; | ||
| 1225 | int i = 0; | ||
| 1226 | int result = 0; | ||
| 1227 | |||
| 1228 | if (!pr) | ||
| 1229 | goto end; | ||
| 1230 | |||
| 1231 | if (!(pr->throttling.state_count > 0)) { | ||
| 1232 | seq_puts(seq, "<not supported>\n"); | ||
| 1233 | goto end; | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | result = acpi_processor_get_throttling(pr); | ||
| 1237 | |||
| 1238 | if (result) { | ||
| 1239 | seq_puts(seq, | ||
| 1240 | "Could not determine current throttling state.\n"); | ||
| 1241 | goto end; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | seq_printf(seq, "state count: %d\n" | ||
| 1245 | "active state: T%d\n" | ||
| 1246 | "state available: T%d to T%d\n", | ||
| 1247 | pr->throttling.state_count, pr->throttling.state, | ||
| 1248 | pr->throttling_platform_limit, | ||
| 1249 | pr->throttling.state_count - 1); | ||
| 1250 | |||
| 1251 | seq_puts(seq, "states:\n"); | ||
| 1252 | if (pr->throttling.acpi_processor_get_throttling == | ||
| 1253 | acpi_processor_get_throttling_fadt) { | ||
| 1254 | for (i = 0; i < pr->throttling.state_count; i++) | ||
| 1255 | seq_printf(seq, " %cT%d: %02d%%\n", | ||
| 1256 | (i == pr->throttling.state ? '*' : ' '), i, | ||
| 1257 | (pr->throttling.states[i].performance ? pr-> | ||
| 1258 | throttling.states[i].performance / 10 : 0)); | ||
| 1259 | } else { | ||
| 1260 | for (i = 0; i < pr->throttling.state_count; i++) | ||
| 1261 | seq_printf(seq, " %cT%d: %02d%%\n", | ||
| 1262 | (i == pr->throttling.state ? '*' : ' '), i, | ||
| 1263 | (int)pr->throttling.states_tss[i]. | ||
| 1264 | freqpercentage); | ||
| 1265 | } | ||
| 1266 | |||
| 1267 | end: | ||
| 1268 | return 0; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | static int acpi_processor_throttling_open_fs(struct inode *inode, | ||
| 1272 | struct file *file) | ||
| 1273 | { | ||
| 1274 | return single_open(file, acpi_processor_throttling_seq_show, | ||
| 1275 | PDE(inode)->data); | ||
| 1276 | } | ||
| 1277 | |||
| 1278 | static ssize_t acpi_processor_write_throttling(struct file *file, | ||
| 1279 | const char __user * buffer, | ||
| 1280 | size_t count, loff_t * data) | ||
| 1281 | { | ||
| 1282 | int result = 0; | ||
| 1283 | struct seq_file *m = file->private_data; | ||
| 1284 | struct acpi_processor *pr = m->private; | ||
| 1285 | char state_string[5] = ""; | ||
| 1286 | char *charp = NULL; | ||
| 1287 | size_t state_val = 0; | ||
| 1288 | char tmpbuf[5] = ""; | ||
| 1289 | |||
| 1290 | if (!pr || (count > sizeof(state_string) - 1)) | ||
| 1291 | return -EINVAL; | ||
| 1292 | |||
| 1293 | if (copy_from_user(state_string, buffer, count)) | ||
| 1294 | return -EFAULT; | ||
| 1295 | |||
| 1296 | state_string[count] = '\0'; | ||
| 1297 | if ((count > 0) && (state_string[count-1] == '\n')) | ||
| 1298 | state_string[count-1] = '\0'; | ||
| 1299 | |||
| 1300 | charp = state_string; | ||
| 1301 | if ((state_string[0] == 't') || (state_string[0] == 'T')) | ||
| 1302 | charp++; | ||
| 1303 | |||
| 1304 | state_val = simple_strtoul(charp, NULL, 0); | ||
| 1305 | if (state_val >= pr->throttling.state_count) | ||
| 1306 | return -EINVAL; | ||
| 1307 | |||
| 1308 | snprintf(tmpbuf, 5, "%zu", state_val); | ||
| 1309 | |||
| 1310 | if (strcmp(tmpbuf, charp) != 0) | ||
| 1311 | return -EINVAL; | ||
| 1312 | |||
| 1313 | result = acpi_processor_set_throttling(pr, state_val, false); | ||
| 1314 | if (result) | ||
| 1315 | return result; | ||
| 1316 | |||
| 1317 | return count; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | const struct file_operations acpi_processor_throttling_fops = { | ||
| 1321 | .owner = THIS_MODULE, | ||
| 1322 | .open = acpi_processor_throttling_open_fs, | ||
| 1323 | .read = seq_read, | ||
| 1324 | .write = acpi_processor_write_throttling, | ||
| 1325 | .llseek = seq_lseek, | ||
| 1326 | .release = single_release, | ||
| 1327 | }; | ||
| 1328 | #endif | ||
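Both throttling paths above now check the return value of set_cpus_allowed_ptr(), so a CPU that disappears mid-operation is reported as -ENODEV instead of the work silently running on the wrong CPU. The pattern in isolation, as a sketch (example_run_on_cpu and the placeholder work are hypothetical):

    static int example_run_on_cpu(unsigned int cpu)
    {
        cpumask_var_t saved_mask;
        int ret = 0;

        if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
            return -ENOMEM;

        cpumask_copy(saved_mask, &current->cpus_allowed);
        if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) {
            ret = -ENODEV;      /* target CPU is offline, give up */
            goto out;
        }

        /* ... per-CPU register access would go here ... */

        set_cpus_allowed_ptr(current, saved_mask);      /* restore affinity */
    out:
        free_cpumask_var(saved_mask);
        return ret;
    }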
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index e5dbedb16bbf..51ae3794ec7f 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
| @@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir, | |||
| 484 | const struct file_operations *state_fops, | 484 | const struct file_operations *state_fops, |
| 485 | const struct file_operations *alarm_fops, void *data) | 485 | const struct file_operations *alarm_fops, void *data) |
| 486 | { | 486 | { |
| 487 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded," | ||
| 488 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
| 487 | if (!*dir) { | 489 | if (!*dir) { |
| 488 | *dir = proc_mkdir(dir_name, parent_dir); | 490 | *dir = proc_mkdir(dir_name, parent_dir); |
| 489 | if (!*dir) { | 491 | if (!*dir) { |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 29ef505c487b..b99e62494607 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -778,7 +778,7 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, | |||
| 778 | wakeup->resources.handles[i] = element->reference.handle; | 778 | wakeup->resources.handles[i] = element->reference.handle; |
| 779 | } | 779 | } |
| 780 | 780 | ||
| 781 | acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number); | 781 | acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number); |
| 782 | 782 | ||
| 783 | out: | 783 | out: |
| 784 | kfree(buffer.pointer); | 784 | kfree(buffer.pointer); |
| @@ -803,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | |||
| 803 | /* Power button, Lid switch always enable wakeup */ | 803 | /* Power button, Lid switch always enable wakeup */ |
| 804 | if (!acpi_match_device_ids(device, button_device_ids)) { | 804 | if (!acpi_match_device_ids(device, button_device_ids)) { |
| 805 | device->wakeup.flags.run_wake = 1; | 805 | device->wakeup.flags.run_wake = 1; |
| 806 | device->wakeup.flags.always_enabled = 1; | 806 | device_set_wakeup_capable(&device->dev, true); |
| 807 | return; | 807 | return; |
| 808 | } | 808 | } |
| 809 | 809 | ||
| @@ -815,16 +815,22 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | |||
| 815 | !!(event_status & ACPI_EVENT_FLAG_HANDLE); | 815 | !!(event_status & ACPI_EVENT_FLAG_HANDLE); |
| 816 | } | 816 | } |
| 817 | 817 | ||
| 818 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | 818 | static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) |
| 819 | { | 819 | { |
| 820 | acpi_handle temp; | ||
| 820 | acpi_status status = 0; | 821 | acpi_status status = 0; |
| 821 | int psw_error; | 822 | int psw_error; |
| 822 | 823 | ||
| 824 | /* Presence of _PRW indicates wake capable */ | ||
| 825 | status = acpi_get_handle(device->handle, "_PRW", &temp); | ||
| 826 | if (ACPI_FAILURE(status)) | ||
| 827 | return; | ||
| 828 | |||
| 823 | status = acpi_bus_extract_wakeup_device_power_package(device->handle, | 829 | status = acpi_bus_extract_wakeup_device_power_package(device->handle, |
| 824 | &device->wakeup); | 830 | &device->wakeup); |
| 825 | if (ACPI_FAILURE(status)) { | 831 | if (ACPI_FAILURE(status)) { |
| 826 | ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); | 832 | ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); |
| 827 | goto end; | 833 | return; |
| 828 | } | 834 | } |
| 829 | 835 | ||
| 830 | device->wakeup.flags.valid = 1; | 836 | device->wakeup.flags.valid = 1; |
| @@ -840,13 +846,10 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
| 840 | if (psw_error) | 846 | if (psw_error) |
| 841 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 847 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
| 842 | "error in _DSW or _PSW evaluation\n")); | 848 | "error in _DSW or _PSW evaluation\n")); |
| 843 | |||
| 844 | end: | ||
| 845 | if (ACPI_FAILURE(status)) | ||
| 846 | device->flags.wake_capable = 0; | ||
| 847 | return 0; | ||
| 848 | } | 849 | } |
| 849 | 850 | ||
| 851 | static void acpi_bus_add_power_resource(acpi_handle handle); | ||
| 852 | |||
| 850 | static int acpi_bus_get_power_flags(struct acpi_device *device) | 853 | static int acpi_bus_get_power_flags(struct acpi_device *device) |
| 851 | { | 854 | { |
| 852 | acpi_status status = 0; | 855 | acpi_status status = 0; |
| @@ -875,8 +878,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
| 875 | acpi_evaluate_reference(device->handle, object_name, NULL, | 878 | acpi_evaluate_reference(device->handle, object_name, NULL, |
| 876 | &ps->resources); | 879 | &ps->resources); |
| 877 | if (ps->resources.count) { | 880 | if (ps->resources.count) { |
| 881 | int j; | ||
| 882 | |||
| 878 | device->power.flags.power_resources = 1; | 883 | device->power.flags.power_resources = 1; |
| 879 | ps->flags.valid = 1; | 884 | ps->flags.valid = 1; |
| 885 | for (j = 0; j < ps->resources.count; j++) | ||
| 886 | acpi_bus_add_power_resource(ps->resources.handles[j]); | ||
| 880 | } | 887 | } |
| 881 | 888 | ||
| 882 | /* Evaluate "_PSx" to see if we can do explicit sets */ | 889 | /* Evaluate "_PSx" to see if we can do explicit sets */ |
| @@ -901,10 +908,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
| 901 | device->power.states[ACPI_STATE_D3].flags.valid = 1; | 908 | device->power.states[ACPI_STATE_D3].flags.valid = 1; |
| 902 | device->power.states[ACPI_STATE_D3].power = 0; | 909 | device->power.states[ACPI_STATE_D3].power = 0; |
| 903 | 910 | ||
| 904 | /* TBD: System wake support and resource requirements. */ | 911 | acpi_bus_init_power(device); |
| 905 | |||
| 906 | device->power.state = ACPI_STATE_UNKNOWN; | ||
| 907 | acpi_bus_get_power(device->handle, &(device->power.state)); | ||
| 908 | 912 | ||
| 909 | return 0; | 913 | return 0; |
| 910 | } | 914 | } |
| @@ -947,11 +951,6 @@ static int acpi_bus_get_flags(struct acpi_device *device) | |||
| 947 | if (ACPI_SUCCESS(status)) | 951 | if (ACPI_SUCCESS(status)) |
| 948 | device->flags.power_manageable = 1; | 952 | device->flags.power_manageable = 1; |
| 949 | 953 | ||
| 950 | /* Presence of _PRW indicates wake capable */ | ||
| 951 | status = acpi_get_handle(device->handle, "_PRW", &temp); | ||
| 952 | if (ACPI_SUCCESS(status)) | ||
| 953 | device->flags.wake_capable = 1; | ||
| 954 | |||
| 955 | /* TBD: Performance management */ | 954 | /* TBD: Performance management */ |
| 956 | 955 | ||
| 957 | return 0; | 956 | return 0; |
| @@ -1278,11 +1277,7 @@ static int acpi_add_single_object(struct acpi_device **child, | |||
| 1278 | * Wakeup device management | 1277 | * Wakeup device management |
| 1279 | *----------------------- | 1278 | *----------------------- |
| 1280 | */ | 1279 | */ |
| 1281 | if (device->flags.wake_capable) { | 1280 | acpi_bus_get_wakeup_device_flags(device); |
| 1282 | result = acpi_bus_get_wakeup_device_flags(device); | ||
| 1283 | if (result) | ||
| 1284 | goto end; | ||
| 1285 | } | ||
| 1286 | 1281 | ||
| 1287 | /* | 1282 | /* |
| 1288 | * Performance Management | 1283 | * Performance Management |
| @@ -1326,6 +1321,20 @@ end: | |||
| 1326 | #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ | 1321 | #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ |
| 1327 | ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) | 1322 | ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) |
| 1328 | 1323 | ||
| 1324 | static void acpi_bus_add_power_resource(acpi_handle handle) | ||
| 1325 | { | ||
| 1326 | struct acpi_bus_ops ops = { | ||
| 1327 | .acpi_op_add = 1, | ||
| 1328 | .acpi_op_start = 1, | ||
| 1329 | }; | ||
| 1330 | struct acpi_device *device = NULL; | ||
| 1331 | |||
| 1332 | acpi_bus_get_device(handle, &device); | ||
| 1333 | if (!device) | ||
| 1334 | acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER, | ||
| 1335 | ACPI_STA_DEFAULT, &ops); | ||
| 1336 | } | ||
| 1337 | |||
| 1329 | static int acpi_bus_type_and_status(acpi_handle handle, int *type, | 1338 | static int acpi_bus_type_and_status(acpi_handle handle, int *type, |
| 1330 | unsigned long long *sta) | 1339 | unsigned long long *sta) |
| 1331 | { | 1340 | { |
| @@ -1371,7 +1380,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
| 1371 | struct acpi_bus_ops *ops = context; | 1380 | struct acpi_bus_ops *ops = context; |
| 1372 | int type; | 1381 | int type; |
| 1373 | unsigned long long sta; | 1382 | unsigned long long sta; |
| 1374 | struct acpi_device_wakeup wakeup; | ||
| 1375 | struct acpi_device *device; | 1383 | struct acpi_device *device; |
| 1376 | acpi_status status; | 1384 | acpi_status status; |
| 1377 | int result; | 1385 | int result; |
| @@ -1382,7 +1390,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
| 1382 | 1390 | ||
| 1383 | if (!(sta & ACPI_STA_DEVICE_PRESENT) && | 1391 | if (!(sta & ACPI_STA_DEVICE_PRESENT) && |
| 1384 | !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { | 1392 | !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { |
| 1385 | acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); | 1393 | struct acpi_device_wakeup wakeup; |
| 1394 | acpi_handle temp; | ||
| 1395 | |||
| 1396 | status = acpi_get_handle(handle, "_PRW", &temp); | ||
| 1397 | if (ACPI_SUCCESS(status)) | ||
| 1398 | acpi_bus_extract_wakeup_device_power_package(handle, | ||
| 1399 | &wakeup); | ||
| 1386 | return AE_CTRL_DEPTH; | 1400 | return AE_CTRL_DEPTH; |
| 1387 | } | 1401 | } |
| 1388 | 1402 | ||
| @@ -1467,7 +1481,7 @@ int acpi_bus_start(struct acpi_device *device) | |||
| 1467 | 1481 | ||
| 1468 | result = acpi_bus_scan(device->handle, &ops, NULL); | 1482 | result = acpi_bus_scan(device->handle, &ops, NULL); |
| 1469 | 1483 | ||
| 1470 | acpi_update_gpes(); | 1484 | acpi_update_all_gpes(); |
| 1471 | 1485 | ||
| 1472 | return result; | 1486 | return result; |
| 1473 | } | 1487 | } |
| @@ -1573,6 +1587,8 @@ int __init acpi_scan_init(void) | |||
| 1573 | printk(KERN_ERR PREFIX "Could not register bus type\n"); | 1587 | printk(KERN_ERR PREFIX "Could not register bus type\n"); |
| 1574 | } | 1588 | } |
| 1575 | 1589 | ||
| 1590 | acpi_power_init(); | ||
| 1591 | |||
| 1576 | /* | 1592 | /* |
| 1577 | * Enumerate devices in the ACPI namespace. | 1593 | * Enumerate devices in the ACPI namespace. |
| 1578 | */ | 1594 | */ |
| @@ -1584,7 +1600,7 @@ int __init acpi_scan_init(void) | |||
| 1584 | if (result) | 1600 | if (result) |
| 1585 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); | 1601 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); |
| 1586 | else | 1602 | else |
| 1587 | acpi_update_gpes(); | 1603 | acpi_update_all_gpes(); |
| 1588 | 1604 | ||
| 1589 | return result; | 1605 | return result; |
| 1590 | } | 1606 | } |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index c423231b952b..fdd3aeeb6def 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
| @@ -124,8 +124,7 @@ static int acpi_pm_freeze(void) | |||
| 124 | static int acpi_pm_pre_suspend(void) | 124 | static int acpi_pm_pre_suspend(void) |
| 125 | { | 125 | { |
| 126 | acpi_pm_freeze(); | 126 | acpi_pm_freeze(); |
| 127 | suspend_nvs_save(); | 127 | return suspend_nvs_save(); |
| 128 | return 0; | ||
| 129 | } | 128 | } |
| 130 | 129 | ||
| 131 | /** | 130 | /** |
| @@ -151,7 +150,7 @@ static int acpi_pm_prepare(void) | |||
| 151 | { | 150 | { |
| 152 | int error = __acpi_pm_prepare(); | 151 | int error = __acpi_pm_prepare(); |
| 153 | if (!error) | 152 | if (!error) |
| 154 | acpi_pm_pre_suspend(); | 153 | error = acpi_pm_pre_suspend(); |
| 155 | 154 | ||
| 156 | return error; | 155 | return error; |
| 157 | } | 156 | } |
| @@ -435,6 +434,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
| 435 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), | 434 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), |
| 436 | }, | 435 | }, |
| 437 | }, | 436 | }, |
| 437 | { | ||
| 438 | .callback = init_nvs_nosave, | ||
| 439 | .ident = "Averatec AV1020-ED2", | ||
| 440 | .matches = { | ||
| 441 | DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), | ||
| 442 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), | ||
| 443 | }, | ||
| 444 | }, | ||
| 438 | {}, | 445 | {}, |
| 439 | }; | 446 | }; |
| 440 | #endif /* CONFIG_SUSPEND */ | 447 | #endif /* CONFIG_SUSPEND */ |
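Adding a further machine that must not save NVS over suspend follows the same shape as the Averatec entry above. In practice a new entry is simply appended to acpisleep_dmi_table[]; the sketch below shows it as a standalone table only so it is self-contained, assumes it lives in sleep.c where init_nvs_nosave() is visible, and uses placeholder vendor/product strings:

    static struct dmi_system_id __initdata example_nvs_quirks[] = {
        {
            .callback = init_nvs_nosave,    /* skip NVS saving on this machine */
            .ident = "Example Laptop",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
            },
        },
        {},     /* terminator */
    };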
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index f8588f81048a..61891e75583d 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
| @@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void) | |||
| 438 | return; | 438 | return; |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | void acpi_os_gpe_count(u32 gpe_number) | 441 | static void gpe_count(u32 gpe_number) |
| 442 | { | 442 | { |
| 443 | acpi_gpe_count++; | 443 | acpi_gpe_count++; |
| 444 | 444 | ||
| @@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number) | |||
| 454 | return; | 454 | return; |
| 455 | } | 455 | } |
| 456 | 456 | ||
| 457 | void acpi_os_fixed_event_count(u32 event_number) | 457 | static void fixed_event_count(u32 event_number) |
| 458 | { | 458 | { |
| 459 | if (!all_counters) | 459 | if (!all_counters) |
| 460 | return; | 460 | return; |
| @@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number) | |||
| 468 | return; | 468 | return; |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, | ||
| 472 | u32 event_number, void *context) | ||
| 473 | { | ||
| 474 | if (event_type == ACPI_EVENT_TYPE_GPE) | ||
| 475 | gpe_count(event_number); | ||
| 476 | |||
| 477 | if (event_type == ACPI_EVENT_TYPE_FIXED) | ||
| 478 | fixed_event_count(event_number); | ||
| 479 | } | ||
| 480 | |||
| 471 | static int get_status(u32 index, acpi_event_status *status, | 481 | static int get_status(u32 index, acpi_event_status *status, |
| 472 | acpi_handle *handle) | 482 | acpi_handle *handle) |
| 473 | { | 483 | { |
| @@ -601,6 +611,7 @@ end: | |||
| 601 | 611 | ||
| 602 | void acpi_irq_stats_init(void) | 612 | void acpi_irq_stats_init(void) |
| 603 | { | 613 | { |
| 614 | acpi_status status; | ||
| 604 | int i; | 615 | int i; |
| 605 | 616 | ||
| 606 | if (all_counters) | 617 | if (all_counters) |
| @@ -619,6 +630,10 @@ void acpi_irq_stats_init(void) | |||
| 619 | if (all_counters == NULL) | 630 | if (all_counters == NULL) |
| 620 | goto fail; | 631 | goto fail; |
| 621 | 632 | ||
| 633 | status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); | ||
| 634 | if (ACPI_FAILURE(status)) | ||
| 635 | goto fail; | ||
| 636 | |||
| 622 | counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), | 637 | counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), |
| 623 | GFP_KERNEL); | 638 | GFP_KERNEL); |
| 624 | if (counter_attrs == NULL) | 639 | if (counter_attrs == NULL) |
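The interrupt counters now hang off ACPICA's single global event callback, installed via acpi_install_global_event_handler(). A sketch of the same hook in isolation (names are hypothetical; the statistics code above already installs the system-wide handler, so this is illustrative only):

    static void example_event_handler(u32 event_type, acpi_handle device,
                                      u32 event_number, void *context)
    {
        if (event_type == ACPI_EVENT_TYPE_GPE)
            pr_debug("GPE 0x%02x fired\n", event_number);
        else if (event_type == ACPI_EVENT_TYPE_FIXED)
            pr_debug("fixed event %u fired\n", event_number);
    }

    static int __init example_install_handler(void)
    {
        acpi_status status;

        status = acpi_install_global_event_handler(example_event_handler, NULL);
        return ACPI_FAILURE(status) ? -ENODEV : 0;
    }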
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 5a27b0a31315..2607e17b520f 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
| @@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device) | |||
| 1059 | break; | 1059 | break; |
| 1060 | tz->trips.active[i].flags.enabled = 1; | 1060 | tz->trips.active[i].flags.enabled = 1; |
| 1061 | for (j = 0; j < tz->trips.active[i].devices.count; j++) { | 1061 | for (j = 0; j < tz->trips.active[i].devices.count; j++) { |
| 1062 | result = acpi_bus_get_power(tz->trips.active[i].devices. | 1062 | result = acpi_bus_update_power( |
| 1063 | handles[j], &power_state); | 1063 | tz->trips.active[i].devices.handles[j], |
| 1064 | &power_state); | ||
| 1064 | if (result || (power_state != ACPI_STATE_D0)) { | 1065 | if (result || (power_state != ACPI_STATE_D0)) { |
| 1065 | tz->trips.active[i].flags.enabled = 0; | 1066 | tz->trips.active[i].flags.enabled = 0; |
| 1066 | break; | 1067 | break; |
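The thermal resume path switches from acpi_bus_get_power() to acpi_bus_update_power(), keeping the (handle, int *state) shape seen at the call site. A hedged sketch of that query (example_active_device_state is hypothetical):

    static int example_active_device_state(acpi_handle handle)
    {
        int state = ACPI_STATE_UNKNOWN;
        int error = acpi_bus_update_power(handle, &state);

        return error ? error : state;   /* D-state on success, -errno otherwise */
    }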
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 15a0fde4b32a..90f8f7676d1f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <linux/input.h> | 33 | #include <linux/input.h> |
| 34 | #include <linux/backlight.h> | 34 | #include <linux/backlight.h> |
| 35 | #include <linux/thermal.h> | 35 | #include <linux/thermal.h> |
| 36 | #include <linux/video_output.h> | ||
| 37 | #include <linux/sort.h> | 36 | #include <linux/sort.h> |
| 38 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
| 39 | #include <linux/pci_ids.h> | 38 | #include <linux/pci_ids.h> |
| @@ -81,6 +80,13 @@ module_param(brightness_switch_enabled, bool, 0644); | |||
| 81 | static int allow_duplicates; | 80 | static int allow_duplicates; |
| 82 | module_param(allow_duplicates, bool, 0644); | 81 | module_param(allow_duplicates, bool, 0644); |
| 83 | 82 | ||
| 83 | /* | ||
| 84 | * Some BIOSes claim they use minimum backlight at boot, | ||
| 85 | * which can leave the screen dim after boot | ||
| 86 | */ | ||
| 87 | static int use_bios_initial_backlight = 1; | ||
| 88 | module_param(use_bios_initial_backlight, bool, 0644); | ||
| 89 | |||
| 84 | static int register_count = 0; | 90 | static int register_count = 0; |
| 85 | static int acpi_video_bus_add(struct acpi_device *device); | 91 | static int acpi_video_bus_add(struct acpi_device *device); |
| 86 | static int acpi_video_bus_remove(struct acpi_device *device, int type); | 92 | static int acpi_video_bus_remove(struct acpi_device *device, int type); |
| @@ -172,9 +178,6 @@ struct acpi_video_device_cap { | |||
| 172 | u8 _BQC:1; /* Get current brightness level */ | 178 | u8 _BQC:1; /* Get current brightness level */ |
| 173 | u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ | 179 | u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ |
| 174 | u8 _DDC:1; /*Return the EDID for this device */ | 180 | u8 _DDC:1; /*Return the EDID for this device */ |
| 175 | u8 _DCS:1; /*Return status of output device */ | ||
| 176 | u8 _DGS:1; /*Query graphics state */ | ||
| 177 | u8 _DSS:1; /*Device state set */ | ||
| 178 | }; | 181 | }; |
| 179 | 182 | ||
| 180 | struct acpi_video_brightness_flags { | 183 | struct acpi_video_brightness_flags { |
| @@ -202,7 +205,6 @@ struct acpi_video_device { | |||
| 202 | struct acpi_video_device_brightness *brightness; | 205 | struct acpi_video_device_brightness *brightness; |
| 203 | struct backlight_device *backlight; | 206 | struct backlight_device *backlight; |
| 204 | struct thermal_cooling_device *cooling_dev; | 207 | struct thermal_cooling_device *cooling_dev; |
| 205 | struct output_device *output_dev; | ||
| 206 | }; | 208 | }; |
| 207 | 209 | ||
| 208 | static const char device_decode[][30] = { | 210 | static const char device_decode[][30] = { |
| @@ -226,10 +228,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, | |||
| 226 | u32 level_current, u32 event); | 228 | u32 level_current, u32 event); |
| 227 | static int acpi_video_switch_brightness(struct acpi_video_device *device, | 229 | static int acpi_video_switch_brightness(struct acpi_video_device *device, |
| 228 | int event); | 230 | int event); |
| 229 | static int acpi_video_device_get_state(struct acpi_video_device *device, | ||
| 230 | unsigned long long *state); | ||
| 231 | static int acpi_video_output_get(struct output_device *od); | ||
| 232 | static int acpi_video_device_set_state(struct acpi_video_device *device, int state); | ||
| 233 | 231 | ||
| 234 | /*backlight device sysfs support*/ | 232 | /*backlight device sysfs support*/ |
| 235 | static int acpi_video_get_brightness(struct backlight_device *bd) | 233 | static int acpi_video_get_brightness(struct backlight_device *bd) |
| @@ -265,30 +263,6 @@ static const struct backlight_ops acpi_backlight_ops = { | |||
| 265 | .update_status = acpi_video_set_brightness, | 263 | .update_status = acpi_video_set_brightness, |
| 266 | }; | 264 | }; |
| 267 | 265 | ||
| 268 | /*video output device sysfs support*/ | ||
| 269 | static int acpi_video_output_get(struct output_device *od) | ||
| 270 | { | ||
| 271 | unsigned long long state; | ||
| 272 | struct acpi_video_device *vd = | ||
| 273 | (struct acpi_video_device *)dev_get_drvdata(&od->dev); | ||
| 274 | acpi_video_device_get_state(vd, &state); | ||
| 275 | return (int)state; | ||
| 276 | } | ||
| 277 | |||
| 278 | static int acpi_video_output_set(struct output_device *od) | ||
| 279 | { | ||
| 280 | unsigned long state = od->request_state; | ||
| 281 | struct acpi_video_device *vd= | ||
| 282 | (struct acpi_video_device *)dev_get_drvdata(&od->dev); | ||
| 283 | return acpi_video_device_set_state(vd, state); | ||
| 284 | } | ||
| 285 | |||
| 286 | static struct output_properties acpi_output_properties = { | ||
| 287 | .set_state = acpi_video_output_set, | ||
| 288 | .get_status = acpi_video_output_get, | ||
| 289 | }; | ||
| 290 | |||
| 291 | |||
| 292 | /* thermal cooling device callbacks */ | 266 | /* thermal cooling device callbacks */ |
| 293 | static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned | 267 | static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned |
| 294 | long *state) | 268 | long *state) |
| @@ -344,34 +318,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = { | |||
| 344 | Video Management | 318 | Video Management |
| 345 | -------------------------------------------------------------------------- */ | 319 | -------------------------------------------------------------------------- */ |
| 346 | 320 | ||
| 347 | /* device */ | ||
| 348 | |||
| 349 | static int | ||
| 350 | acpi_video_device_get_state(struct acpi_video_device *device, | ||
| 351 | unsigned long long *state) | ||
| 352 | { | ||
| 353 | int status; | ||
| 354 | |||
| 355 | status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state); | ||
| 356 | |||
| 357 | return status; | ||
| 358 | } | ||
| 359 | |||
| 360 | static int | ||
| 361 | acpi_video_device_set_state(struct acpi_video_device *device, int state) | ||
| 362 | { | ||
| 363 | int status; | ||
| 364 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; | ||
| 365 | struct acpi_object_list args = { 1, &arg0 }; | ||
| 366 | unsigned long long ret; | ||
| 367 | |||
| 368 | |||
| 369 | arg0.integer.value = state; | ||
| 370 | status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret); | ||
| 371 | |||
| 372 | return status; | ||
| 373 | } | ||
| 374 | |||
| 375 | static int | 321 | static int |
| 376 | acpi_video_device_lcd_query_levels(struct acpi_video_device *device, | 322 | acpi_video_device_lcd_query_levels(struct acpi_video_device *device, |
| 377 | union acpi_object **levels) | 323 | union acpi_object **levels) |
| @@ -766,9 +712,11 @@ acpi_video_init_brightness(struct acpi_video_device *device) | |||
| 766 | * when invoked for the first time, i.e. level_old is invalid. | 712 | * when invoked for the first time, i.e. level_old is invalid. |
| 767 | * set the backlight to max_level in this case | 713 | * set the backlight to max_level in this case |
| 768 | */ | 714 | */ |
| 769 | for (i = 2; i < br->count; i++) | 715 | if (use_bios_initial_backlight) { |
| 770 | if (level_old == br->levels[i]) | 716 | for (i = 2; i < br->count; i++) |
| 771 | level = level_old; | 717 | if (level_old == br->levels[i]) |
| 718 | level = level_old; | ||
| 719 | } | ||
| 772 | goto set_level; | 720 | goto set_level; |
| 773 | } | 721 | } |
| 774 | 722 | ||
| @@ -831,15 +779,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 831 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { | 779 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { |
| 832 | device->cap._DDC = 1; | 780 | device->cap._DDC = 1; |
| 833 | } | 781 | } |
| 834 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) { | ||
| 835 | device->cap._DCS = 1; | ||
| 836 | } | ||
| 837 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) { | ||
| 838 | device->cap._DGS = 1; | ||
| 839 | } | ||
| 840 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) { | ||
| 841 | device->cap._DSS = 1; | ||
| 842 | } | ||
| 843 | 782 | ||
| 844 | if (acpi_video_backlight_support()) { | 783 | if (acpi_video_backlight_support()) { |
| 845 | struct backlight_properties props; | 784 | struct backlight_properties props; |
| @@ -904,21 +843,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 904 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 843 | printk(KERN_ERR PREFIX "Create sysfs link\n"); |
| 905 | 844 | ||
| 906 | } | 845 | } |
| 907 | |||
| 908 | if (acpi_video_display_switch_support()) { | ||
| 909 | |||
| 910 | if (device->cap._DCS && device->cap._DSS) { | ||
| 911 | static int count; | ||
| 912 | char *name; | ||
| 913 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
| 914 | if (!name) | ||
| 915 | return; | ||
| 916 | count++; | ||
| 917 | device->output_dev = video_output_register(name, | ||
| 918 | NULL, device, &acpi_output_properties); | ||
| 919 | kfree(name); | ||
| 920 | } | ||
| 921 | } | ||
| 922 | } | 846 | } |
| 923 | 847 | ||
| 924 | /* | 848 | /* |
| @@ -1360,6 +1284,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, | |||
| 1360 | if (!video_device) | 1284 | if (!video_device) |
| 1361 | continue; | 1285 | continue; |
| 1362 | 1286 | ||
| 1287 | if (!video_device->cap._DDC) | ||
| 1288 | continue; | ||
| 1289 | |||
| 1363 | if (type) { | 1290 | if (type) { |
| 1364 | switch (type) { | 1291 | switch (type) { |
| 1365 | case ACPI_VIDEO_DISPLAY_CRT: | 1292 | case ACPI_VIDEO_DISPLAY_CRT: |
| @@ -1452,7 +1379,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
| 1452 | thermal_cooling_device_unregister(device->cooling_dev); | 1379 | thermal_cooling_device_unregister(device->cooling_dev); |
| 1453 | device->cooling_dev = NULL; | 1380 | device->cooling_dev = NULL; |
| 1454 | } | 1381 | } |
| 1455 | video_output_unregister(device->output_dev); | ||
| 1456 | 1382 | ||
| 1457 | return 0; | 1383 | return 0; |
| 1458 | } | 1384 | } |
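The new use_bios_initial_backlight module parameter defaults to 1, so the driver keeps honouring the firmware-reported initial level; setting it to 0 makes acpi_video_init_brightness() fall back to the maximum level instead. A minimal usage sketch, assuming the option is passed to video.ko as built from CONFIG_ACPI_VIDEO:

	video.use_bios_initial_backlight=0    # ignore the BIOS-reported initial level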
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b83676126598..42d3d72dae85 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
| @@ -17,15 +17,14 @@ | |||
| 17 | * capabilities the graphics cards plugged in support. The check for general | 17 | * capabilities the graphics cards plugged in support. The check for general |
| 18 | * video capabilities will be triggered by the first caller of | 18 | * video capabilities will be triggered by the first caller of |
| 19 | * acpi_video_get_capabilities(NULL); which will happen when the first | 19 | * acpi_video_get_capabilities(NULL); which will happen when the first |
| 20 | * backlight (or display output) switching supporting driver calls: | 20 | * backlight switching supporting driver calls: |
| 21 | * acpi_video_backlight_support(); | 21 | * acpi_video_backlight_support(); |
| 22 | * | 22 | * |
| 23 | * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) | 23 | * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) |
| 24 | * are available, video.ko should be used to handle the device. | 24 | * are available, video.ko should be used to handle the device. |
| 25 | * | 25 | * |
| 26 | * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, | 26 | * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, |
| 27 | * sony_acpi,... can take care about backlight brightness and display output | 27 | * sony_acpi,... can take care about backlight brightness. |
| 28 | * switching. | ||
| 29 | * | 28 | * |
| 30 | * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) | 29 | * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) |
| 31 | * this file will not be compiled, acpi_video_get_capabilities() and | 30 | * this file will not be compiled, acpi_video_get_capabilities() and |
| @@ -83,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device) | |||
| 83 | if (!device) | 82 | if (!device) |
| 84 | return 0; | 83 | return 0; |
| 85 | 84 | ||
| 86 | /* Is this device able to support video switching ? */ | ||
| 87 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || | ||
| 88 | ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) | ||
| 89 | video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; | ||
| 90 | |||
| 91 | /* Is this device able to retrieve a video ROM ? */ | 85 | /* Is this device able to retrieve a video ROM ? */ |
| 92 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) | 86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) |
| 93 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; | 87 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; |
| @@ -161,8 +155,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle) | |||
| 161 | * | 155 | * |
| 162 | * if (dmi_name_in_vendors("XY")) { | 156 | * if (dmi_name_in_vendors("XY")) { |
| 163 | * acpi_video_support |= | 157 | * acpi_video_support |= |
| 164 | * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR; | ||
| 165 | * acpi_video_support |= | ||
| 166 | * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; | 158 | * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; |
| 167 | *} | 159 | *} |
| 168 | */ | 160 | */ |
| @@ -212,33 +204,8 @@ int acpi_video_backlight_support(void) | |||
| 212 | EXPORT_SYMBOL(acpi_video_backlight_support); | 204 | EXPORT_SYMBOL(acpi_video_backlight_support); |
| 213 | 205 | ||
| 214 | /* | 206 | /* |
| 215 | * Returns true if video.ko can do display output switching. | 207 | * Use acpi_backlight=vendor/video to force backlight switching to be |
| 216 | * This does not work well/at all with binary graphics drivers | 208 | * handled by a vendor specific ACPI driver or by the video.ko driver. |
| 217 | * which disable system io ranges and do it on their own. | ||
| 218 | */ | ||
| 219 | int acpi_video_display_switch_support(void) | ||
| 220 | { | ||
| 221 | if (!acpi_video_caps_checked) | ||
| 222 | acpi_video_get_capabilities(NULL); | ||
| 223 | |||
| 224 | if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR) | ||
| 225 | return 0; | ||
| 226 | else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO) | ||
| 227 | return 1; | ||
| 228 | |||
| 229 | if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR) | ||
| 230 | return 0; | ||
| 231 | else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO) | ||
| 232 | return 1; | ||
| 233 | |||
| 234 | return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING; | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL(acpi_video_display_switch_support); | ||
| 237 | |||
| 238 | /* | ||
| 239 | * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video | ||
| 240 | * To force that backlight or display output switching is processed by vendor | ||
| 241 | * specific acpi drivers or video.ko driver. | ||
| 242 | */ | 209 | */ |
| 243 | static int __init acpi_backlight(char *str) | 210 | static int __init acpi_backlight(char *str) |
| 244 | { | 211 | { |
| @@ -255,19 +222,3 @@ static int __init acpi_backlight(char *str) | |||
| 255 | return 1; | 222 | return 1; |
| 256 | } | 223 | } |
| 257 | __setup("acpi_backlight=", acpi_backlight); | 224 | __setup("acpi_backlight=", acpi_backlight); |
| 258 | |||
| 259 | static int __init acpi_display_output(char *str) | ||
| 260 | { | ||
| 261 | if (str == NULL || *str == '\0') | ||
| 262 | return 1; | ||
| 263 | else { | ||
| 264 | if (!strcmp("vendor", str)) | ||
| 265 | acpi_video_support |= | ||
| 266 | ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR; | ||
| 267 | if (!strcmp("video", str)) | ||
| 268 | acpi_video_support |= | ||
| 269 | ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; | ||
| 270 | } | ||
| 271 | return 1; | ||
| 272 | } | ||
| 273 | __setup("acpi_display_output=", acpi_display_output); | ||
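With output switching removed, acpi_display_output= is no longer accepted as a boot option, but backlight handling can still be steered from the kernel command line through the remaining acpi_backlight= switch:

	acpi_backlight=video     # let video.ko handle backlight switching
	acpi_backlight=vendor    # defer to a vendor specific ACPI driver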
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index f62a50c3ed34..ed6501452507 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
| @@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state) | |||
| 37 | container_of(node, struct acpi_device, wakeup_list); | 37 | container_of(node, struct acpi_device, wakeup_list); |
| 38 | 38 | ||
| 39 | if (!dev->wakeup.flags.valid | 39 | if (!dev->wakeup.flags.valid |
| 40 | || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) | 40 | || sleep_state > (u32) dev->wakeup.sleep_state |
| 41 | || sleep_state > (u32) dev->wakeup.sleep_state) | 41 | || !(device_may_wakeup(&dev->dev) |
| 42 | || dev->wakeup.prepare_count)) | ||
| 42 | continue; | 43 | continue; |
| 43 | 44 | ||
| 44 | if (dev->wakeup.state.enabled) | 45 | if (device_may_wakeup(&dev->dev)) |
| 45 | acpi_enable_wakeup_device_power(dev, sleep_state); | 46 | acpi_enable_wakeup_device_power(dev, sleep_state); |
| 46 | 47 | ||
| 47 | /* The wake-up power should have been enabled already. */ | 48 | /* The wake-up power should have been enabled already. */ |
| 48 | acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, | 49 | acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
| 49 | ACPI_GPE_ENABLE); | 50 | ACPI_GPE_ENABLE); |
| 50 | } | 51 | } |
| 51 | } | 52 | } |
| @@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state) | |||
| 63 | container_of(node, struct acpi_device, wakeup_list); | 64 | container_of(node, struct acpi_device, wakeup_list); |
| 64 | 65 | ||
| 65 | if (!dev->wakeup.flags.valid | 66 | if (!dev->wakeup.flags.valid |
| 66 | || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) | 67 | || sleep_state > (u32) dev->wakeup.sleep_state |
| 67 | || (sleep_state > (u32) dev->wakeup.sleep_state)) | 68 | || !(device_may_wakeup(&dev->dev) |
| 69 | || dev->wakeup.prepare_count)) | ||
| 68 | continue; | 70 | continue; |
| 69 | 71 | ||
| 70 | acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, | 72 | acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
| 71 | ACPI_GPE_DISABLE); | 73 | ACPI_GPE_DISABLE); |
| 72 | 74 | ||
| 73 | if (dev->wakeup.state.enabled) | 75 | if (device_may_wakeup(&dev->dev)) |
| 74 | acpi_disable_wakeup_device_power(dev); | 76 | acpi_disable_wakeup_device_power(dev); |
| 75 | } | 77 | } |
| 76 | } | 78 | } |
| @@ -84,8 +86,8 @@ int __init acpi_wakeup_device_init(void) | |||
| 84 | struct acpi_device *dev = container_of(node, | 86 | struct acpi_device *dev = container_of(node, |
| 85 | struct acpi_device, | 87 | struct acpi_device, |
| 86 | wakeup_list); | 88 | wakeup_list); |
| 87 | if (dev->wakeup.flags.always_enabled) | 89 | if (device_can_wakeup(&dev->dev)) |
| 88 | dev->wakeup.state.enabled = 1; | 90 | device_set_wakeup_enable(&dev->dev, true); |
| 89 | } | 91 | } |
| 90 | mutex_unlock(&acpi_device_lock); | 92 | mutex_unlock(&acpi_device_lock); |
| 91 | return 0; | 93 | return 0; |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 2fe72f8edf44..38223e93aa98 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -970,6 +970,33 @@ out_kfree: | |||
| 970 | } | 970 | } |
| 971 | EXPORT_SYMBOL(ipmi_create_user); | 971 | EXPORT_SYMBOL(ipmi_create_user); |
| 972 | 972 | ||
| 973 | int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) | ||
| 974 | { | ||
| 975 | int rv = 0; | ||
| 976 | ipmi_smi_t intf; | ||
| 977 | struct ipmi_smi_handlers *handlers; | ||
| 978 | |||
| 979 | mutex_lock(&ipmi_interfaces_mutex); | ||
| 980 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | ||
| 981 | if (intf->intf_num == if_num) | ||
| 982 | goto found; | ||
| 983 | } | ||
| 984 | /* Not found, return an error */ | ||
| 985 | rv = -EINVAL; | ||
| 986 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 987 | return rv; | ||
| 988 | |||
| 989 | found: | ||
| 990 | handlers = intf->handlers; | ||
| 991 | rv = -ENOSYS; | ||
| 992 | if (handlers->get_smi_info) | ||
| 993 | rv = handlers->get_smi_info(intf->send_info, data); | ||
| 994 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 995 | |||
| 996 | return rv; | ||
| 997 | } | ||
| 998 | EXPORT_SYMBOL(ipmi_get_smi_info); | ||
| 999 | |||
| 973 | static void free_user(struct kref *ref) | 1000 | static void free_user(struct kref *ref) |
| 974 | { | 1001 | { |
| 975 | ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); | 1002 | ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); |
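A minimal caller sketch for the new ipmi_get_smi_info() export, assuming enum ipmi_addr_src and struct ipmi_smi_info are now visible through <linux/ipmi.h>; example_query_smi() is a hypothetical helper:

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/ipmi.h>

	static int example_query_smi(int if_num)
	{
		struct ipmi_smi_info info;
		int rv;

		/* -EINVAL if no such interface, -ENOSYS if the SMI lacks get_smi_info */
		rv = ipmi_get_smi_info(if_num, &info);
		if (rv)
			return rv;

		if (info.addr_src == SI_ACPI)
			pr_info("IPMI interface %d was discovered via ACPI\n", if_num);

		/* the interface code took a device reference on our behalf */
		put_device(info.dev);
		return 0;
	}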
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index f27c04e18aaa..b6ae6e9a9c5f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -57,6 +57,7 @@ | |||
| 57 | #include <asm/irq.h> | 57 | #include <asm/irq.h> |
| 58 | #include <linux/interrupt.h> | 58 | #include <linux/interrupt.h> |
| 59 | #include <linux/rcupdate.h> | 59 | #include <linux/rcupdate.h> |
| 60 | #include <linux/ipmi.h> | ||
| 60 | #include <linux/ipmi_smi.h> | 61 | #include <linux/ipmi_smi.h> |
| 61 | #include <asm/io.h> | 62 | #include <asm/io.h> |
| 62 | #include "ipmi_si_sm.h" | 63 | #include "ipmi_si_sm.h" |
| @@ -109,10 +110,6 @@ enum si_type { | |||
| 109 | }; | 110 | }; |
| 110 | static char *si_to_str[] = { "kcs", "smic", "bt" }; | 111 | static char *si_to_str[] = { "kcs", "smic", "bt" }; |
| 111 | 112 | ||
| 112 | enum ipmi_addr_src { | ||
| 113 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, | ||
| 114 | SI_PCI, SI_DEVICETREE, SI_DEFAULT | ||
| 115 | }; | ||
| 116 | static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", | 113 | static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", |
| 117 | "ACPI", "SMBIOS", "PCI", | 114 | "ACPI", "SMBIOS", "PCI", |
| 118 | "device-tree", "default" }; | 115 | "device-tree", "default" }; |
| @@ -293,6 +290,7 @@ struct smi_info { | |||
| 293 | struct task_struct *thread; | 290 | struct task_struct *thread; |
| 294 | 291 | ||
| 295 | struct list_head link; | 292 | struct list_head link; |
| 293 | union ipmi_smi_info_union addr_info; | ||
| 296 | }; | 294 | }; |
| 297 | 295 | ||
| 298 | #define smi_inc_stat(smi, stat) \ | 296 | #define smi_inc_stat(smi, stat) \ |
| @@ -1188,6 +1186,18 @@ static int smi_start_processing(void *send_info, | |||
| 1188 | return 0; | 1186 | return 0; |
| 1189 | } | 1187 | } |
| 1190 | 1188 | ||
| 1189 | static int get_smi_info(void *send_info, struct ipmi_smi_info *data) | ||
| 1190 | { | ||
| 1191 | struct smi_info *smi = send_info; | ||
| 1192 | |||
| 1193 | data->addr_src = smi->addr_source; | ||
| 1194 | data->dev = smi->dev; | ||
| 1195 | data->addr_info = smi->addr_info; | ||
| 1196 | get_device(smi->dev); | ||
| 1197 | |||
| 1198 | return 0; | ||
| 1199 | } | ||
| 1200 | |||
| 1191 | static void set_maintenance_mode(void *send_info, int enable) | 1201 | static void set_maintenance_mode(void *send_info, int enable) |
| 1192 | { | 1202 | { |
| 1193 | struct smi_info *smi_info = send_info; | 1203 | struct smi_info *smi_info = send_info; |
| @@ -1199,6 +1209,7 @@ static void set_maintenance_mode(void *send_info, int enable) | |||
| 1199 | static struct ipmi_smi_handlers handlers = { | 1209 | static struct ipmi_smi_handlers handlers = { |
| 1200 | .owner = THIS_MODULE, | 1210 | .owner = THIS_MODULE, |
| 1201 | .start_processing = smi_start_processing, | 1211 | .start_processing = smi_start_processing, |
| 1212 | .get_smi_info = get_smi_info, | ||
| 1202 | .sender = sender, | 1213 | .sender = sender, |
| 1203 | .request_events = request_events, | 1214 | .request_events = request_events, |
| 1204 | .set_maintenance_mode = set_maintenance_mode, | 1215 | .set_maintenance_mode = set_maintenance_mode, |
| @@ -1930,7 +1941,8 @@ static void __devinit hardcode_find_bmc(void) | |||
| 1930 | static int acpi_failure; | 1941 | static int acpi_failure; |
| 1931 | 1942 | ||
| 1932 | /* For GPE-type interrupts. */ | 1943 | /* For GPE-type interrupts. */ |
| 1933 | static u32 ipmi_acpi_gpe(void *context) | 1944 | static u32 ipmi_acpi_gpe(acpi_handle gpe_device, |
| 1945 | u32 gpe_number, void *context) | ||
| 1934 | { | 1946 | { |
| 1935 | struct smi_info *smi_info = context; | 1947 | struct smi_info *smi_info = context; |
| 1936 | unsigned long flags; | 1948 | unsigned long flags; |
| @@ -2158,6 +2170,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | |||
| 2158 | printk(KERN_INFO PFX "probing via ACPI\n"); | 2170 | printk(KERN_INFO PFX "probing via ACPI\n"); |
| 2159 | 2171 | ||
| 2160 | handle = acpi_dev->handle; | 2172 | handle = acpi_dev->handle; |
| 2173 | info->addr_info.acpi_info.acpi_handle = handle; | ||
| 2161 | 2174 | ||
| 2162 | /* _IFT tells us the interface type: KCS, BT, etc */ | 2175 | /* _IFT tells us the interface type: KCS, BT, etc */ |
| 2163 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); | 2176 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); |
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7af443672626..64828a7db77b 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -107,7 +107,6 @@ config DRM_I915 | |||
| 107 | select FB_CFB_IMAGEBLIT | 107 | select FB_CFB_IMAGEBLIT |
| 108 | # i915 depends on ACPI_VIDEO when ACPI is enabled | 108 | # i915 depends on ACPI_VIDEO when ACPI is enabled |
| 109 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 109 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 110 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 111 | select BACKLIGHT_CLASS_DEVICE if ACPI | 110 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 112 | select INPUT if ACPI | 111 | select INPUT if ACPI |
| 113 | select ACPI_VIDEO if ACPI | 112 | select ACPI_VIDEO if ACPI |
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 0e1edd7311ff..09aea5f1556d 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig | |||
| @@ -3,7 +3,6 @@ config STUB_POULSBO | |||
| 3 | depends on PCI | 3 | depends on PCI |
| 4 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled | 4 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled |
| 5 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 5 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 6 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 7 | select BACKLIGHT_CLASS_DEVICE if ACPI | 6 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 8 | select INPUT if ACPI | 7 | select INPUT if ACPI |
| 9 | select ACPI_VIDEO if ACPI | 8 | select ACPI_VIDEO if ACPI |
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 19e92b2a7f7e..95e3b0948e9c 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
| @@ -689,7 +689,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) | |||
| 689 | if (error) | 689 | if (error) |
| 690 | goto err_free_input_dev; | 690 | goto err_free_input_dev; |
| 691 | 691 | ||
| 692 | result = acpi_bus_get_power(fujitsu->acpi_handle, &state); | 692 | result = acpi_bus_update_power(fujitsu->acpi_handle, &state); |
| 693 | if (result) { | 693 | if (result) { |
| 694 | printk(KERN_ERR "Error reading power state\n"); | 694 | printk(KERN_ERR "Error reading power state\n"); |
| 695 | goto err_unregister_input_dev; | 695 | goto err_unregister_input_dev; |
| @@ -857,7 +857,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
| 857 | if (error) | 857 | if (error) |
| 858 | goto err_free_input_dev; | 858 | goto err_free_input_dev; |
| 859 | 859 | ||
| 860 | result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); | 860 | result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); |
| 861 | if (result) { | 861 | if (result) { |
| 862 | printk(KERN_ERR "Error reading power state\n"); | 862 | printk(KERN_ERR "Error reading power state\n"); |
| 863 | goto err_unregister_input_dev; | 863 | goto err_unregister_input_dev; |
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile index 8de3775ec242..bfba893cb321 100644 --- a/drivers/pnp/Makefile +++ b/drivers/pnp/Makefile | |||
| @@ -2,11 +2,13 @@ | |||
| 2 | # Makefile for the Linux Plug-and-Play Support. | 2 | # Makefile for the Linux Plug-and-Play Support. |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o | 5 | obj-y := pnp.o |
| 6 | |||
| 7 | pnp-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o | ||
| 6 | 8 | ||
| 7 | obj-$(CONFIG_PNPACPI) += pnpacpi/ | 9 | obj-$(CONFIG_PNPACPI) += pnpacpi/ |
| 8 | obj-$(CONFIG_PNPBIOS) += pnpbios/ | 10 | obj-$(CONFIG_PNPBIOS) += pnpbios/ |
| 9 | obj-$(CONFIG_ISAPNP) += isapnp/ | 11 | obj-$(CONFIG_ISAPNP) += isapnp/ |
| 10 | 12 | ||
| 11 | # pnp_system_init goes after pnpacpi/pnpbios init | 13 | # pnp_system_init goes after pnpacpi/pnpbios init |
| 12 | obj-y += system.o | 14 | pnp-y += system.o |
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 0f34d962fd3c..cb6ce42f8e77 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c | |||
| @@ -220,10 +220,5 @@ subsys_initcall(pnp_init); | |||
| 220 | int pnp_debug; | 220 | int pnp_debug; |
| 221 | 221 | ||
| 222 | #if defined(CONFIG_PNP_DEBUG_MESSAGES) | 222 | #if defined(CONFIG_PNP_DEBUG_MESSAGES) |
| 223 | static int __init pnp_debug_setup(char *__unused) | 223 | module_param_named(debug, pnp_debug, int, 0644); |
| 224 | { | ||
| 225 | pnp_debug = 1; | ||
| 226 | return 1; | ||
| 227 | } | ||
| 228 | __setup("pnp.debug", pnp_debug_setup); | ||
| 229 | #endif | 224 | #endif |
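Converting the pnp.debug __setup hook into module_param_named() keeps the familiar boot-time spelling and additionally exposes the flag through sysfs at run time; both forms below follow the standard module_param conventions for the pnp module built above:

	pnp.debug=1                                  # kernel command line
	echo 1 > /sys/module/pnp/parameters/debug    # at run time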
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index d1dbb9df53fa..00e94032531a 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c | |||
| @@ -189,8 +189,11 @@ static int pnp_bus_resume(struct device *dev) | |||
| 189 | if (!pnp_drv) | 189 | if (!pnp_drv) |
| 190 | return 0; | 190 | return 0; |
| 191 | 191 | ||
| 192 | if (pnp_dev->protocol->resume) | 192 | if (pnp_dev->protocol->resume) { |
| 193 | pnp_dev->protocol->resume(pnp_dev); | 193 | error = pnp_dev->protocol->resume(pnp_dev); |
| 194 | if (error) | ||
| 195 | return error; | ||
| 196 | } | ||
| 194 | 197 | ||
| 195 | if (pnp_can_write(pnp_dev)) { | 198 | if (pnp_can_write(pnp_dev)) { |
| 196 | error = pnp_start_dev(pnp_dev); | 199 | error = pnp_start_dev(pnp_dev); |
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile index cac18bbfb817..6e607aa33aa3 100644 --- a/drivers/pnp/isapnp/Makefile +++ b/drivers/pnp/isapnp/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for the kernel ISAPNP driver. | 2 | # Makefile for the kernel ISAPNP driver. |
| 3 | # | 3 | # |
| 4 | obj-y += pnp.o | ||
| 5 | pnp-y := core.o compat.o | ||
| 4 | 6 | ||
| 5 | isapnp-proc-$(CONFIG_PROC_FS) = proc.o | 7 | pnp-$(CONFIG_PROC_FS) += proc.o |
| 6 | |||
| 7 | obj-y := core.o compat.o $(isapnp-proc-y) | ||
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile index 905326fcca85..40c93da18252 100644 --- a/drivers/pnp/pnpacpi/Makefile +++ b/drivers/pnp/pnpacpi/Makefile | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for the kernel PNPACPI driver. | 2 | # Makefile for the kernel PNPACPI driver. |
| 3 | # | 3 | # |
| 4 | obj-y += pnp.o | ||
| 4 | 5 | ||
| 5 | obj-y := core.o rsparser.o | 6 | pnp-y := core.o rsparser.o |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 57313f4658bc..ca84d5099ce7 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
| @@ -81,12 +81,19 @@ static int pnpacpi_get_resources(struct pnp_dev *dev) | |||
| 81 | 81 | ||
| 82 | static int pnpacpi_set_resources(struct pnp_dev *dev) | 82 | static int pnpacpi_set_resources(struct pnp_dev *dev) |
| 83 | { | 83 | { |
| 84 | struct acpi_device *acpi_dev = dev->data; | 84 | struct acpi_device *acpi_dev; |
| 85 | acpi_handle handle = acpi_dev->handle; | 85 | acpi_handle handle; |
| 86 | struct acpi_buffer buffer; | 86 | struct acpi_buffer buffer; |
| 87 | int ret; | 87 | int ret; |
| 88 | 88 | ||
| 89 | pnp_dbg(&dev->dev, "set resources\n"); | 89 | pnp_dbg(&dev->dev, "set resources\n"); |
| 90 | |||
| 91 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
| 92 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { | ||
| 93 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | ||
| 94 | return -ENODEV; | ||
| 95 | } | ||
| 96 | |||
| 90 | ret = pnpacpi_build_resource_template(dev, &buffer); | 97 | ret = pnpacpi_build_resource_template(dev, &buffer); |
| 91 | if (ret) | 98 | if (ret) |
| 92 | return ret; | 99 | return ret; |
| @@ -105,12 +112,18 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) | |||
| 105 | 112 | ||
| 106 | static int pnpacpi_disable_resources(struct pnp_dev *dev) | 113 | static int pnpacpi_disable_resources(struct pnp_dev *dev) |
| 107 | { | 114 | { |
| 108 | struct acpi_device *acpi_dev = dev->data; | 115 | struct acpi_device *acpi_dev; |
| 109 | acpi_handle handle = acpi_dev->handle; | 116 | acpi_handle handle; |
| 110 | int ret; | 117 | int ret; |
| 111 | 118 | ||
| 112 | dev_dbg(&dev->dev, "disable resources\n"); | 119 | dev_dbg(&dev->dev, "disable resources\n"); |
| 113 | 120 | ||
| 121 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
| 122 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { | ||
| 123 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | ||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | |||
| 114 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ | 127 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ |
| 115 | ret = 0; | 128 | ret = 0; |
| 116 | if (acpi_bus_power_manageable(handle)) | 129 | if (acpi_bus_power_manageable(handle)) |
| @@ -124,46 +137,74 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) | |||
| 124 | #ifdef CONFIG_ACPI_SLEEP | 137 | #ifdef CONFIG_ACPI_SLEEP |
| 125 | static bool pnpacpi_can_wakeup(struct pnp_dev *dev) | 138 | static bool pnpacpi_can_wakeup(struct pnp_dev *dev) |
| 126 | { | 139 | { |
| 127 | struct acpi_device *acpi_dev = dev->data; | 140 | struct acpi_device *acpi_dev; |
| 128 | acpi_handle handle = acpi_dev->handle; | 141 | acpi_handle handle; |
| 142 | |||
| 143 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
| 144 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { | ||
| 145 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | ||
| 146 | return false; | ||
| 147 | } | ||
| 129 | 148 | ||
| 130 | return acpi_bus_can_wakeup(handle); | 149 | return acpi_bus_can_wakeup(handle); |
| 131 | } | 150 | } |
| 132 | 151 | ||
| 133 | static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) | 152 | static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) |
| 134 | { | 153 | { |
| 135 | struct acpi_device *acpi_dev = dev->data; | 154 | struct acpi_device *acpi_dev; |
| 136 | acpi_handle handle = acpi_dev->handle; | 155 | acpi_handle handle; |
| 137 | int power_state; | 156 | int error = 0; |
| 157 | |||
| 158 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
| 159 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { | ||
| 160 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | ||
| 161 | return 0; | ||
| 162 | } | ||
| 138 | 163 | ||
| 139 | if (device_can_wakeup(&dev->dev)) { | 164 | if (device_can_wakeup(&dev->dev)) { |
| 140 | int rc = acpi_pm_device_sleep_wake(&dev->dev, | 165 | error = acpi_pm_device_sleep_wake(&dev->dev, |
| 141 | device_may_wakeup(&dev->dev)); | 166 | device_may_wakeup(&dev->dev)); |
| 167 | if (error) | ||
| 168 | return error; | ||
| 169 | } | ||
| 170 | |||
| 171 | if (acpi_bus_power_manageable(handle)) { | ||
| 172 | int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); | ||
| 173 | |||
| 174 | if (power_state < 0) | ||
| 175 | power_state = (state.event == PM_EVENT_ON) ? | ||
| 176 | ACPI_STATE_D0 : ACPI_STATE_D3; | ||
| 142 | 177 | ||
| 143 | if (rc) | 178 | /* |
| 144 | return rc; | 179 | * acpi_bus_set_power() often fails (keyboard port can't be |
| 180 | * powered-down?), and in any case, our return value is ignored | ||
| 181 | * by pnp_bus_suspend(). Hence we don't revert the wakeup | ||
| 182 | * setting if the set_power fails. | ||
| 183 | */ | ||
| 184 | error = acpi_bus_set_power(handle, power_state); | ||
| 145 | } | 185 | } |
| 146 | power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); | 186 | |
| 147 | if (power_state < 0) | 187 | return error; |
| 148 | power_state = (state.event == PM_EVENT_ON) ? | ||
| 149 | ACPI_STATE_D0 : ACPI_STATE_D3; | ||
| 150 | |||
| 151 | /* acpi_bus_set_power() often fails (keyboard port can't be | ||
| 152 | * powered-down?), and in any case, our return value is ignored | ||
| 153 | * by pnp_bus_suspend(). Hence we don't revert the wakeup | ||
| 154 | * setting if the set_power fails. | ||
| 155 | */ | ||
| 156 | return acpi_bus_set_power(handle, power_state); | ||
| 157 | } | 188 | } |
| 158 | 189 | ||
| 159 | static int pnpacpi_resume(struct pnp_dev *dev) | 190 | static int pnpacpi_resume(struct pnp_dev *dev) |
| 160 | { | 191 | { |
| 161 | struct acpi_device *acpi_dev = dev->data; | 192 | struct acpi_device *acpi_dev; |
| 162 | acpi_handle handle = acpi_dev->handle; | 193 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); |
| 194 | int error = 0; | ||
| 195 | |||
| 196 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { | ||
| 197 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | ||
| 198 | return -ENODEV; | ||
| 199 | } | ||
| 163 | 200 | ||
| 164 | if (device_may_wakeup(&dev->dev)) | 201 | if (device_may_wakeup(&dev->dev)) |
| 165 | acpi_pm_device_sleep_wake(&dev->dev, false); | 202 | acpi_pm_device_sleep_wake(&dev->dev, false); |
| 166 | return acpi_bus_set_power(handle, ACPI_STATE_D0); | 203 | |
| 204 | if (acpi_bus_power_manageable(handle)) | ||
| 205 | error = acpi_bus_set_power(handle, ACPI_STATE_D0); | ||
| 206 | |||
| 207 | return error; | ||
| 167 | } | 208 | } |
| 168 | #endif | 209 | #endif |
| 169 | 210 | ||
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile index 3cd3ed760605..240b0ffb83ca 100644 --- a/drivers/pnp/pnpbios/Makefile +++ b/drivers/pnp/pnpbios/Makefile | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for the kernel PNPBIOS driver. | 2 | # Makefile for the kernel PNPBIOS driver. |
| 3 | # | 3 | # |
| 4 | obj-y := pnp.o | ||
| 4 | 5 | ||
| 5 | pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o | 6 | pnp-y := core.o bioscalls.o rsparser.o |
| 6 | 7 | ||
| 7 | obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) | 8 | pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index bf7c687519ef..f7a5dba3ca23 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | menuconfig THERMAL | 5 | menuconfig THERMAL |
| 6 | tristate "Generic Thermal sysfs driver" | 6 | tristate "Generic Thermal sysfs driver" |
| 7 | depends on NET | ||
| 7 | help | 8 | help |
| 8 | Generic Thermal Sysfs driver offers a generic mechanism for | 9 | Generic Thermal Sysfs driver offers a generic mechanism for |
| 9 | thermal management. Usually it's made up of one or more thermal | 10 | thermal management. Usually it's made up of one or more thermal |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 13c72c629329..7d0e63c79280 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #include <linux/thermal.h> | 32 | #include <linux/thermal.h> |
| 33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
| 34 | #include <linux/reboot.h> | 34 | #include <linux/reboot.h> |
| 35 | #include <net/netlink.h> | ||
| 36 | #include <net/genetlink.h> | ||
| 35 | 37 | ||
| 36 | MODULE_AUTHOR("Zhang Rui"); | 38 | MODULE_AUTHOR("Zhang Rui"); |
| 37 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); | 39 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); |
| @@ -58,6 +60,22 @@ static LIST_HEAD(thermal_tz_list); | |||
| 58 | static LIST_HEAD(thermal_cdev_list); | 60 | static LIST_HEAD(thermal_cdev_list); |
| 59 | static DEFINE_MUTEX(thermal_list_lock); | 61 | static DEFINE_MUTEX(thermal_list_lock); |
| 60 | 62 | ||
| 63 | static unsigned int thermal_event_seqnum; | ||
| 64 | |||
| 65 | static struct genl_family thermal_event_genl_family = { | ||
| 66 | .id = GENL_ID_GENERATE, | ||
| 67 | .name = THERMAL_GENL_FAMILY_NAME, | ||
| 68 | .version = THERMAL_GENL_VERSION, | ||
| 69 | .maxattr = THERMAL_GENL_ATTR_MAX, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
| 73 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
| 74 | }; | ||
| 75 | |||
| 76 | static int genetlink_init(void); | ||
| 77 | static void genetlink_exit(void); | ||
| 78 | |||
| 61 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) | 79 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) |
| 62 | { | 80 | { |
| 63 | int err; | 81 | int err; |
| @@ -823,11 +841,8 @@ static struct class thermal_class = { | |||
| 823 | * @devdata: device private data. | 841 | * @devdata: device private data. |
| 824 | * @ops: standard thermal cooling devices callbacks. | 842 | * @ops: standard thermal cooling devices callbacks. |
| 825 | */ | 843 | */ |
| 826 | struct thermal_cooling_device *thermal_cooling_device_register(char *type, | 844 | struct thermal_cooling_device *thermal_cooling_device_register( |
| 827 | void *devdata, | 845 | char *type, void *devdata, const struct thermal_cooling_device_ops *ops) |
| 828 | struct | ||
| 829 | thermal_cooling_device_ops | ||
| 830 | *ops) | ||
| 831 | { | 846 | { |
| 832 | struct thermal_cooling_device *cdev; | 847 | struct thermal_cooling_device *cdev; |
| 833 | struct thermal_zone_device *pos; | 848 | struct thermal_zone_device *pos; |
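Because the ops argument is now a const pointer, callers can keep their callback table in read-only data. A minimal registration sketch with hypothetical callbacks, following the thermal_cooling_device_ops layout used by the video cooling callbacks earlier in this series:

	#include <linux/thermal.h>

	static unsigned long example_cur_state;

	static int example_get_max_state(struct thermal_cooling_device *cdev,
					 unsigned long *state)
	{
		*state = 1;	/* a single throttling step in this toy device */
		return 0;
	}

	static int example_get_cur_state(struct thermal_cooling_device *cdev,
					 unsigned long *state)
	{
		*state = example_cur_state;
		return 0;
	}

	static int example_set_cur_state(struct thermal_cooling_device *cdev,
					 unsigned long state)
	{
		example_cur_state = state;
		return 0;
	}

	static const struct thermal_cooling_device_ops example_cooling_ops = {
		.get_max_state = example_get_max_state,
		.get_cur_state = example_get_cur_state,
		.set_cur_state = example_set_cur_state,
	};

	/* e.g. from a probe routine: */
	/* cdev = thermal_cooling_device_register("example", NULL, &example_cooling_ops); */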
| @@ -1048,13 +1063,9 @@ EXPORT_SYMBOL(thermal_zone_device_update); | |||
| 1048 | * section 11.1.5.1 of the ACPI specification 3.0. | 1063 | * section 11.1.5.1 of the ACPI specification 3.0. |
| 1049 | */ | 1064 | */ |
| 1050 | struct thermal_zone_device *thermal_zone_device_register(char *type, | 1065 | struct thermal_zone_device *thermal_zone_device_register(char *type, |
| 1051 | int trips, | 1066 | int trips, void *devdata, |
| 1052 | void *devdata, struct | 1067 | const struct thermal_zone_device_ops *ops, |
| 1053 | thermal_zone_device_ops | 1068 | int tc1, int tc2, int passive_delay, int polling_delay) |
| 1054 | *ops, int tc1, int | ||
| 1055 | tc2, | ||
| 1056 | int passive_delay, | ||
| 1057 | int polling_delay) | ||
| 1058 | { | 1069 | { |
| 1059 | struct thermal_zone_device *tz; | 1070 | struct thermal_zone_device *tz; |
| 1060 | struct thermal_cooling_device *pos; | 1071 | struct thermal_cooling_device *pos; |
| @@ -1214,6 +1225,82 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
| 1214 | 1225 | ||
| 1215 | EXPORT_SYMBOL(thermal_zone_device_unregister); | 1226 | EXPORT_SYMBOL(thermal_zone_device_unregister); |
| 1216 | 1227 | ||
| 1228 | int generate_netlink_event(u32 orig, enum events event) | ||
| 1229 | { | ||
| 1230 | struct sk_buff *skb; | ||
| 1231 | struct nlattr *attr; | ||
| 1232 | struct thermal_genl_event *thermal_event; | ||
| 1233 | void *msg_header; | ||
| 1234 | int size; | ||
| 1235 | int result; | ||
| 1236 | |||
| 1237 | /* allocate memory */ | ||
| 1238 | size = nla_total_size(sizeof(struct thermal_genl_event)) + \ | ||
| 1239 | nla_total_size(0); | ||
| 1240 | |||
| 1241 | skb = genlmsg_new(size, GFP_ATOMIC); | ||
| 1242 | if (!skb) | ||
| 1243 | return -ENOMEM; | ||
| 1244 | |||
| 1245 | /* add the genetlink message header */ | ||
| 1246 | msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, | ||
| 1247 | &thermal_event_genl_family, 0, | ||
| 1248 | THERMAL_GENL_CMD_EVENT); | ||
| 1249 | if (!msg_header) { | ||
| 1250 | nlmsg_free(skb); | ||
| 1251 | return -ENOMEM; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | /* fill the data */ | ||
| 1255 | attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \ | ||
| 1256 | sizeof(struct thermal_genl_event)); | ||
| 1257 | |||
| 1258 | if (!attr) { | ||
| 1259 | nlmsg_free(skb); | ||
| 1260 | return -EINVAL; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | thermal_event = nla_data(attr); | ||
| 1264 | if (!thermal_event) { | ||
| 1265 | nlmsg_free(skb); | ||
| 1266 | return -EINVAL; | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | memset(thermal_event, 0, sizeof(struct thermal_genl_event)); | ||
| 1270 | |||
| 1271 | thermal_event->orig = orig; | ||
| 1272 | thermal_event->event = event; | ||
| 1273 | |||
| 1274 | /* send multicast genetlink message */ | ||
| 1275 | result = genlmsg_end(skb, msg_header); | ||
| 1276 | if (result < 0) { | ||
| 1277 | nlmsg_free(skb); | ||
| 1278 | return result; | ||
| 1279 | } | ||
| 1280 | |||
| 1281 | result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); | ||
| 1282 | if (result) | ||
| 1283 | printk(KERN_INFO "failed to send netlink event: %d\n", result); | ||
| 1284 | |||
| 1285 | return result; | ||
| 1286 | } | ||
| 1287 | EXPORT_SYMBOL(generate_netlink_event); | ||
| 1288 | |||
| 1289 | static int genetlink_init(void) | ||
| 1290 | { | ||
| 1291 | int result; | ||
| 1292 | |||
| 1293 | result = genl_register_family(&thermal_event_genl_family); | ||
| 1294 | if (result) | ||
| 1295 | return result; | ||
| 1296 | |||
| 1297 | result = genl_register_mc_group(&thermal_event_genl_family, | ||
| 1298 | &thermal_event_mcgrp); | ||
| 1299 | if (result) | ||
| 1300 | genl_unregister_family(&thermal_event_genl_family); | ||
| 1301 | return result; | ||
| 1302 | } | ||
| 1303 | |||
| 1217 | static int __init thermal_init(void) | 1304 | static int __init thermal_init(void) |
| 1218 | { | 1305 | { |
| 1219 | int result = 0; | 1306 | int result = 0; |
| @@ -1225,9 +1312,15 @@ static int __init thermal_init(void) | |||
| 1225 | mutex_destroy(&thermal_idr_lock); | 1312 | mutex_destroy(&thermal_idr_lock); |
| 1226 | mutex_destroy(&thermal_list_lock); | 1313 | mutex_destroy(&thermal_list_lock); |
| 1227 | } | 1314 | } |
| 1315 | result = genetlink_init(); | ||
| 1228 | return result; | 1316 | return result; |
| 1229 | } | 1317 | } |
| 1230 | 1318 | ||
| 1319 | static void genetlink_exit(void) | ||
| 1320 | { | ||
| 1321 | genl_unregister_family(&thermal_event_genl_family); | ||
| 1322 | } | ||
| 1323 | |||
| 1231 | static void __exit thermal_exit(void) | 1324 | static void __exit thermal_exit(void) |
| 1232 | { | 1325 | { |
| 1233 | class_unregister(&thermal_class); | 1326 | class_unregister(&thermal_class); |
| @@ -1235,7 +1328,8 @@ static void __exit thermal_exit(void) | |||
| 1235 | idr_destroy(&thermal_cdev_idr); | 1328 | idr_destroy(&thermal_cdev_idr); |
| 1236 | mutex_destroy(&thermal_idr_lock); | 1329 | mutex_destroy(&thermal_idr_lock); |
| 1237 | mutex_destroy(&thermal_list_lock); | 1330 | mutex_destroy(&thermal_list_lock); |
| 1331 | genetlink_exit(); | ||
| 1238 | } | 1332 | } |
| 1239 | 1333 | ||
| 1240 | subsys_initcall(thermal_init); | 1334 | fs_initcall(thermal_init); |
| 1241 | module_exit(thermal_exit); | 1335 | module_exit(thermal_exit); |
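Kernel code reports a thermal event to the new multicast group with a single call. A minimal sketch, where THERMAL_CRITICAL is assumed to be one of the enum events values introduced alongside this interface and tz is the originating struct thermal_zone_device:

	/* safe from atomic context: the message is allocated with GFP_ATOMIC */
	generate_netlink_event(tz->id, THERMAL_CRITICAL);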
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 359ef11725a6..78ca429929f7 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -148,9 +148,7 @@ struct acpi_device_flags { | |||
| 148 | u32 suprise_removal_ok:1; | 148 | u32 suprise_removal_ok:1; |
| 149 | u32 power_manageable:1; | 149 | u32 power_manageable:1; |
| 150 | u32 performance_manageable:1; | 150 | u32 performance_manageable:1; |
| 151 | u32 wake_capable:1; /* Wakeup(_PRW) supported? */ | 151 | u32 reserved:24; |
| 152 | u32 force_power_state:1; | ||
| 153 | u32 reserved:22; | ||
| 154 | }; | 152 | }; |
| 155 | 153 | ||
| 156 | /* File System */ | 154 | /* File System */ |
| @@ -242,20 +240,14 @@ struct acpi_device_perf { | |||
| 242 | struct acpi_device_wakeup_flags { | 240 | struct acpi_device_wakeup_flags { |
| 243 | u8 valid:1; /* Can successfully enable wakeup? */ | 241 | u8 valid:1; /* Can successfully enable wakeup? */ |
| 244 | u8 run_wake:1; /* Run-Wake GPE devices */ | 242 | u8 run_wake:1; /* Run-Wake GPE devices */ |
| 245 | u8 always_enabled:1; /* Run-wake devices that are always enabled */ | ||
| 246 | u8 notifier_present:1; /* Wake-up notify handler has been installed */ | 243 | u8 notifier_present:1; /* Wake-up notify handler has been installed */ |
| 247 | }; | 244 | }; |
| 248 | 245 | ||
| 249 | struct acpi_device_wakeup_state { | ||
| 250 | u8 enabled:1; | ||
| 251 | }; | ||
| 252 | |||
| 253 | struct acpi_device_wakeup { | 246 | struct acpi_device_wakeup { |
| 254 | acpi_handle gpe_device; | 247 | acpi_handle gpe_device; |
| 255 | u64 gpe_number; | 248 | u64 gpe_number; |
| 256 | u64 sleep_state; | 249 | u64 sleep_state; |
| 257 | struct acpi_handle_list resources; | 250 | struct acpi_handle_list resources; |
| 258 | struct acpi_device_wakeup_state state; | ||
| 259 | struct acpi_device_wakeup_flags flags; | 251 | struct acpi_device_wakeup_flags flags; |
| 260 | int prepare_count; | 252 | int prepare_count; |
| 261 | int run_wake_count; | 253 | int run_wake_count; |
| @@ -328,8 +320,8 @@ void acpi_bus_data_handler(acpi_handle handle, void *context); | |||
| 328 | acpi_status acpi_bus_get_status_handle(acpi_handle handle, | 320 | acpi_status acpi_bus_get_status_handle(acpi_handle handle, |
| 329 | unsigned long long *sta); | 321 | unsigned long long *sta); |
| 330 | int acpi_bus_get_status(struct acpi_device *device); | 322 | int acpi_bus_get_status(struct acpi_device *device); |
| 331 | int acpi_bus_get_power(acpi_handle handle, int *state); | ||
| 332 | int acpi_bus_set_power(acpi_handle handle, int state); | 323 | int acpi_bus_set_power(acpi_handle handle, int state); |
| 324 | int acpi_bus_update_power(acpi_handle handle, int *state_p); | ||
| 333 | bool acpi_bus_power_manageable(acpi_handle handle); | 325 | bool acpi_bus_power_manageable(acpi_handle handle); |
| 334 | bool acpi_bus_can_wakeup(acpi_handle handle); | 326 | bool acpi_bus_can_wakeup(acpi_handle handle); |
| 335 | #ifdef CONFIG_ACPI_PROC_EVENT | 327 | #ifdef CONFIG_ACPI_PROC_EVENT |
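Callers that used to read the power state with acpi_bus_get_power() now go through acpi_bus_update_power(), which queries the current state and refreshes the driver's view of it in one step. A minimal sketch, assuming device is a struct acpi_device *:

	int state;
	int error;

	error = acpi_bus_update_power(device->handle, &state);
	if (error)
		dev_err(&device->dev, "Cannot read ACPI power state\n");
	else if (state == ACPI_STATE_D0)
		dev_dbg(&device->dev, "Device is in D0\n");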
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 53b7cfd924a3..241b8a04c83c 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | 47 | ||
| 48 | /* Current ACPICA subsystem version in YYYYMMDD format */ | 48 | /* Current ACPICA subsystem version in YYYYMMDD format */ |
| 49 | 49 | ||
| 50 | #define ACPI_CA_VERSION 0x20101013 | 50 | #define ACPI_CA_VERSION 0x20101209 |
| 51 | 51 | ||
| 52 | #include "actypes.h" | 52 | #include "actypes.h" |
| 53 | #include "actbl.h" | 53 | #include "actbl.h" |
| @@ -229,6 +229,10 @@ acpi_status | |||
| 229 | acpi_install_initialization_handler(acpi_init_handler handler, u32 function); | 229 | acpi_install_initialization_handler(acpi_init_handler handler, u32 function); |
| 230 | 230 | ||
| 231 | acpi_status | 231 | acpi_status |
| 232 | acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, | ||
| 233 | void *context); | ||
| 234 | |||
| 235 | acpi_status | ||
| 232 | acpi_install_fixed_event_handler(u32 acpi_event, | 236 | acpi_install_fixed_event_handler(u32 acpi_event, |
| 233 | acpi_event_handler handler, void *context); | 237 | acpi_event_handler handler, void *context); |
| 234 | 238 | ||
| @@ -258,11 +262,11 @@ acpi_remove_address_space_handler(acpi_handle device, | |||
| 258 | acpi_status | 262 | acpi_status |
| 259 | acpi_install_gpe_handler(acpi_handle gpe_device, | 263 | acpi_install_gpe_handler(acpi_handle gpe_device, |
| 260 | u32 gpe_number, | 264 | u32 gpe_number, |
| 261 | u32 type, acpi_event_handler address, void *context); | 265 | u32 type, acpi_gpe_handler address, void *context); |
| 262 | 266 | ||
| 263 | acpi_status | 267 | acpi_status |
| 264 | acpi_remove_gpe_handler(acpi_handle gpe_device, | 268 | acpi_remove_gpe_handler(acpi_handle gpe_device, |
| 265 | u32 gpe_number, acpi_event_handler address); | 269 | u32 gpe_number, acpi_gpe_handler address); |
| 266 | 270 | ||
| 267 | #ifdef ACPI_FUTURE_USAGE | 271 | #ifdef ACPI_FUTURE_USAGE |
| 268 | acpi_status acpi_install_exception_handler(acpi_exception_handler handler); | 272 | acpi_status acpi_install_exception_handler(acpi_exception_handler handler); |
| @@ -292,11 +296,13 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); | |||
| 292 | 296 | ||
| 293 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); | 297 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); |
| 294 | 298 | ||
| 295 | acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number); | ||
| 296 | |||
| 297 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); | 299 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); |
| 298 | 300 | ||
| 299 | acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action); | 301 | acpi_status |
| 302 | acpi_setup_gpe_for_wake(acpi_handle parent_device, | ||
| 303 | acpi_handle gpe_device, u32 gpe_number); | ||
| 304 | |||
| 305 | acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action); | ||
| 300 | 306 | ||
| 301 | acpi_status | 307 | acpi_status |
| 302 | acpi_get_gpe_status(acpi_handle gpe_device, | 308 | acpi_get_gpe_status(acpi_handle gpe_device, |
| @@ -315,7 +321,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, | |||
| 315 | 321 | ||
| 316 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device); | 322 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device); |
| 317 | 323 | ||
| 318 | acpi_status acpi_update_gpes(void); | 324 | acpi_status acpi_update_all_gpes(void); |
| 319 | 325 | ||
| 320 | /* | 326 | /* |
| 321 | * Resource interfaces | 327 | * Resource interfaces |
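The old acpi_gpe_wakeup() call is replaced by a two step flow: acpi_setup_gpe_for_wake() ties the wake GPE to the device it is meant to wake, and acpi_set_gpe_wake_mask() then enables or disables it around system sleep. A minimal sketch with hypothetical dev, gpe_device and gpe_number values:

	acpi_status status;

	status = acpi_setup_gpe_for_wake(dev->handle, gpe_device, gpe_number);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* while preparing to enter a sleep state the device may wake from */
	acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_ENABLE);

	/* ... and on resume */
	acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_DISABLE);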
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 2b134b691e34..939a431a6ab6 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
| @@ -656,33 +656,34 @@ typedef u32 acpi_event_status; | |||
| 656 | #define ACPI_GPE_MAX 0xFF | 656 | #define ACPI_GPE_MAX 0xFF |
| 657 | #define ACPI_NUM_GPE 256 | 657 | #define ACPI_NUM_GPE 256 |
| 658 | 658 | ||
| 659 | /* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */ | 659 | /* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */ |
| 660 | 660 | ||
| 661 | #define ACPI_GPE_ENABLE 0 | 661 | #define ACPI_GPE_ENABLE 0 |
| 662 | #define ACPI_GPE_DISABLE 1 | 662 | #define ACPI_GPE_DISABLE 1 |
| 663 | #define ACPI_GPE_COND_ENABLE 2 | 663 | #define ACPI_GPE_CONDITIONAL_ENABLE 2 |
| 664 | 664 | ||
| 665 | /* | 665 | /* |
| 666 | * GPE info flags - Per GPE | 666 | * GPE info flags - Per GPE |
| 667 | * +-------+---+-+-+ | 667 | * +-------+-+-+---+ |
| 668 | * | 7:4 |3:2|1|0| | 668 | * | 7:4 |3|2|1:0| |
| 669 | * +-------+---+-+-+ | 669 | * +-------+-+-+---+ |
| 670 | * | | | | | 670 | * | | | | |
| 671 | * | | | +--- Interrupt type: edge or level triggered | 671 | * | | | +-- Type of dispatch:to method, handler, notify, or none |
| 672 | * | | +----- GPE can wake the system | 672 | * | | +----- Interrupt type: edge or level triggered |
| 673 | * | +-------- Type of dispatch:to method, handler, or none | 673 | * | +------- Is a Wake GPE |
| 674 | * +-------------- <Reserved> | 674 | * +------------ <Reserved> |
| 675 | */ | 675 | */ |
| 676 | #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 | 676 | #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 |
| 677 | #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 | 677 | #define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 |
| 678 | #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 | 678 | #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 |
| 679 | #define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 | ||
| 680 | #define ACPI_GPE_DISPATCH_MASK (u8) 0x03 | ||
| 679 | 681 | ||
| 680 | #define ACPI_GPE_CAN_WAKE (u8) 0x02 | 682 | #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 |
| 683 | #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 | ||
| 684 | #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 | ||
| 681 | 685 | ||
| 682 | #define ACPI_GPE_DISPATCH_MASK (u8) 0x0C | 686 | #define ACPI_GPE_CAN_WAKE (u8) 0x08 |
| 683 | #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04 | ||
| 684 | #define ACPI_GPE_DISPATCH_METHOD (u8) 0x08 | ||
| 685 | #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 | ||
| 686 | 687 | ||
| 687 | /* | 688 | /* |
| 688 | * Flags for GPE and Lock interfaces | 689 | * Flags for GPE and Lock interfaces |
| @@ -894,9 +895,20 @@ typedef void | |||
| 894 | /* | 895 | /* |
| 895 | * Various handlers and callback procedures | 896 | * Various handlers and callback procedures |
| 896 | */ | 897 | */ |
| 898 | typedef | ||
| 899 | void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, | ||
| 900 | acpi_handle device, | ||
| 901 | u32 event_number, void *context); | ||
| 902 | |||
| 903 | #define ACPI_EVENT_TYPE_GPE 0 | ||
| 904 | #define ACPI_EVENT_TYPE_FIXED 1 | ||
| 905 | |||
| 897 | typedef u32(*acpi_event_handler) (void *context); | 906 | typedef u32(*acpi_event_handler) (void *context); |
| 898 | 907 | ||
| 899 | typedef | 908 | typedef |
| 909 | u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); | ||
| 910 | |||
| 911 | typedef | ||
| 900 | void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); | 912 | void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); |
| 901 | 913 | ||
| 902 | typedef | 914 | typedef |
| @@ -951,6 +963,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); | |||
| 951 | #define ACPI_INTERRUPT_NOT_HANDLED 0x00 | 963 | #define ACPI_INTERRUPT_NOT_HANDLED 0x00 |
| 952 | #define ACPI_INTERRUPT_HANDLED 0x01 | 964 | #define ACPI_INTERRUPT_HANDLED 0x01 |
| 953 | 965 | ||
| 966 | /* GPE handler return values */ | ||
| 967 | |||
| 968 | #define ACPI_REENABLE_GPE 0x80 | ||
| 969 | |||
| 954 | /* Length of 32-bit EISAID values when converted back to a string */ | 970 | /* Length of 32-bit EISAID values when converted back to a string */ |
| 955 | 971 | ||
| 956 | #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ | 972 | #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ |
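With the acpi_gpe_handler type, a GPE handler now receives the GPE device and number directly, and it can ask ACPICA to re-enable the GPE by OR-ing ACPI_REENABLE_GPE into its return value. A minimal sketch with hypothetical names; a NULL gpe_device selects the FADT-defined GPE blocks:

	#include <linux/acpi.h>

	static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
				       void *context)
	{
		/* ... acknowledge the event in the hardware ... */
		return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
	}

	static acpi_status example_install(u32 gpe_number, void *context)
	{
		return acpi_install_gpe_handler(NULL, gpe_number,
						ACPI_GPE_LEVEL_TRIGGERED,
						example_gpe_handler, context);
	}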
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 1b62102fbb67..55192ac0cede 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -324,6 +324,12 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr); | |||
| 324 | int acpi_processor_get_throttling_info(struct acpi_processor *pr); | 324 | int acpi_processor_get_throttling_info(struct acpi_processor *pr); |
| 325 | extern int acpi_processor_set_throttling(struct acpi_processor *pr, | 325 | extern int acpi_processor_set_throttling(struct acpi_processor *pr, |
| 326 | int state, bool force); | 326 | int state, bool force); |
| 327 | /* | ||
| 328 | * Reevaluate whether the T-state is invalid after one cpu is | ||
| 329 | * onlined/offlined. In such case the flags.throttling will be updated. | ||
| 330 | */ | ||
| 331 | extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, | ||
| 332 | unsigned long action); | ||
| 327 | extern const struct file_operations acpi_processor_throttling_fops; | 333 | extern const struct file_operations acpi_processor_throttling_fops; |
| 328 | extern void acpi_processor_throttling_init(void); | 334 | extern void acpi_processor_throttling_init(void); |
| 329 | /* in processor_idle.c */ | 335 | /* in processor_idle.c */ |
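acpi_processor_reevaluate_tstate() is intended to be called from the CPU hotplug path so that flags.throttling is recomputed when a CPU goes on line or off line. A hedged sketch, assuming the per-CPU processors pointer declared by the ACPI processor driver and a standard hotplug notifier:

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/percpu.h>
	#include <acpi/processor.h>

	static int example_cpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			acpi_processor_reevaluate_tstate(pr, action);
		return NOTIFY_OK;
	}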
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 67c91b4418b0..eb176bb1b15b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -352,4 +352,14 @@ static inline int acpi_table_parse(char *id, | |||
| 352 | return -1; | 352 | return -1; |
| 353 | } | 353 | } |
| 354 | #endif /* !CONFIG_ACPI */ | 354 | #endif /* !CONFIG_ACPI */ |
| 355 | |||
| 356 | #ifdef CONFIG_ACPI_SLEEP | ||
| 357 | int suspend_nvs_register(unsigned long start, unsigned long size); | ||
| 358 | #else | ||
| 359 | static inline int suspend_nvs_register(unsigned long a, unsigned long b) | ||
| 360 | { | ||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | #endif | ||
| 364 | |||
| 355 | #endif /*_LINUX_ACPI_H*/ | 365 | #endif /*_LINUX_ACPI_H*/ |
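With the declaration moved here (see the matching removal from linux/suspend.h below), ACPI code can register firmware NVS regions whenever CONFIG_ACPI_SLEEP is set, and callers compile to a no-op otherwise. A minimal sketch of a caller, with made-up address and size values standing in for real firmware-provided ones:

#include <linux/acpi.h>

static int __init my_register_nvs_region(void)
{
        /*
         * Placeholder values; real callers pass the start and size of
         * an NVS region reported by the firmware memory map.
         */
        return suspend_nvs_register(0x7f000000UL, 0x10000UL);
}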
diff --git a/include/linux/cper.h b/include/linux/cper.h index bf972f81e2a7..3104aaff5dd0 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h | |||
| @@ -39,10 +39,12 @@ | |||
| 39 | * Severity definition for error_severity in struct cper_record_header | 39 | * Severity definition for error_severity in struct cper_record_header |
| 40 | * and section_severity in struct cper_section_descriptor | 40 | * and section_severity in struct cper_section_descriptor |
| 41 | */ | 41 | */ |
| 42 | #define CPER_SEV_RECOVERABLE 0x0 | 42 | enum { |
| 43 | #define CPER_SEV_FATAL 0x1 | 43 | CPER_SEV_RECOVERABLE, |
| 44 | #define CPER_SEV_CORRECTED 0x2 | 44 | CPER_SEV_FATAL, |
| 45 | #define CPER_SEV_INFORMATIONAL 0x3 | 45 | CPER_SEV_CORRECTED, |
| 46 | CPER_SEV_INFORMATIONAL, | ||
| 47 | }; | ||
| 46 | 48 | ||
| 47 | /* | 49 | /* |
| 48 | * Validation bits definition for validation_bits in struct | 50 | * Validation bits definition for validation_bits in struct |
| @@ -201,6 +203,47 @@ | |||
| 201 | UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ | 203 | UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ |
| 202 | 0xDF, 0xAA, 0x84, 0xEC) | 204 | 0xDF, 0xAA, 0x84, 0xEC) |
| 203 | 205 | ||
| 206 | #define CPER_PROC_VALID_TYPE 0x0001 | ||
| 207 | #define CPER_PROC_VALID_ISA 0x0002 | ||
| 208 | #define CPER_PROC_VALID_ERROR_TYPE 0x0004 | ||
| 209 | #define CPER_PROC_VALID_OPERATION 0x0008 | ||
| 210 | #define CPER_PROC_VALID_FLAGS 0x0010 | ||
| 211 | #define CPER_PROC_VALID_LEVEL 0x0020 | ||
| 212 | #define CPER_PROC_VALID_VERSION 0x0040 | ||
| 213 | #define CPER_PROC_VALID_BRAND_INFO 0x0080 | ||
| 214 | #define CPER_PROC_VALID_ID 0x0100 | ||
| 215 | #define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 | ||
| 216 | #define CPER_PROC_VALID_REQUESTOR_ID 0x0400 | ||
| 217 | #define CPER_PROC_VALID_RESPONDER_ID 0x0800 | ||
| 218 | #define CPER_PROC_VALID_IP 0x1000 | ||
| 219 | |||
| 220 | #define CPER_MEM_VALID_ERROR_STATUS 0x0001 | ||
| 221 | #define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002 | ||
| 222 | #define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004 | ||
| 223 | #define CPER_MEM_VALID_NODE 0x0008 | ||
| 224 | #define CPER_MEM_VALID_CARD 0x0010 | ||
| 225 | #define CPER_MEM_VALID_MODULE 0x0020 | ||
| 226 | #define CPER_MEM_VALID_BANK 0x0040 | ||
| 227 | #define CPER_MEM_VALID_DEVICE 0x0080 | ||
| 228 | #define CPER_MEM_VALID_ROW 0x0100 | ||
| 229 | #define CPER_MEM_VALID_COLUMN 0x0200 | ||
| 230 | #define CPER_MEM_VALID_BIT_POSITION 0x0400 | ||
| 231 | #define CPER_MEM_VALID_REQUESTOR_ID 0x0800 | ||
| 232 | #define CPER_MEM_VALID_RESPONDER_ID 0x1000 | ||
| 233 | #define CPER_MEM_VALID_TARGET_ID 0x2000 | ||
| 234 | #define CPER_MEM_VALID_ERROR_TYPE 0x4000 | ||
| 235 | |||
| 236 | #define CPER_PCIE_VALID_PORT_TYPE 0x0001 | ||
| 237 | #define CPER_PCIE_VALID_VERSION 0x0002 | ||
| 238 | #define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 | ||
| 239 | #define CPER_PCIE_VALID_DEVICE_ID 0x0008 | ||
| 240 | #define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 | ||
| 241 | #define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 | ||
| 242 | #define CPER_PCIE_VALID_CAPABILITY 0x0040 | ||
| 243 | #define CPER_PCIE_VALID_AER_INFO 0x0080 | ||
| 244 | |||
| 245 | #define CPER_PCIE_SLOT_SHIFT 3 | ||
| 246 | |||
| 204 | /* | 247 | /* |
| 205 | * All tables and structs must be byte-packed to match CPER | 248 | * All tables and structs must be byte-packed to match CPER |
| 206 | * specification, since the tables are provided by the system BIOS | 249 | * specification, since the tables are provided by the system BIOS |
| @@ -306,6 +349,41 @@ struct cper_sec_mem_err { | |||
| 306 | __u8 error_type; | 349 | __u8 error_type; |
| 307 | }; | 350 | }; |
| 308 | 351 | ||
| 352 | struct cper_sec_pcie { | ||
| 353 | __u64 validation_bits; | ||
| 354 | __u32 port_type; | ||
| 355 | struct { | ||
| 356 | __u8 minor; | ||
| 357 | __u8 major; | ||
| 358 | __u8 reserved[2]; | ||
| 359 | } version; | ||
| 360 | __u16 command; | ||
| 361 | __u16 status; | ||
| 362 | __u32 reserved; | ||
| 363 | struct { | ||
| 364 | __u16 vendor_id; | ||
| 365 | __u16 device_id; | ||
| 366 | __u8 class_code[3]; | ||
| 367 | __u8 function; | ||
| 368 | __u8 device; | ||
| 369 | __u16 segment; | ||
| 370 | __u8 bus; | ||
| 371 | __u8 secondary_bus; | ||
| 372 | __u16 slot; | ||
| 373 | __u8 reserved; | ||
| 374 | } device_id; | ||
| 375 | struct { | ||
| 376 | __u32 lower; | ||
| 377 | __u32 upper; | ||
| 378 | } serial_number; | ||
| 379 | struct { | ||
| 380 | __u16 secondary_status; | ||
| 381 | __u16 control; | ||
| 382 | } bridge; | ||
| 383 | __u8 capability[60]; | ||
| 384 | __u8 aer_info[96]; | ||
| 385 | }; | ||
| 386 | |||
| 309 | /* Reset to default packing */ | 387 | /* Reset to default packing */ |
| 310 | #pragma pack() | 388 | #pragma pack() |
| 311 | 389 | ||
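The new CPER_*_VALID_* masks and struct cper_sec_pcie above allow error-handling code to report only the fields the firmware actually filled in. A minimal sketch of a PCIe section decoder built from those definitions; the function name and log text are illustrative:

#include <linux/kernel.h>
#include <linux/cper.h>

static void my_report_pcie_section(const struct cper_sec_pcie *pcie)
{
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                pr_err("port type: %u\n", pcie->port_type);

        if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID)
                pr_err("device %04x:%02x:%02x.%x (slot %d), vendor %04x, device id %04x\n",
                       pcie->device_id.segment, pcie->device_id.bus,
                       pcie->device_id.device, pcie->device_id.function,
                       pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT,
                       pcie->device_id.vendor_id, pcie->device_id.device_id);

        if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
                pr_err("serial number: 0x%08x%08x\n",
                       pcie->serial_number.upper, pcie->serial_number.lower);
}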
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 65aae34759de..045f2f275cd0 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
| @@ -454,6 +454,44 @@ unsigned int ipmi_addr_length(int addr_type); | |||
| 454 | /* Validate that the given IPMI address is valid. */ | 454 | /* Validate that the given IPMI address is valid. */ |
| 455 | int ipmi_validate_addr(struct ipmi_addr *addr, int len); | 455 | int ipmi_validate_addr(struct ipmi_addr *addr, int len); |
| 456 | 456 | ||
| 457 | /* | ||
| 458 | * How did the IPMI driver find out about the device? | ||
| 459 | */ | ||
| 460 | enum ipmi_addr_src { | ||
| 461 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, | ||
| 462 | SI_PCI, SI_DEVICETREE, SI_DEFAULT | ||
| 463 | }; | ||
| 464 | |||
| 465 | union ipmi_smi_info_union { | ||
| 466 | /* | ||
| 467 | * the acpi_info element is defined for the SI_ACPI | ||
| 468 | * address type | ||
| 469 | */ | ||
| 470 | struct { | ||
| 471 | void *acpi_handle; | ||
| 472 | } acpi_info; | ||
| 473 | }; | ||
| 474 | |||
| 475 | struct ipmi_smi_info { | ||
| 476 | enum ipmi_addr_src addr_src; | ||
| 477 | |||
| 478 | /* | ||
| 479 | * Base device for the interface. Don't forget to put this when | ||
| 480 | * you are done. | ||
| 481 | */ | ||
| 482 | struct device *dev; | ||
| 483 | |||
| 484 | /* | ||
| 485 | * The addr_info provides more detailed info for some IPMI | ||
| 486 | * devices, depending on the addr_src. Currently only SI_ACPI | ||
| 487 | * info is provided. | ||
| 488 | */ | ||
| 489 | union ipmi_smi_info_union addr_info; | ||
| 490 | }; | ||
| 491 | |||
| 492 | /* Get the private info of an ipmi_smi_t interface. */ | ||
| 493 | extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); | ||
| 494 | |||
| 457 | #endif /* __KERNEL__ */ | 495 | #endif /* __KERNEL__ */ |
| 458 | 496 | ||
| 459 | 497 | ||
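A minimal sketch of a consumer of the new ipmi_get_smi_info() call, assuming interface number 0; the error handling and printed text are illustrative:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/ipmi.h>

static int my_query_ipmi_source(void)
{
        struct ipmi_smi_info info;
        int rv;

        rv = ipmi_get_smi_info(0, &info);
        if (rv)
                return rv;

        if (info.addr_src == SI_ACPI)
                pr_info("interface 0 was discovered via ACPI, handle %p\n",
                        info.addr_info.acpi_info.acpi_handle);

        /* ipmi_get_smi_info() hands out a device reference. */
        put_device(info.dev);
        return 0;
}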
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 4b48318ac542..906590aa6907 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
| 40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
| 41 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
| 42 | #include <linux/ipmi.h> | ||
| 42 | 43 | ||
| 43 | /* This files describes the interface for IPMI system management interface | 44 | /* This files describes the interface for IPMI system management interface |
| 44 | drivers to bind into the IPMI message handler. */ | 45 | drivers to bind into the IPMI message handler. */ |
| @@ -86,6 +87,13 @@ struct ipmi_smi_handlers { | |||
| 86 | int (*start_processing)(void *send_info, | 87 | int (*start_processing)(void *send_info, |
| 87 | ipmi_smi_t new_intf); | 88 | ipmi_smi_t new_intf); |
| 88 | 89 | ||
| 90 | /* | ||
| 91 | * Get the detailed private info of the low-level interface and store | ||
| 92 | * it in the ipmi_smi_info structure. For example, the | ||
| 93 | * ACPI device handle will be returned for the pnp_acpi IPMI device. | ||
| 94 | */ | ||
| 95 | int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); | ||
| 96 | |||
| 89 | /* Called to enqueue an SMI message to be sent. This | 97 | /* Called to enqueue an SMI message to be sent. This |
| 90 | operation is not allowed to fail. If an error occurs, it | 98 | operation is not allowed to fail. If an error occurs, it |
| 91 | should report back the error in a received message. It may | 99 | should report back the error in a received message. It may |
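On the provider side, a low-level driver fills in the caller's structure through the new get_smi_info() hook. A minimal sketch under the assumption of a driver-private structure that records the address source, base device and ACPI handle; all of the my_* names below are hypothetical:

#include <linux/device.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>

struct my_smi {                         /* hypothetical private data */
        enum ipmi_addr_src addr_source;
        struct device *dev;
        void *acpi_handle;
};

static int my_get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
        struct my_smi *smi = send_info;

        data->addr_src = smi->addr_source;
        data->dev = smi->dev;
        if (smi->addr_source == SI_ACPI)
                data->addr_info.acpi_info.acpi_handle = smi->acpi_handle;

        /* Hand out a reference; the caller drops it with put_device(). */
        get_device(data->dev);
        return 0;
}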
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index c1f499835b2a..5a89e3612875 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
| @@ -258,23 +258,6 @@ static inline int hibernate(void) { return -ENOSYS; } | |||
| 258 | static inline bool system_entering_hibernation(void) { return false; } | 258 | static inline bool system_entering_hibernation(void) { return false; } |
| 259 | #endif /* CONFIG_HIBERNATION */ | 259 | #endif /* CONFIG_HIBERNATION */ |
| 260 | 260 | ||
| 261 | #ifdef CONFIG_SUSPEND_NVS | ||
| 262 | extern int suspend_nvs_register(unsigned long start, unsigned long size); | ||
| 263 | extern int suspend_nvs_alloc(void); | ||
| 264 | extern void suspend_nvs_free(void); | ||
| 265 | extern void suspend_nvs_save(void); | ||
| 266 | extern void suspend_nvs_restore(void); | ||
| 267 | #else /* CONFIG_SUSPEND_NVS */ | ||
| 268 | static inline int suspend_nvs_register(unsigned long a, unsigned long b) | ||
| 269 | { | ||
| 270 | return 0; | ||
| 271 | } | ||
| 272 | static inline int suspend_nvs_alloc(void) { return 0; } | ||
| 273 | static inline void suspend_nvs_free(void) {} | ||
| 274 | static inline void suspend_nvs_save(void) {} | ||
| 275 | static inline void suspend_nvs_restore(void) {} | ||
| 276 | #endif /* CONFIG_SUSPEND_NVS */ | ||
| 277 | |||
| 278 | #ifdef CONFIG_PM_SLEEP | 261 | #ifdef CONFIG_PM_SLEEP |
| 279 | void save_processor_state(void); | 262 | void save_processor_state(void); |
| 280 | void restore_processor_state(void); | 263 | void restore_processor_state(void); |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 1de8b9eb841b..8651556dbd52 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -77,7 +77,7 @@ struct thermal_cooling_device { | |||
| 77 | char type[THERMAL_NAME_LENGTH]; | 77 | char type[THERMAL_NAME_LENGTH]; |
| 78 | struct device device; | 78 | struct device device; |
| 79 | void *devdata; | 79 | void *devdata; |
| 80 | struct thermal_cooling_device_ops *ops; | 80 | const struct thermal_cooling_device_ops *ops; |
| 81 | struct list_head node; | 81 | struct list_head node; |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| @@ -114,7 +114,7 @@ struct thermal_zone_device { | |||
| 114 | int last_temperature; | 114 | int last_temperature; |
| 115 | bool passive; | 115 | bool passive; |
| 116 | unsigned int forced_passive; | 116 | unsigned int forced_passive; |
| 117 | struct thermal_zone_device_ops *ops; | 117 | const struct thermal_zone_device_ops *ops; |
| 118 | struct list_head cooling_devices; | 118 | struct list_head cooling_devices; |
| 119 | struct idr idr; | 119 | struct idr idr; |
| 120 | struct mutex lock; /* protect cooling devices list */ | 120 | struct mutex lock; /* protect cooling devices list */ |
| @@ -127,13 +127,41 @@ struct thermal_zone_device { | |||
| 127 | struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ | 127 | struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ |
| 128 | #endif | 128 | #endif |
| 129 | }; | 129 | }; |
| 130 | /* Adding event notification support elements */ | ||
| 131 | #define THERMAL_GENL_FAMILY_NAME "thermal_event" | ||
| 132 | #define THERMAL_GENL_VERSION 0x01 | ||
| 133 | #define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" | ||
| 134 | |||
| 135 | enum events { | ||
| 136 | THERMAL_AUX0, | ||
| 137 | THERMAL_AUX1, | ||
| 138 | THERMAL_CRITICAL, | ||
| 139 | THERMAL_DEV_FAULT, | ||
| 140 | }; | ||
| 141 | |||
| 142 | struct thermal_genl_event { | ||
| 143 | u32 orig; | ||
| 144 | enum events event; | ||
| 145 | }; | ||
| 146 | /* attributes of thermal_genl_family */ | ||
| 147 | enum { | ||
| 148 | THERMAL_GENL_ATTR_UNSPEC, | ||
| 149 | THERMAL_GENL_ATTR_EVENT, | ||
| 150 | __THERMAL_GENL_ATTR_MAX, | ||
| 151 | }; | ||
| 152 | #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) | ||
| 153 | |||
| 154 | /* commands supported by the thermal_genl_family */ | ||
| 155 | enum { | ||
| 156 | THERMAL_GENL_CMD_UNSPEC, | ||
| 157 | THERMAL_GENL_CMD_EVENT, | ||
| 158 | __THERMAL_GENL_CMD_MAX, | ||
| 159 | }; | ||
| 160 | #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) | ||
| 130 | 161 | ||
| 131 | struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, | 162 | struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, |
| 132 | struct | 163 | const struct thermal_zone_device_ops *, int tc1, int tc2, |
| 133 | thermal_zone_device_ops | 164 | int passive_freq, int polling_freq); |
| 134 | *, int tc1, int tc2, | ||
| 135 | int passive_freq, | ||
| 136 | int polling_freq); | ||
| 137 | void thermal_zone_device_unregister(struct thermal_zone_device *); | 165 | void thermal_zone_device_unregister(struct thermal_zone_device *); |
| 138 | 166 | ||
| 139 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, | 167 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, |
| @@ -142,9 +170,8 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, | |||
| 142 | struct thermal_cooling_device *); | 170 | struct thermal_cooling_device *); |
| 143 | void thermal_zone_device_update(struct thermal_zone_device *); | 171 | void thermal_zone_device_update(struct thermal_zone_device *); |
| 144 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, | 172 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, |
| 145 | struct | 173 | const struct thermal_cooling_device_ops *); |
| 146 | thermal_cooling_device_ops | ||
| 147 | *); | ||
| 148 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); | 174 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); |
| 175 | extern int generate_netlink_event(u32 orig, enum events event); | ||
| 149 | 176 | ||
| 150 | #endif /* __THERMAL_H__ */ | 177 | #endif /* __THERMAL_H__ */ |
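Two things change for thermal drivers above: the ops tables passed to thermal_zone_device_register() and thermal_cooling_device_register() may now be const, and generate_netlink_event() lets a zone broadcast trip events over the new generic netlink family. A minimal sketch of the notification side; the wrapper name, the originator id 0 and the warning text are illustrative only:

#include <linux/device.h>
#include <linux/thermal.h>

static void my_notify_critical(struct thermal_zone_device *tz)
{
        /*
         * "orig" identifies the originating instrument/zone to user
         * space listeners on THERMAL_GENL_MCAST_GROUP_NAME; 0 is used
         * here as a placeholder.
         */
        if (generate_netlink_event(0, THERMAL_CRITICAL))
                dev_warn(&tz->device, "failed to send thermal event\n");
}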
diff --git a/kernel/panic.c b/kernel/panic.c index 4c13b1a88ebb..991bb87a1704 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -34,6 +34,7 @@ static int pause_on_oops_flag; | |||
| 34 | static DEFINE_SPINLOCK(pause_on_oops_lock); | 34 | static DEFINE_SPINLOCK(pause_on_oops_lock); |
| 35 | 35 | ||
| 36 | int panic_timeout; | 36 | int panic_timeout; |
| 37 | EXPORT_SYMBOL_GPL(panic_timeout); | ||
| 37 | 38 | ||
| 38 | ATOMIC_NOTIFIER_HEAD(panic_notifier_list); | 39 | ATOMIC_NOTIFIER_HEAD(panic_notifier_list); |
| 39 | 40 | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a5aff3ebad38..265729966ece 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
| @@ -100,13 +100,9 @@ config PM_SLEEP_ADVANCED_DEBUG | |||
| 100 | depends on PM_ADVANCED_DEBUG | 100 | depends on PM_ADVANCED_DEBUG |
| 101 | default n | 101 | default n |
| 102 | 102 | ||
| 103 | config SUSPEND_NVS | ||
| 104 | bool | ||
| 105 | |||
| 106 | config SUSPEND | 103 | config SUSPEND |
| 107 | bool "Suspend to RAM and standby" | 104 | bool "Suspend to RAM and standby" |
| 108 | depends on PM && ARCH_SUSPEND_POSSIBLE | 105 | depends on PM && ARCH_SUSPEND_POSSIBLE |
| 109 | select SUSPEND_NVS if HAS_IOMEM | ||
| 110 | default y | 106 | default y |
| 111 | ---help--- | 107 | ---help--- |
| 112 | Allow the system to enter sleep states in which main memory is | 108 | Allow the system to enter sleep states in which main memory is |
| @@ -140,7 +136,6 @@ config HIBERNATION | |||
| 140 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE | 136 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE |
| 141 | select LZO_COMPRESS | 137 | select LZO_COMPRESS |
| 142 | select LZO_DECOMPRESS | 138 | select LZO_DECOMPRESS |
| 143 | select SUSPEND_NVS if HAS_IOMEM | ||
| 144 | ---help--- | 139 | ---help--- |
| 145 | Enable the suspend to disk (STD) functionality, which is usually | 140 | Enable the suspend to disk (STD) functionality, which is usually |
| 146 | called "hibernation" in user interfaces. STD checkpoints the | 141 | called "hibernation" in user interfaces. STD checkpoints the |
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index b75597235d85..c350e18b53e3 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
| @@ -7,6 +7,5 @@ obj-$(CONFIG_SUSPEND) += suspend.o | |||
| 7 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | 7 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
| 8 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ | 8 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ |
| 9 | block_io.o | 9 | block_io.o |
| 10 | obj-$(CONFIG_SUSPEND_NVS) += nvs.o | ||
| 11 | 10 | ||
| 12 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o | 11 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o |
diff --git a/lib/ioremap.c b/lib/ioremap.c index 5730ecd3eb66..da4e2ad74b68 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
| 10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
| 11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
| 12 | #include <linux/module.h> | ||
| 12 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
| 13 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
| 14 | 15 | ||
| @@ -90,3 +91,4 @@ int ioremap_page_range(unsigned long addr, | |||
| 90 | 91 | ||
| 91 | return err; | 92 | return err; |
| 92 | } | 93 | } |
| 94 | EXPORT_SYMBOL_GPL(ioremap_page_range); | ||
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cac13b415635..f9b166732e70 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1175,6 +1175,7 @@ void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) | |||
| 1175 | { | 1175 | { |
| 1176 | vunmap_page_range(addr, addr + size); | 1176 | vunmap_page_range(addr, addr + size); |
| 1177 | } | 1177 | } |
| 1178 | EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); | ||
| 1178 | 1179 | ||
| 1179 | /** | 1180 | /** |
| 1180 | * unmap_kernel_range - unmap kernel VM area and flush cache and TLB | 1181 | * unmap_kernel_range - unmap kernel VM area and flush cache and TLB |
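ioremap_page_range() and unmap_kernel_range_noflush() are exported together, which lets a module map and later unmap pages inside a kernel virtual range it has reserved beforehand (for example with __get_vm_area()). A minimal sketch of that pairing, assuming a phys_addr_t physical address and PAGE_KERNEL protections; the helper names are illustrative:

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* vaddr must lie inside a kernel VM area reserved by the caller. */
static void __iomem *my_map_one_page(unsigned long vaddr, phys_addr_t paddr)
{
        if (ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, PAGE_KERNEL))
                return NULL;
        return (void __iomem *)vaddr;
}

static void my_unmap_one_page(unsigned long vaddr)
{
        /*
         * Tears down the page tables only; the caller is responsible
         * for any TLB flushing it needs.
         */
        unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
}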
