diff options
Diffstat (limited to 'drivers/acpi')
44 files changed, 1790 insertions, 1461 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 3f3489c5ca8c..788e88eb18ec 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -53,10 +53,6 @@ config ACPI_PROCFS | |||
53 | they have been replaced by functions in /sys. | 53 | they have been replaced by functions in /sys. |
54 | The deprecated files (and their replacements) include: | 54 | The deprecated files (and their replacements) include: |
55 | 55 | ||
56 | /proc/acpi/processor/*/throttling (/sys/class/thermal/ | ||
57 | cooling_device*/*) | ||
58 | /proc/acpi/video/*/brightness (/sys/class/backlight/) | ||
59 | /proc/acpi/thermal_zone/*/* (/sys/class/thermal/) | ||
60 | This option has no effect on /proc/acpi/ files | 56 | This option has no effect on /proc/acpi/ files |
61 | and functions which do not yet exist in /sys. | 57 | and functions which do not yet exist in /sys. |
62 | 58 | ||
@@ -74,6 +70,8 @@ config ACPI_PROCFS_POWER | |||
74 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) | 70 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) |
75 | This option has no effect on /proc/acpi/ directories | 71 | This option has no effect on /proc/acpi/ directories |
76 | and functions, which do not yet exist in /sys | 72 | and functions, which do not yet exist in /sys |
73 | This option, together with the proc directories, will be | ||
74 | deleted in 2.6.39. | ||
77 | 75 | ||
78 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ | 76 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ |
79 | 77 | ||
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 3d031d02e54b..9cc9f2c4da79 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -24,7 +24,7 @@ acpi-y += atomicio.o | |||
24 | # sleep related files | 24 | # sleep related files |
25 | acpi-y += wakeup.o | 25 | acpi-y += wakeup.o |
26 | acpi-y += sleep.o | 26 | acpi-y += sleep.o |
27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o | 27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o |
28 | 28 | ||
29 | 29 | ||
30 | # | 30 | # |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index ba9afeaa23ac..58c3f74bd84c 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
@@ -100,24 +100,7 @@ static const struct file_operations acpi_ac_fops = { | |||
100 | .release = single_release, | 100 | .release = single_release, |
101 | }; | 101 | }; |
102 | #endif | 102 | #endif |
103 | static int get_ac_property(struct power_supply *psy, | ||
104 | enum power_supply_property psp, | ||
105 | union power_supply_propval *val) | ||
106 | { | ||
107 | struct acpi_ac *ac = to_acpi_ac(psy); | ||
108 | switch (psp) { | ||
109 | case POWER_SUPPLY_PROP_ONLINE: | ||
110 | val->intval = ac->state; | ||
111 | break; | ||
112 | default: | ||
113 | return -EINVAL; | ||
114 | } | ||
115 | return 0; | ||
116 | } | ||
117 | 103 | ||
118 | static enum power_supply_property ac_props[] = { | ||
119 | POWER_SUPPLY_PROP_ONLINE, | ||
120 | }; | ||
121 | /* -------------------------------------------------------------------------- | 104 | /* -------------------------------------------------------------------------- |
122 | AC Adapter Management | 105 | AC Adapter Management |
123 | -------------------------------------------------------------------------- */ | 106 | -------------------------------------------------------------------------- */ |
@@ -140,6 +123,35 @@ static int acpi_ac_get_state(struct acpi_ac *ac) | |||
140 | return 0; | 123 | return 0; |
141 | } | 124 | } |
142 | 125 | ||
126 | /* -------------------------------------------------------------------------- | ||
127 | sysfs I/F | ||
128 | -------------------------------------------------------------------------- */ | ||
129 | static int get_ac_property(struct power_supply *psy, | ||
130 | enum power_supply_property psp, | ||
131 | union power_supply_propval *val) | ||
132 | { | ||
133 | struct acpi_ac *ac = to_acpi_ac(psy); | ||
134 | |||
135 | if (!ac) | ||
136 | return -ENODEV; | ||
137 | |||
138 | if (acpi_ac_get_state(ac)) | ||
139 | return -ENODEV; | ||
140 | |||
141 | switch (psp) { | ||
142 | case POWER_SUPPLY_PROP_ONLINE: | ||
143 | val->intval = ac->state; | ||
144 | break; | ||
145 | default: | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static enum power_supply_property ac_props[] = { | ||
152 | POWER_SUPPLY_PROP_ONLINE, | ||
153 | }; | ||
154 | |||
143 | #ifdef CONFIG_ACPI_PROCFS_POWER | 155 | #ifdef CONFIG_ACPI_PROCFS_POWER |
144 | /* -------------------------------------------------------------------------- | 156 | /* -------------------------------------------------------------------------- |
145 | FS Interface (/proc) | 157 | FS Interface (/proc) |
@@ -185,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device) | |||
185 | { | 197 | { |
186 | struct proc_dir_entry *entry = NULL; | 198 | struct proc_dir_entry *entry = NULL; |
187 | 199 | ||
188 | 200 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," | |
201 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
189 | if (!acpi_device_dir(device)) { | 202 | if (!acpi_device_dir(device)) { |
190 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | 203 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), |
191 | acpi_ac_dir); | 204 | acpi_ac_dir); |
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index a7e1d1aa4107..eec2eadd2431 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile | |||
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ | |||
14 | 14 | ||
15 | acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ | 15 | acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ |
16 | evmisc.o evrgnini.o evxface.o evxfregn.o \ | 16 | evmisc.o evrgnini.o evxface.o evxfregn.o \ |
17 | evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o | 17 | evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o |
18 | 18 | ||
19 | acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ | 19 | acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ |
20 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ | 20 | exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index a6f99cc37a19..70e0b28801aa 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void); | |||
51 | 51 | ||
52 | acpi_status acpi_ev_install_xrupt_handlers(void); | 52 | acpi_status acpi_ev_install_xrupt_handlers(void); |
53 | 53 | ||
54 | acpi_status acpi_ev_install_fadt_gpes(void); | ||
55 | |||
56 | u32 acpi_ev_fixed_event_detect(void); | 54 | u32 acpi_ev_fixed_event_detect(void); |
57 | 55 | ||
58 | /* | 56 | /* |
@@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); | |||
82 | 80 | ||
83 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 81 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
84 | 82 | ||
85 | acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 83 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); |
86 | 84 | ||
87 | acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 85 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); |
88 | 86 | ||
89 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | 87 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, |
90 | u32 gpe_number); | 88 | u32 gpe_number); |
@@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, | |||
93 | struct acpi_gpe_block_info | 91 | struct acpi_gpe_block_info |
94 | *gpe_block); | 92 | *gpe_block); |
95 | 93 | ||
94 | acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info); | ||
95 | |||
96 | /* | 96 | /* |
97 | * evgpeblk - Upper-level GPE block support | 97 | * evgpeblk - Upper-level GPE block support |
98 | */ | 98 | */ |
@@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
107 | acpi_status | 107 | acpi_status |
108 | acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 108 | acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
109 | struct acpi_gpe_block_info *gpe_block, | 109 | struct acpi_gpe_block_info *gpe_block, |
110 | void *ignored); | 110 | void *context); |
111 | 111 | ||
112 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); | 112 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); |
113 | 113 | ||
114 | u32 | 114 | u32 |
115 | acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, | 115 | acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, |
116 | struct acpi_gpe_event_info *gpe_event_info, | ||
116 | u32 gpe_number); | 117 | u32 gpe_number); |
117 | 118 | ||
118 | /* | 119 | /* |
@@ -126,10 +127,6 @@ acpi_status | |||
126 | acpi_ev_match_gpe_method(acpi_handle obj_handle, | 127 | acpi_ev_match_gpe_method(acpi_handle obj_handle, |
127 | u32 level, void *context, void **return_value); | 128 | u32 level, void *context, void **return_value); |
128 | 129 | ||
129 | acpi_status | ||
130 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
131 | u32 level, void *context, void **return_value); | ||
132 | |||
133 | /* | 130 | /* |
134 | * evgpeutil - GPE utilities | 131 | * evgpeutil - GPE utilities |
135 | */ | 132 | */ |
@@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); | |||
138 | 135 | ||
139 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); | 136 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); |
140 | 137 | ||
138 | acpi_status | ||
139 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
140 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
141 | |||
141 | struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); | 142 | struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); |
142 | 143 | ||
143 | acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); | 144 | acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ad88fcae4eb9..9bb69c59bb12 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running; | |||
146 | 146 | ||
147 | extern u32 acpi_gbl_nesting_level; | 147 | extern u32 acpi_gbl_nesting_level; |
148 | 148 | ||
149 | ACPI_EXTERN u32 acpi_gpe_count; | ||
150 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
151 | |||
149 | /* Support for dynamic control method tracing mechanism */ | 152 | /* Support for dynamic control method tracing mechanism */ |
150 | 153 | ||
151 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | 154 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; |
@@ -370,7 +373,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler | |||
370 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; | 373 | ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; |
371 | ACPI_EXTERN struct acpi_gpe_block_info | 374 | ACPI_EXTERN struct acpi_gpe_block_info |
372 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; | 375 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; |
373 | ACPI_EXTERN u8 acpi_all_gpes_initialized; | 376 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; |
377 | ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; | ||
378 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; | ||
374 | 379 | ||
375 | /***************************************************************************** | 380 | /***************************************************************************** |
376 | * | 381 | * |
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 167470ad2d21..258d628793ea 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h | |||
@@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, | |||
94 | struct acpi_gpe_register_info *gpe_register_info); | 94 | struct acpi_gpe_register_info *gpe_register_info); |
95 | 95 | ||
96 | acpi_status | 96 | acpi_status |
97 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); | 97 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action); |
98 | 98 | ||
99 | acpi_status | 99 | acpi_status |
100 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 100 | acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 2ceb0c05b2d7..74000f5b7dab 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -408,17 +408,18 @@ struct acpi_predefined_data { | |||
408 | 408 | ||
409 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ | 409 | /* Dispatch info for each GPE -- either a method or handler, cannot be both */ |
410 | 410 | ||
411 | struct acpi_handler_info { | 411 | struct acpi_gpe_handler_info { |
412 | acpi_event_handler address; /* Address of handler, if any */ | 412 | acpi_gpe_handler address; /* Address of handler, if any */ |
413 | void *context; /* Context to be passed to handler */ | 413 | void *context; /* Context to be passed to handler */ |
414 | struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ | 414 | struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ |
415 | u8 orig_flags; /* Original misc info about this GPE */ | 415 | u8 original_flags; /* Original (pre-handler) GPE info */ |
416 | u8 orig_enabled; /* Set if the GPE was originally enabled */ | 416 | u8 originally_enabled; /* True if GPE was originally enabled */ |
417 | }; | 417 | }; |
418 | 418 | ||
419 | union acpi_gpe_dispatch_info { | 419 | union acpi_gpe_dispatch_info { |
420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ | 420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ |
421 | struct acpi_handler_info *handler; | 421 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ |
422 | struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ | ||
422 | }; | 423 | }; |
423 | 424 | ||
424 | /* | 425 | /* |
@@ -458,7 +459,7 @@ struct acpi_gpe_block_info { | |||
458 | u32 register_count; /* Number of register pairs in block */ | 459 | u32 register_count; /* Number of register pairs in block */ |
459 | u16 gpe_count; /* Number of individual GPEs in block */ | 460 | u16 gpe_count; /* Number of individual GPEs in block */ |
460 | u8 block_base_number; /* Base GPE number for this block */ | 461 | u8 block_base_number; /* Base GPE number for this block */ |
461 | u8 initialized; /* If set, the GPE block has been initialized */ | 462 | u8 initialized; /* TRUE if this block is initialized */ |
462 | }; | 463 | }; |
463 | 464 | ||
464 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ | 465 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ |
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index c61c3039c31a..e5e313c663a5 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c | |||
@@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void) | |||
217 | status_bit_mask) | 217 | status_bit_mask) |
218 | && (fixed_enable & acpi_gbl_fixed_event_info[i]. | 218 | && (fixed_enable & acpi_gbl_fixed_event_info[i]. |
219 | enable_bit_mask)) { | 219 | enable_bit_mask)) { |
220 | /* | ||
221 | * Found an active (signalled) event. Invoke global event | ||
222 | * handler if present. | ||
223 | */ | ||
224 | acpi_fixed_event_count[i]++; | ||
225 | if (acpi_gbl_global_event_handler) { | ||
226 | acpi_gbl_global_event_handler | ||
227 | (ACPI_EVENT_TYPE_FIXED, NULL, i, | ||
228 | acpi_gbl_global_event_handler_context); | ||
229 | } | ||
220 | 230 | ||
221 | /* Found an active (signalled) event */ | ||
222 | acpi_os_fixed_event_count(i); | ||
223 | int_status |= acpi_ev_fixed_event_dispatch(i); | 231 | int_status |= acpi_ev_fixed_event_dispatch(i); |
224 | } | 232 | } |
225 | } | 233 | } |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index f226eac314db..7c339d34ab42 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe") | |||
52 | /* Local prototypes */ | 52 | /* Local prototypes */ |
53 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); | 53 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); |
54 | 54 | ||
55 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); | ||
56 | |||
55 | /******************************************************************************* | 57 | /******************************************************************************* |
56 | * | 58 | * |
57 | * FUNCTION: acpi_ev_update_gpe_enable_mask | 59 | * FUNCTION: acpi_ev_update_gpe_enable_mask |
@@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
102 | * | 104 | * |
103 | * RETURN: Status | 105 | * RETURN: Status |
104 | * | 106 | * |
105 | * DESCRIPTION: Clear the given GPE from stale events and enable it. | 107 | * DESCRIPTION: Clear a GPE of stale events and enable it. |
106 | * | 108 | * |
107 | ******************************************************************************/ | 109 | ******************************************************************************/ |
108 | acpi_status | 110 | acpi_status |
@@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
113 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | 115 | ACPI_FUNCTION_TRACE(ev_enable_gpe); |
114 | 116 | ||
115 | /* | 117 | /* |
116 | * We will only allow a GPE to be enabled if it has either an | 118 | * We will only allow a GPE to be enabled if it has either an associated |
117 | * associated method (_Lxx/_Exx) or a handler. Otherwise, the | 119 | * method (_Lxx/_Exx) or a handler, or is using the implicit notify |
118 | * GPE will be immediately disabled by acpi_ev_gpe_dispatch the | 120 | * feature. Otherwise, the GPE will be immediately disabled by |
119 | * first time it fires. | 121 | * acpi_ev_gpe_dispatch the first time it fires. |
120 | */ | 122 | */ |
121 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { | 123 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == |
124 | ACPI_GPE_DISPATCH_NONE) { | ||
122 | return_ACPI_STATUS(AE_NO_HANDLER); | 125 | return_ACPI_STATUS(AE_NO_HANDLER); |
123 | } | 126 | } |
124 | 127 | ||
@@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
137 | 140 | ||
138 | /******************************************************************************* | 141 | /******************************************************************************* |
139 | * | 142 | * |
140 | * FUNCTION: acpi_raw_enable_gpe | 143 | * FUNCTION: acpi_ev_add_gpe_reference |
141 | * | 144 | * |
142 | * PARAMETERS: gpe_event_info - GPE to enable | 145 | * PARAMETERS: gpe_event_info - Add a reference to this GPE |
143 | * | 146 | * |
144 | * RETURN: Status | 147 | * RETURN: Status |
145 | * | 148 | * |
@@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
148 | * | 151 | * |
149 | ******************************************************************************/ | 152 | ******************************************************************************/ |
150 | 153 | ||
151 | acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | 154 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) |
152 | { | 155 | { |
153 | acpi_status status = AE_OK; | 156 | acpi_status status = AE_OK; |
154 | 157 | ||
158 | ACPI_FUNCTION_TRACE(ev_add_gpe_reference); | ||
159 | |||
155 | if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { | 160 | if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { |
156 | return_ACPI_STATUS(AE_LIMIT); | 161 | return_ACPI_STATUS(AE_LIMIT); |
157 | } | 162 | } |
158 | 163 | ||
159 | gpe_event_info->runtime_count++; | 164 | gpe_event_info->runtime_count++; |
160 | if (gpe_event_info->runtime_count == 1) { | 165 | if (gpe_event_info->runtime_count == 1) { |
166 | |||
167 | /* Enable on first reference */ | ||
168 | |||
161 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 169 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
162 | if (ACPI_SUCCESS(status)) { | 170 | if (ACPI_SUCCESS(status)) { |
163 | status = acpi_ev_enable_gpe(gpe_event_info); | 171 | status = acpi_ev_enable_gpe(gpe_event_info); |
@@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
173 | 181 | ||
174 | /******************************************************************************* | 182 | /******************************************************************************* |
175 | * | 183 | * |
176 | * FUNCTION: acpi_raw_disable_gpe | 184 | * FUNCTION: acpi_ev_remove_gpe_reference |
177 | * | 185 | * |
178 | * PARAMETERS: gpe_event_info - GPE to disable | 186 | * PARAMETERS: gpe_event_info - Remove a reference to this GPE |
179 | * | 187 | * |
180 | * RETURN: Status | 188 | * RETURN: Status |
181 | * | 189 | * |
@@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
184 | * | 192 | * |
185 | ******************************************************************************/ | 193 | ******************************************************************************/ |
186 | 194 | ||
187 | acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | 195 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) |
188 | { | 196 | { |
189 | acpi_status status = AE_OK; | 197 | acpi_status status = AE_OK; |
190 | 198 | ||
199 | ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); | ||
200 | |||
191 | if (!gpe_event_info->runtime_count) { | 201 | if (!gpe_event_info->runtime_count) { |
192 | return_ACPI_STATUS(AE_LIMIT); | 202 | return_ACPI_STATUS(AE_LIMIT); |
193 | } | 203 | } |
194 | 204 | ||
195 | gpe_event_info->runtime_count--; | 205 | gpe_event_info->runtime_count--; |
196 | if (!gpe_event_info->runtime_count) { | 206 | if (!gpe_event_info->runtime_count) { |
207 | |||
208 | /* Disable on last reference */ | ||
209 | |||
197 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 210 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
198 | if (ACPI_SUCCESS(status)) { | 211 | if (ACPI_SUCCESS(status)) { |
199 | status = acpi_hw_low_set_gpe(gpe_event_info, | 212 | status = acpi_hw_low_set_gpe(gpe_event_info, |
@@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
379 | } | 392 | } |
380 | 393 | ||
381 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | 394 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, |
382 | "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", | 395 | "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", |
383 | gpe_register_info->base_gpe_number, | 396 | gpe_register_info->base_gpe_number, |
384 | status_reg, enable_reg)); | 397 | status_reg, enable_reg)); |
385 | 398 | ||
@@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
405 | * or method. | 418 | * or method. |
406 | */ | 419 | */ |
407 | int_status |= | 420 | int_status |= |
408 | acpi_ev_gpe_dispatch(&gpe_block-> | 421 | acpi_ev_gpe_dispatch(gpe_block-> |
422 | node, | ||
423 | &gpe_block-> | ||
409 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); | 424 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); |
410 | } | 425 | } |
411 | } | 426 | } |
@@ -435,17 +450,25 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
435 | * an interrupt handler. | 450 | * an interrupt handler. |
436 | * | 451 | * |
437 | ******************************************************************************/ | 452 | ******************************************************************************/ |
438 | static void acpi_ev_asynch_enable_gpe(void *context); | ||
439 | 453 | ||
440 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | 454 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) |
441 | { | 455 | { |
442 | struct acpi_gpe_event_info *gpe_event_info = (void *)context; | 456 | struct acpi_gpe_event_info *gpe_event_info = context; |
443 | acpi_status status; | 457 | acpi_status status; |
444 | struct acpi_gpe_event_info local_gpe_event_info; | 458 | struct acpi_gpe_event_info *local_gpe_event_info; |
445 | struct acpi_evaluate_info *info; | 459 | struct acpi_evaluate_info *info; |
446 | 460 | ||
447 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 461 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
448 | 462 | ||
463 | /* Allocate a local GPE block */ | ||
464 | |||
465 | local_gpe_event_info = | ||
466 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); | ||
467 | if (!local_gpe_event_info) { | ||
468 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); | ||
469 | return_VOID; | ||
470 | } | ||
471 | |||
449 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | 472 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); |
450 | if (ACPI_FAILURE(status)) { | 473 | if (ACPI_FAILURE(status)) { |
451 | return_VOID; | 474 | return_VOID; |
@@ -462,7 +485,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
462 | * Take a snapshot of the GPE info for this level - we copy the info to | 485 | * Take a snapshot of the GPE info for this level - we copy the info to |
463 | * prevent a race condition with remove_handler/remove_block. | 486 | * prevent a race condition with remove_handler/remove_block. |
464 | */ | 487 | */ |
465 | ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, | 488 | ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, |
466 | sizeof(struct acpi_gpe_event_info)); | 489 | sizeof(struct acpi_gpe_event_info)); |
467 | 490 | ||
468 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 491 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
@@ -470,12 +493,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
470 | return_VOID; | 493 | return_VOID; |
471 | } | 494 | } |
472 | 495 | ||
473 | /* | 496 | /* Do the correct dispatch - normal method or implicit notify */ |
474 | * Must check for control method type dispatch one more time to avoid a | 497 | |
475 | * race with ev_gpe_install_handler | 498 | switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { |
476 | */ | 499 | case ACPI_GPE_DISPATCH_NOTIFY: |
477 | if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == | 500 | |
478 | ACPI_GPE_DISPATCH_METHOD) { | 501 | /* |
502 | * Implicit notify. | ||
503 | * Dispatch a DEVICE_WAKE notify to the appropriate handler. | ||
504 | * NOTE: the request is queued for execution after this method | ||
505 | * completes. The notify handlers are NOT invoked synchronously | ||
506 | * from this thread -- because handlers may in turn run other | ||
507 | * control methods. | ||
508 | */ | ||
509 | status = | ||
510 | acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. | ||
511 | device_node, | ||
512 | ACPI_NOTIFY_DEVICE_WAKE); | ||
513 | break; | ||
514 | |||
515 | case ACPI_GPE_DISPATCH_METHOD: | ||
479 | 516 | ||
480 | /* Allocate the evaluation information block */ | 517 | /* Allocate the evaluation information block */ |
481 | 518 | ||
@@ -488,7 +525,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
488 | * control method that corresponds to this GPE | 525 | * control method that corresponds to this GPE |
489 | */ | 526 | */ |
490 | info->prefix_node = | 527 | info->prefix_node = |
491 | local_gpe_event_info.dispatch.method_node; | 528 | local_gpe_event_info->dispatch.method_node; |
492 | info->flags = ACPI_IGNORE_RETURN_VALUE; | 529 | info->flags = ACPI_IGNORE_RETURN_VALUE; |
493 | 530 | ||
494 | status = acpi_ns_evaluate(info); | 531 | status = acpi_ns_evaluate(info); |
@@ -499,46 +536,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
499 | ACPI_EXCEPTION((AE_INFO, status, | 536 | ACPI_EXCEPTION((AE_INFO, status, |
500 | "while evaluating GPE method [%4.4s]", | 537 | "while evaluating GPE method [%4.4s]", |
501 | acpi_ut_get_node_name | 538 | acpi_ut_get_node_name |
502 | (local_gpe_event_info.dispatch. | 539 | (local_gpe_event_info->dispatch. |
503 | method_node))); | 540 | method_node))); |
504 | } | 541 | } |
542 | |||
543 | break; | ||
544 | |||
545 | default: | ||
546 | return_VOID; /* Should never happen */ | ||
505 | } | 547 | } |
548 | |||
506 | /* Defer enabling of GPE until all notify handlers are done */ | 549 | /* Defer enabling of GPE until all notify handlers are done */ |
507 | acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, | 550 | |
508 | gpe_event_info); | 551 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, |
552 | acpi_ev_asynch_enable_gpe, | ||
553 | local_gpe_event_info); | ||
554 | if (ACPI_FAILURE(status)) { | ||
555 | ACPI_FREE(local_gpe_event_info); | ||
556 | } | ||
509 | return_VOID; | 557 | return_VOID; |
510 | } | 558 | } |
511 | 559 | ||
512 | static void acpi_ev_asynch_enable_gpe(void *context) | 560 | |
561 | /******************************************************************************* | ||
562 | * | ||
563 | * FUNCTION: acpi_ev_asynch_enable_gpe | ||
564 | * | ||
565 | * PARAMETERS: Context (gpe_event_info) - Info for this GPE | ||
566 | * Callback from acpi_os_execute | ||
567 | * | ||
568 | * RETURN: None | ||
569 | * | ||
570 | * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to | ||
571 | * complete (i.e., finish execution of Notify) | ||
572 | * | ||
573 | ******************************************************************************/ | ||
574 | |||
575 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) | ||
513 | { | 576 | { |
514 | struct acpi_gpe_event_info *gpe_event_info = context; | 577 | struct acpi_gpe_event_info *gpe_event_info = context; |
578 | |||
579 | (void)acpi_ev_finish_gpe(gpe_event_info); | ||
580 | |||
581 | ACPI_FREE(gpe_event_info); | ||
582 | return; | ||
583 | } | ||
584 | |||
585 | |||
586 | /******************************************************************************* | ||
587 | * | ||
588 | * FUNCTION: acpi_ev_finish_gpe | ||
589 | * | ||
590 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
591 | * | ||
592 | * RETURN: Status | ||
593 | * | ||
594 | * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution | ||
595 | * of a GPE method or a synchronous or asynchronous GPE handler. | ||
596 | * | ||
597 | ******************************************************************************/ | ||
598 | |||
599 | acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) | ||
600 | { | ||
515 | acpi_status status; | 601 | acpi_status status; |
602 | |||
516 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 603 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == |
517 | ACPI_GPE_LEVEL_TRIGGERED) { | 604 | ACPI_GPE_LEVEL_TRIGGERED) { |
518 | /* | 605 | /* |
519 | * GPE is level-triggered, we clear the GPE status bit after handling | 606 | * GPE is level-triggered, we clear the GPE status bit after |
520 | * the event. | 607 | * handling the event. |
521 | */ | 608 | */ |
522 | status = acpi_hw_clear_gpe(gpe_event_info); | 609 | status = acpi_hw_clear_gpe(gpe_event_info); |
523 | if (ACPI_FAILURE(status)) { | 610 | if (ACPI_FAILURE(status)) { |
524 | return_VOID; | 611 | return (status); |
525 | } | 612 | } |
526 | } | 613 | } |
527 | 614 | ||
528 | /* | 615 | /* |
529 | * Enable this GPE, conditionally. This means that the GPE will only be | 616 | * Enable this GPE, conditionally. This means that the GPE will |
530 | * physically enabled if the enable_for_run bit is set in the event_info | 617 | * only be physically enabled if the enable_for_run bit is set |
618 | * in the event_info. | ||
531 | */ | 619 | */ |
532 | (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); | 620 | (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); |
533 | 621 | return (AE_OK); | |
534 | return_VOID; | ||
535 | } | 622 | } |
536 | 623 | ||
624 | |||
537 | /******************************************************************************* | 625 | /******************************************************************************* |
538 | * | 626 | * |
539 | * FUNCTION: acpi_ev_gpe_dispatch | 627 | * FUNCTION: acpi_ev_gpe_dispatch |
540 | * | 628 | * |
541 | * PARAMETERS: gpe_event_info - Info for this GPE | 629 | * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 |
630 | * gpe_event_info - Info for this GPE | ||
542 | * gpe_number - Number relative to the parent GPE block | 631 | * gpe_number - Number relative to the parent GPE block |
543 | * | 632 | * |
544 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | 633 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED |
@@ -551,13 +640,22 @@ static void acpi_ev_asynch_enable_gpe(void *context) | |||
551 | ******************************************************************************/ | 640 | ******************************************************************************/ |
552 | 641 | ||
553 | u32 | 642 | u32 |
554 | acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | 643 | acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, |
644 | struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | ||
555 | { | 645 | { |
556 | acpi_status status; | 646 | acpi_status status; |
647 | u32 return_value; | ||
557 | 648 | ||
558 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); | 649 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); |
559 | 650 | ||
560 | acpi_os_gpe_count(gpe_number); | 651 | /* Invoke global event handler if present */ |
652 | |||
653 | acpi_gpe_count++; | ||
654 | if (acpi_gbl_global_event_handler) { | ||
655 | acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, | ||
656 | gpe_number, | ||
657 | acpi_gbl_global_event_handler_context); | ||
658 | } | ||
561 | 659 | ||
562 | /* | 660 | /* |
563 | * If edge-triggered, clear the GPE status bit now. Note that | 661 | * If edge-triggered, clear the GPE status bit now. Note that |
@@ -568,59 +666,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
568 | status = acpi_hw_clear_gpe(gpe_event_info); | 666 | status = acpi_hw_clear_gpe(gpe_event_info); |
569 | if (ACPI_FAILURE(status)) { | 667 | if (ACPI_FAILURE(status)) { |
570 | ACPI_EXCEPTION((AE_INFO, status, | 668 | ACPI_EXCEPTION((AE_INFO, status, |
571 | "Unable to clear GPE[0x%2X]", | 669 | "Unable to clear GPE%02X", gpe_number)); |
572 | gpe_number)); | ||
573 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | 670 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); |
574 | } | 671 | } |
575 | } | 672 | } |
576 | 673 | ||
577 | /* | 674 | /* |
578 | * Dispatch the GPE to either an installed handler, or the control method | 675 | * Always disable the GPE so that it does not keep firing before |
579 | * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke | 676 | * any asynchronous activity completes (either from the execution |
580 | * it and do not attempt to run the method. If there is neither a handler | 677 | * of a GPE method or an asynchronous GPE handler.) |
581 | * nor a method, we disable this GPE to prevent further such pointless | 678 | * |
582 | * events from firing. | 679 | * If there is no handler or method to run, just disable the |
680 | * GPE and leave it disabled permanently to prevent further such | ||
681 | * pointless events from firing. | ||
682 | */ | ||
683 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
684 | if (ACPI_FAILURE(status)) { | ||
685 | ACPI_EXCEPTION((AE_INFO, status, | ||
686 | "Unable to disable GPE%02X", gpe_number)); | ||
687 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Dispatch the GPE to either an installed handler or the control | ||
692 | * method associated with this GPE (_Lxx or _Exx). If a handler | ||
693 | * exists, we invoke it and do not attempt to run the method. | ||
694 | * If there is neither a handler nor a method, leave the GPE | ||
695 | * disabled. | ||
583 | */ | 696 | */ |
584 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | 697 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { |
585 | case ACPI_GPE_DISPATCH_HANDLER: | 698 | case ACPI_GPE_DISPATCH_HANDLER: |
586 | 699 | ||
587 | /* | 700 | /* Invoke the installed handler (at interrupt level) */ |
588 | * Invoke the installed handler (at interrupt level) | ||
589 | * Ignore return status for now. | ||
590 | * TBD: leave GPE disabled on error? | ||
591 | */ | ||
592 | (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> | ||
593 | dispatch. | ||
594 | handler-> | ||
595 | context); | ||
596 | 701 | ||
597 | /* It is now safe to clear level-triggered events. */ | 702 | return_value = |
703 | gpe_event_info->dispatch.handler->address(gpe_device, | ||
704 | gpe_number, | ||
705 | gpe_event_info-> | ||
706 | dispatch.handler-> | ||
707 | context); | ||
598 | 708 | ||
599 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | 709 | /* If requested, clear (if level-triggered) and reenable the GPE */ |
600 | ACPI_GPE_LEVEL_TRIGGERED) { | 710 | |
601 | status = acpi_hw_clear_gpe(gpe_event_info); | 711 | if (return_value & ACPI_REENABLE_GPE) { |
602 | if (ACPI_FAILURE(status)) { | 712 | (void)acpi_ev_finish_gpe(gpe_event_info); |
603 | ACPI_EXCEPTION((AE_INFO, status, | ||
604 | "Unable to clear GPE[0x%2X]", | ||
605 | gpe_number)); | ||
606 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
607 | } | ||
608 | } | 713 | } |
609 | break; | 714 | break; |
610 | 715 | ||
611 | case ACPI_GPE_DISPATCH_METHOD: | 716 | case ACPI_GPE_DISPATCH_METHOD: |
612 | 717 | case ACPI_GPE_DISPATCH_NOTIFY: | |
613 | /* | ||
614 | * Disable the GPE, so it doesn't keep firing before the method has a | ||
615 | * chance to run (it runs asynchronously with interrupts enabled). | ||
616 | */ | ||
617 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
618 | if (ACPI_FAILURE(status)) { | ||
619 | ACPI_EXCEPTION((AE_INFO, status, | ||
620 | "Unable to disable GPE[0x%2X]", | ||
621 | gpe_number)); | ||
622 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
623 | } | ||
624 | 718 | ||
625 | /* | 719 | /* |
626 | * Execute the method associated with the GPE | 720 | * Execute the method associated with the GPE |
@@ -631,7 +725,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
631 | gpe_event_info); | 725 | gpe_event_info); |
632 | if (ACPI_FAILURE(status)) { | 726 | if (ACPI_FAILURE(status)) { |
633 | ACPI_EXCEPTION((AE_INFO, status, | 727 | ACPI_EXCEPTION((AE_INFO, status, |
634 | "Unable to queue handler for GPE[0x%2X] - event disabled", | 728 | "Unable to queue handler for GPE%2X - event disabled", |
635 | gpe_number)); | 729 | gpe_number)); |
636 | } | 730 | } |
637 | break; | 731 | break; |
@@ -644,20 +738,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | |||
644 | * a GPE to be enabled if it has no handler or method. | 738 | * a GPE to be enabled if it has no handler or method. |
645 | */ | 739 | */ |
646 | ACPI_ERROR((AE_INFO, | 740 | ACPI_ERROR((AE_INFO, |
647 | "No handler or method for GPE[0x%2X], disabling event", | 741 | "No handler or method for GPE%02X, disabling event", |
648 | gpe_number)); | 742 | gpe_number)); |
649 | 743 | ||
650 | /* | ||
651 | * Disable the GPE. The GPE will remain disabled a handler | ||
652 | * is installed or ACPICA is restarted. | ||
653 | */ | ||
654 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
655 | if (ACPI_FAILURE(status)) { | ||
656 | ACPI_EXCEPTION((AE_INFO, status, | ||
657 | "Unable to disable GPE[0x%2X]", | ||
658 | gpe_number)); | ||
659 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
660 | } | ||
661 | break; | 744 | break; |
662 | } | 745 | } |
663 | 746 | ||
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 020add3eee1c..9acb86958c09 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
361 | 361 | ||
362 | gpe_block->node = gpe_device; | 362 | gpe_block->node = gpe_device; |
363 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); | 363 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); |
364 | gpe_block->initialized = FALSE; | ||
364 | gpe_block->register_count = register_count; | 365 | gpe_block->register_count = register_count; |
365 | gpe_block->block_base_number = gpe_block_base_number; | 366 | gpe_block->block_base_number = gpe_block_base_number; |
366 | gpe_block->initialized = FALSE; | ||
367 | 367 | ||
368 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, | 368 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, |
369 | sizeof(struct acpi_generic_address)); | 369 | sizeof(struct acpi_generic_address)); |
@@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
386 | return_ACPI_STATUS(status); | 386 | return_ACPI_STATUS(status); |
387 | } | 387 | } |
388 | 388 | ||
389 | acpi_all_gpes_initialized = FALSE; | 389 | acpi_gbl_all_gpes_initialized = FALSE; |
390 | 390 | ||
391 | /* Find all GPE methods (_Lxx or_Exx) for this block */ | 391 | /* Find all GPE methods (_Lxx or_Exx) for this block */ |
392 | 392 | ||
@@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
423 | * | 423 | * |
424 | * FUNCTION: acpi_ev_initialize_gpe_block | 424 | * FUNCTION: acpi_ev_initialize_gpe_block |
425 | * | 425 | * |
426 | * PARAMETERS: gpe_device - Handle to the parent GPE block | 426 | * PARAMETERS: acpi_gpe_callback |
427 | * gpe_block - Gpe Block info | ||
428 | * | 427 | * |
429 | * RETURN: Status | 428 | * RETURN: Status |
430 | * | 429 | * |
431 | * DESCRIPTION: Initialize and enable a GPE block. First find and run any | 430 | * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have |
432 | * _PRT methods associated with the block, then enable the | 431 | * associated methods. |
433 | * appropriate GPEs. | ||
434 | * Note: Assumes namespace is locked. | 432 | * Note: Assumes namespace is locked. |
435 | * | 433 | * |
436 | ******************************************************************************/ | 434 | ******************************************************************************/ |
@@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
450 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); | 448 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); |
451 | 449 | ||
452 | /* | 450 | /* |
453 | * Ignore a null GPE block (e.g., if no GPE block 1 exists) and | 451 | * Ignore a null GPE block (e.g., if no GPE block 1 exists), and |
454 | * GPE blocks that have been initialized already. | 452 | * any GPE blocks that have been initialized already. |
455 | */ | 453 | */ |
456 | if (!gpe_block || gpe_block->initialized) { | 454 | if (!gpe_block || gpe_block->initialized) { |
457 | return_ACPI_STATUS(AE_OK); | 455 | return_ACPI_STATUS(AE_OK); |
@@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
459 | 457 | ||
460 | /* | 458 | /* |
461 | * Enable all GPEs that have a corresponding method and have the | 459 | * Enable all GPEs that have a corresponding method and have the |
462 | * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must | 460 | * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block |
463 | * be enabled via the acpi_enable_gpe() interface. | 461 | * must be enabled via the acpi_enable_gpe() interface. |
464 | */ | 462 | */ |
465 | gpe_enabled_count = 0; | 463 | gpe_enabled_count = 0; |
466 | 464 | ||
@@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
472 | gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; | 470 | gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; |
473 | gpe_event_info = &gpe_block->event_info[gpe_index]; | 471 | gpe_event_info = &gpe_block->event_info[gpe_index]; |
474 | 472 | ||
475 | /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ | 473 | /* |
476 | 474 | * Ignore GPEs that have no corresponding _Lxx/_Exx method | |
477 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) | 475 | * and GPEs that are used to wake the system |
476 | */ | ||
477 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
478 | ACPI_GPE_DISPATCH_NONE) | ||
479 | || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
480 | == ACPI_GPE_DISPATCH_HANDLER) | ||
478 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | 481 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { |
479 | continue; | 482 | continue; |
480 | } | 483 | } |
481 | 484 | ||
482 | status = acpi_raw_enable_gpe(gpe_event_info); | 485 | status = acpi_ev_add_gpe_reference(gpe_event_info); |
483 | if (ACPI_FAILURE(status)) { | 486 | if (ACPI_FAILURE(status)) { |
484 | ACPI_EXCEPTION((AE_INFO, status, | 487 | ACPI_EXCEPTION((AE_INFO, status, |
485 | "Could not enable GPE 0x%02X", | 488 | "Could not enable GPE 0x%02X", |
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 2c7def95f721..c59dc2340593 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c | |||
@@ -45,11 +45,27 @@ | |||
45 | #include "accommon.h" | 45 | #include "accommon.h" |
46 | #include "acevents.h" | 46 | #include "acevents.h" |
47 | #include "acnamesp.h" | 47 | #include "acnamesp.h" |
48 | #include "acinterp.h" | ||
49 | 48 | ||
50 | #define _COMPONENT ACPI_EVENTS | 49 | #define _COMPONENT ACPI_EVENTS |
51 | ACPI_MODULE_NAME("evgpeinit") | 50 | ACPI_MODULE_NAME("evgpeinit") |
52 | 51 | ||
52 | /* | ||
53 | * Note: History of _PRW support in ACPICA | ||
54 | * | ||
55 | * Originally (2000 - 2010), the GPE initialization code performed a walk of | ||
56 | * the entire namespace to execute the _PRW methods and detect all GPEs | ||
57 | * capable of waking the system. | ||
58 | * | ||
59 | * As of 10/2010, the _PRW method execution has been removed since it is | ||
60 | * actually unnecessary. The host OS must in fact execute all _PRW methods | ||
61 | * in order to identify the device/power-resource dependencies. We now put | ||
62 | * the onus on the host OS to identify the wake GPEs as part of this process | ||
63 | * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This | ||
64 | * not only reduces the complexity of the ACPICA initialization code, but in | ||
65 | * some cases (on systems with very large namespaces) it should reduce the | ||
66 | * kernel boot time as well. | ||
67 | */ | ||
68 | |||
53 | /******************************************************************************* | 69 | /******************************************************************************* |
54 | * | 70 | * |
55 | * FUNCTION: acpi_ev_gpe_initialize | 71 | * FUNCTION: acpi_ev_gpe_initialize |
@@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
222 | acpi_status status = AE_OK; | 238 | acpi_status status = AE_OK; |
223 | 239 | ||
224 | /* | 240 | /* |
225 | * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. | 241 | * Find any _Lxx/_Exx GPE methods that have just been loaded. |
226 | * | 242 | * |
227 | * Any GPEs that correspond to new _Lxx/_Exx methods are immediately | 243 | * Any GPEs that correspond to new _Lxx/_Exx methods are immediately |
228 | * enabled. | 244 | * enabled. |
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
235 | return; | 251 | return; |
236 | } | 252 | } |
237 | 253 | ||
254 | walk_info.count = 0; | ||
238 | walk_info.owner_id = table_owner_id; | 255 | walk_info.owner_id = table_owner_id; |
239 | walk_info.execute_by_owner_id = TRUE; | 256 | walk_info.execute_by_owner_id = TRUE; |
240 | walk_info.count = 0; | ||
241 | 257 | ||
242 | /* Walk the interrupt level descriptor list */ | 258 | /* Walk the interrupt level descriptor list */ |
243 | 259 | ||
@@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id) | |||
298 | * xx - is the GPE number [in HEX] | 314 | * xx - is the GPE number [in HEX] |
299 | * | 315 | * |
300 | * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods | 316 | * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods |
301 | * with that owner. | 317 | * with that owner. |
302 | * | 318 | * |
303 | ******************************************************************************/ | 319 | ******************************************************************************/ |
304 | 320 | ||
@@ -408,10 +424,14 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, | |||
408 | return_ACPI_STATUS(AE_OK); | 424 | return_ACPI_STATUS(AE_OK); |
409 | } | 425 | } |
410 | 426 | ||
427 | /* Disable the GPE in case it's been enabled already. */ | ||
428 | (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
429 | |||
411 | /* | 430 | /* |
412 | * Add the GPE information from above to the gpe_event_info block for | 431 | * Add the GPE information from above to the gpe_event_info block for |
413 | * use during dispatch of this GPE. | 432 | * use during dispatch of this GPE. |
414 | */ | 433 | */ |
434 | gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); | ||
415 | gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); | 435 | gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); |
416 | gpe_event_info->dispatch.method_node = method_node; | 436 | gpe_event_info->dispatch.method_node = method_node; |
417 | 437 | ||
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 19a0e513ea48..10e477494dcf 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
@@ -154,6 +154,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | |||
154 | 154 | ||
155 | /******************************************************************************* | 155 | /******************************************************************************* |
156 | * | 156 | * |
157 | * FUNCTION: acpi_ev_get_gpe_device | ||
158 | * | ||
159 | * PARAMETERS: GPE_WALK_CALLBACK | ||
160 | * | ||
161 | * RETURN: Status | ||
162 | * | ||
163 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
164 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
165 | * | ||
166 | ******************************************************************************/ | ||
167 | |||
168 | acpi_status | ||
169 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
170 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
171 | { | ||
172 | struct acpi_gpe_device_info *info = context; | ||
173 | |||
174 | /* Increment Index by the number of GPEs in this block */ | ||
175 | |||
176 | info->next_block_base_index += gpe_block->gpe_count; | ||
177 | |||
178 | if (info->index < info->next_block_base_index) { | ||
179 | /* | ||
180 | * The GPE index is within this block, get the node. Leave the node | ||
181 | * NULL for the FADT-defined GPEs | ||
182 | */ | ||
183 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
184 | info->gpe_device = gpe_block->node; | ||
185 | } | ||
186 | |||
187 | info->status = AE_OK; | ||
188 | return (AE_CTRL_END); | ||
189 | } | ||
190 | |||
191 | return (AE_OK); | ||
192 | } | ||
193 | |||
194 | /******************************************************************************* | ||
195 | * | ||
157 | * FUNCTION: acpi_ev_get_gpe_xrupt_block | 196 | * FUNCTION: acpi_ev_get_gpe_xrupt_block |
158 | * | 197 | * |
159 | * PARAMETERS: interrupt_number - Interrupt for a GPE block | 198 | * PARAMETERS: interrupt_number - Interrupt for a GPE block |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index fcaed9fb44ff..8e31bb5a973a 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
@@ -284,41 +284,41 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | |||
284 | * RETURN: ACPI_INTERRUPT_HANDLED | 284 | * RETURN: ACPI_INTERRUPT_HANDLED |
285 | * | 285 | * |
286 | * DESCRIPTION: Invoked directly from the SCI handler when a global lock | 286 | * DESCRIPTION: Invoked directly from the SCI handler when a global lock |
287 | * release interrupt occurs. Attempt to acquire the global lock, | 287 | * release interrupt occurs. If there's a thread waiting for |
288 | * if successful, signal the thread waiting for the lock. | 288 | * the global lock, signal it. |
289 | * | 289 | * |
290 | * NOTE: Assumes that the semaphore can be signaled from interrupt level. If | 290 | * NOTE: Assumes that the semaphore can be signaled from interrupt level. If |
291 | * this is not possible for some reason, a separate thread will have to be | 291 | * this is not possible for some reason, a separate thread will have to be |
292 | * scheduled to do this. | 292 | * scheduled to do this. |
293 | * | 293 | * |
294 | ******************************************************************************/ | 294 | ******************************************************************************/ |
295 | static u8 acpi_ev_global_lock_pending; | ||
296 | static spinlock_t _acpi_ev_global_lock_pending_lock; | ||
297 | #define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock | ||
295 | 298 | ||
296 | static u32 acpi_ev_global_lock_handler(void *context) | 299 | static u32 acpi_ev_global_lock_handler(void *context) |
297 | { | 300 | { |
298 | u8 acquired = FALSE; | 301 | acpi_status status; |
302 | acpi_cpu_flags flags; | ||
299 | 303 | ||
300 | /* | 304 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
301 | * Attempt to get the lock. | ||
302 | * | ||
303 | * If we don't get it now, it will be marked pending and we will | ||
304 | * take another interrupt when it becomes free. | ||
305 | */ | ||
306 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
307 | if (acquired) { | ||
308 | 305 | ||
309 | /* Got the lock, now wake all threads waiting for it */ | 306 | if (!acpi_ev_global_lock_pending) { |
307 | goto out; | ||
308 | } | ||
310 | 309 | ||
311 | acpi_gbl_global_lock_acquired = TRUE; | 310 | /* Send a unit to the semaphore */ |
312 | /* Send a unit to the semaphore */ | ||
313 | 311 | ||
314 | if (ACPI_FAILURE | 312 | status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); |
315 | (acpi_os_signal_semaphore | 313 | if (ACPI_FAILURE(status)) { |
316 | (acpi_gbl_global_lock_semaphore, 1))) { | 314 | ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); |
317 | ACPI_ERROR((AE_INFO, | ||
318 | "Could not signal Global Lock semaphore")); | ||
319 | } | ||
320 | } | 315 | } |
321 | 316 | ||
317 | acpi_ev_global_lock_pending = FALSE; | ||
318 | |||
319 | out: | ||
320 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); | ||
321 | |||
322 | return (ACPI_INTERRUPT_HANDLED); | 322 | return (ACPI_INTERRUPT_HANDLED); |
323 | } | 323 | } |
324 | 324 | ||
@@ -415,6 +415,7 @@ static int acpi_ev_global_lock_acquired; | |||
415 | 415 | ||
416 | acpi_status acpi_ev_acquire_global_lock(u16 timeout) | 416 | acpi_status acpi_ev_acquire_global_lock(u16 timeout) |
417 | { | 417 | { |
418 | acpi_cpu_flags flags; | ||
418 | acpi_status status = AE_OK; | 419 | acpi_status status = AE_OK; |
419 | u8 acquired = FALSE; | 420 | u8 acquired = FALSE; |
420 | 421 | ||
@@ -467,32 +468,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout) | |||
467 | return_ACPI_STATUS(AE_OK); | 468 | return_ACPI_STATUS(AE_OK); |
468 | } | 469 | } |
469 | 470 | ||
470 | /* Attempt to acquire the actual hardware lock */ | 471 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
472 | |||
473 | do { | ||
474 | |||
475 | /* Attempt to acquire the actual hardware lock */ | ||
476 | |||
477 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
478 | if (acquired) { | ||
479 | acpi_gbl_global_lock_acquired = TRUE; | ||
480 | |||
481 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
482 | "Acquired hardware Global Lock\n")); | ||
483 | break; | ||
484 | } | ||
471 | 485 | ||
472 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | 486 | acpi_ev_global_lock_pending = TRUE; |
473 | if (acquired) { | ||
474 | 487 | ||
475 | /* We got the lock */ | 488 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); |
476 | 489 | ||
490 | /* | ||
491 | * Did not get the lock. The pending bit was set above, and we | ||
492 | * must wait until we get the global lock released interrupt. | ||
493 | */ | ||
477 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | 494 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, |
478 | "Acquired hardware Global Lock\n")); | 495 | "Waiting for hardware Global Lock\n")); |
479 | 496 | ||
480 | acpi_gbl_global_lock_acquired = TRUE; | 497 | /* |
481 | return_ACPI_STATUS(AE_OK); | 498 | * Wait for handshake with the global lock interrupt handler. |
482 | } | 499 | * This interface releases the interpreter if we must wait. |
500 | */ | ||
501 | status = acpi_ex_system_wait_semaphore( | ||
502 | acpi_gbl_global_lock_semaphore, | ||
503 | ACPI_WAIT_FOREVER); | ||
483 | 504 | ||
484 | /* | 505 | flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); |
485 | * Did not get the lock. The pending bit was set above, and we must now | ||
486 | * wait until we get the global lock released interrupt. | ||
487 | */ | ||
488 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); | ||
489 | 506 | ||
490 | /* | 507 | } while (ACPI_SUCCESS(status)); |
491 | * Wait for handshake with the global lock interrupt handler. | 508 | |
492 | * This interface releases the interpreter if we must wait. | 509 | acpi_ev_global_lock_pending = FALSE; |
493 | */ | 510 | |
494 | status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, | 511 | acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); |
495 | ACPI_WAIT_FOREVER); | ||
496 | 512 | ||
497 | return_ACPI_STATUS(status); | 513 | return_ACPI_STATUS(status); |
498 | } | 514 | } |
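Taken together, the two hunks above replace the old try-acquire-in-the-interrupt-handler scheme with a pending-flag handshake: the acquiring thread loops on ACPI_ACQUIRE_GLOBAL_LOCK, sets acpi_ev_global_lock_pending under a spinlock before sleeping, and the SCI-level handler signals the semaphore only when that flag is set. The following is a minimal sketch of that pattern using the same OS-layer primitives that appear in the hunks; it is illustrative only and omits the acpi_gbl_global_lock_acquired bookkeeping, debug tracing, and spinlock initialization done elsewhere in event setup.

	static u8 pending;			/* guarded by pending_lock */
	static spinlock_t _pending_lock;	/* assumed initialized at init time */
	#define pending_lock (&_pending_lock)

	/* Invoked from the SCI handler on a global-lock release interrupt */
	static u32 lock_release_interrupt(void *context)
	{
		acpi_cpu_flags flags = acpi_os_acquire_lock(pending_lock);

		if (pending) {
			/* Hand exactly one unit to the sleeping acquirer */
			(void)acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
			pending = FALSE;
		}

		acpi_os_release_lock(pending_lock, flags);
		return (ACPI_INTERRUPT_HANDLED);
	}

	/* Called by a thread that needs the hardware global lock */
	static acpi_status lock_acquire(void)
	{
		acpi_status status = AE_OK;
		u8 acquired = FALSE;
		acpi_cpu_flags flags = acpi_os_acquire_lock(pending_lock);

		do {
			ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
			if (acquired)
				break;

			/* Record that we are waiting, then sleep until signaled */
			pending = TRUE;
			acpi_os_release_lock(pending_lock, flags);
			status = acpi_ex_system_wait_semaphore(
					acpi_gbl_global_lock_semaphore,
					ACPI_WAIT_FOREVER);
			flags = acpi_os_acquire_lock(pending_lock);
		} while (ACPI_SUCCESS(status));

		pending = FALSE;
		acpi_os_release_lock(pending_lock, flags);
		return (status);
	}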
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 36af222cac65..1226689bdb1b 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler) | |||
92 | 92 | ||
93 | ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | 93 | ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) |
94 | #endif /* ACPI_FUTURE_USAGE */ | 94 | #endif /* ACPI_FUTURE_USAGE */ |
95 | |||
96 | /******************************************************************************* | ||
97 | * | ||
98 | * FUNCTION: acpi_install_global_event_handler | ||
99 | * | ||
100 | * PARAMETERS: Handler - Pointer to the global event handler function | ||
101 | * Context - Value passed to the handler on each event | ||
102 | * | ||
103 | * RETURN: Status | ||
104 | * | ||
105 | * DESCRIPTION: Saves the pointer to the handler function. The global handler | ||
106 | * is invoked upon each incoming GPE and Fixed Event. It is | ||
107 | * invoked at interrupt level at the time of the event dispatch. | ||
108 | * Can be used to update event counters, etc. | ||
109 | * | ||
110 | ******************************************************************************/ | ||
111 | acpi_status | ||
112 | acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) | ||
113 | { | ||
114 | acpi_status status; | ||
115 | |||
116 | ACPI_FUNCTION_TRACE(acpi_install_global_event_handler); | ||
117 | |||
118 | /* Parameter validation */ | ||
119 | |||
120 | if (!handler) { | ||
121 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
122 | } | ||
123 | |||
124 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
125 | if (ACPI_FAILURE(status)) { | ||
126 | return_ACPI_STATUS(status); | ||
127 | } | ||
128 | |||
129 | /* Don't allow two handlers. */ | ||
130 | |||
131 | if (acpi_gbl_global_event_handler) { | ||
132 | status = AE_ALREADY_EXISTS; | ||
133 | goto cleanup; | ||
134 | } | ||
135 | |||
136 | acpi_gbl_global_event_handler = handler; | ||
137 | acpi_gbl_global_event_handler_context = context; | ||
138 | |||
139 | cleanup: | ||
140 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
141 | return_ACPI_STATUS(status); | ||
142 | } | ||
143 | |||
144 | ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler) | ||
145 | |||
95 | /******************************************************************************* | 146 | /******************************************************************************* |
96 | * | 147 | * |
97 | * FUNCTION: acpi_install_fixed_event_handler | 148 | * FUNCTION: acpi_install_fixed_event_handler |
@@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler) | |||
671 | acpi_status | 722 | acpi_status |
672 | acpi_install_gpe_handler(acpi_handle gpe_device, | 723 | acpi_install_gpe_handler(acpi_handle gpe_device, |
673 | u32 gpe_number, | 724 | u32 gpe_number, |
674 | u32 type, acpi_event_handler address, void *context) | 725 | u32 type, acpi_gpe_handler address, void *context) |
675 | { | 726 | { |
676 | struct acpi_gpe_event_info *gpe_event_info; | 727 | struct acpi_gpe_event_info *gpe_event_info; |
677 | struct acpi_handler_info *handler; | 728 | struct acpi_gpe_handler_info *handler; |
678 | acpi_status status; | 729 | acpi_status status; |
679 | acpi_cpu_flags flags; | 730 | acpi_cpu_flags flags; |
680 | 731 | ||
@@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
693 | 744 | ||
694 | /* Allocate memory for the handler object */ | 745 | /* Allocate memory for the handler object */ |
695 | 746 | ||
696 | handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); | 747 | handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); |
697 | if (!handler) { | 748 | if (!handler) { |
698 | status = AE_NO_MEMORY; | 749 | status = AE_NO_MEMORY; |
699 | goto unlock_and_exit; | 750 | goto unlock_and_exit; |
@@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
722 | handler->address = address; | 773 | handler->address = address; |
723 | handler->context = context; | 774 | handler->context = context; |
724 | handler->method_node = gpe_event_info->dispatch.method_node; | 775 | handler->method_node = gpe_event_info->dispatch.method_node; |
725 | handler->orig_flags = gpe_event_info->flags & | 776 | handler->original_flags = gpe_event_info->flags & |
726 | (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); | 777 | (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); |
727 | 778 | ||
728 | /* | 779 | /* |
@@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
731 | * disabled now to avoid spurious execution of the handler. | 782 | * disabled now to avoid spurious execution of the handler. |
732 | */ | 783 | */ |
733 | 784 | ||
734 | if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) | 785 | if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) |
735 | && gpe_event_info->runtime_count) { | 786 | && gpe_event_info->runtime_count) { |
736 | handler->orig_enabled = 1; | 787 | handler->originally_enabled = 1; |
737 | (void)acpi_raw_disable_gpe(gpe_event_info); | 788 | (void)acpi_ev_remove_gpe_reference(gpe_event_info); |
738 | } | 789 | } |
739 | 790 | ||
740 | /* Install the handler */ | 791 | /* Install the handler */ |
@@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) | |||
777 | ******************************************************************************/ | 828 | ******************************************************************************/ |
778 | acpi_status | 829 | acpi_status |
779 | acpi_remove_gpe_handler(acpi_handle gpe_device, | 830 | acpi_remove_gpe_handler(acpi_handle gpe_device, |
780 | u32 gpe_number, acpi_event_handler address) | 831 | u32 gpe_number, acpi_gpe_handler address) |
781 | { | 832 | { |
782 | struct acpi_gpe_event_info *gpe_event_info; | 833 | struct acpi_gpe_event_info *gpe_event_info; |
783 | struct acpi_handler_info *handler; | 834 | struct acpi_gpe_handler_info *handler; |
784 | acpi_status status; | 835 | acpi_status status; |
785 | acpi_cpu_flags flags; | 836 | acpi_cpu_flags flags; |
786 | 837 | ||
@@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
835 | gpe_event_info->dispatch.method_node = handler->method_node; | 886 | gpe_event_info->dispatch.method_node = handler->method_node; |
836 | gpe_event_info->flags &= | 887 | gpe_event_info->flags &= |
837 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); | 888 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); |
838 | gpe_event_info->flags |= handler->orig_flags; | 889 | gpe_event_info->flags |= handler->original_flags; |
839 | 890 | ||
840 | /* | 891 | /* |
841 | * If the GPE was previously associated with a method and it was | 892 | * If the GPE was previously associated with a method and it was |
@@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
843 | * post-initialization configuration. | 894 | * post-initialization configuration. |
844 | */ | 895 | */ |
845 | 896 | ||
846 | if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) | 897 | if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) |
847 | && handler->orig_enabled) | 898 | && handler->originally_enabled) |
848 | (void)acpi_raw_enable_gpe(gpe_event_info); | 899 | (void)acpi_ev_add_gpe_reference(gpe_event_info); |
849 | 900 | ||
850 | /* Now we can free the handler object */ | 901 | /* Now we can free the handler object */ |
851 | 902 | ||
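For context, a hedged usage sketch of the new acpi_install_global_event_handler() interface added at the top of this file. The callback signature (event type, device, event number, context) and the ACPI_EVENT_TYPE_GPE constant are assumptions inferred from the description; only the install call itself appears in the hunk above.

	static u32 gpe_count, fixed_count;

	/* Invoked at interrupt level for every incoming GPE and Fixed Event */
	static void count_acpi_events(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
	{
		if (event_type == ACPI_EVENT_TYPE_GPE)	/* assumed constant name */
			gpe_count++;
		else
			fixed_count++;
	}

	/* Registration: only one global handler may be installed at a time */
	acpi_status example_install(void)
	{
		return acpi_install_global_event_handler(count_acpi_events, NULL);
	}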
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index a1dabe3fd8ae..90488c1e0f3d 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
@@ -43,18 +43,11 @@ | |||
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | 45 | #include "accommon.h" |
46 | #include "acevents.h" | ||
47 | #include "acnamesp.h" | ||
48 | #include "actables.h" | 46 | #include "actables.h" |
49 | 47 | ||
50 | #define _COMPONENT ACPI_EVENTS | 48 | #define _COMPONENT ACPI_EVENTS |
51 | ACPI_MODULE_NAME("evxfevnt") | 49 | ACPI_MODULE_NAME("evxfevnt") |
52 | 50 | ||
53 | /* Local prototypes */ | ||
54 | static acpi_status | ||
55 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
56 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
57 | |||
58 | /******************************************************************************* | 51 | /******************************************************************************* |
59 | * | 52 | * |
60 | * FUNCTION: acpi_enable | 53 | * FUNCTION: acpi_enable |
@@ -213,185 +206,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) | |||
213 | 206 | ||
214 | /******************************************************************************* | 207 | /******************************************************************************* |
215 | * | 208 | * |
216 | * FUNCTION: acpi_gpe_wakeup | ||
217 | * | ||
218 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
219 | * gpe_number - GPE level within the GPE block | ||
220 | * Action - Enable or Disable | ||
221 | * | ||
222 | * RETURN: Status | ||
223 | * | ||
224 | * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. | ||
225 | * | ||
226 | ******************************************************************************/ | ||
227 | acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
228 | { | ||
229 | acpi_status status = AE_OK; | ||
230 | struct acpi_gpe_event_info *gpe_event_info; | ||
231 | struct acpi_gpe_register_info *gpe_register_info; | ||
232 | acpi_cpu_flags flags; | ||
233 | u32 register_bit; | ||
234 | |||
235 | ACPI_FUNCTION_TRACE(acpi_gpe_wakeup); | ||
236 | |||
237 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
238 | |||
239 | /* Ensure that we have a valid GPE number */ | ||
240 | |||
241 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
242 | if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
243 | status = AE_BAD_PARAMETER; | ||
244 | goto unlock_and_exit; | ||
245 | } | ||
246 | |||
247 | gpe_register_info = gpe_event_info->register_info; | ||
248 | if (!gpe_register_info) { | ||
249 | status = AE_NOT_EXIST; | ||
250 | goto unlock_and_exit; | ||
251 | } | ||
252 | |||
253 | register_bit = | ||
254 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
255 | |||
256 | /* Perform the action */ | ||
257 | |||
258 | switch (action) { | ||
259 | case ACPI_GPE_ENABLE: | ||
260 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, | ||
261 | (u8)register_bit); | ||
262 | break; | ||
263 | |||
264 | case ACPI_GPE_DISABLE: | ||
265 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
266 | (u8)register_bit); | ||
267 | break; | ||
268 | |||
269 | default: | ||
270 | ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); | ||
271 | status = AE_BAD_PARAMETER; | ||
272 | break; | ||
273 | } | ||
274 | |||
275 | unlock_and_exit: | ||
276 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
277 | return_ACPI_STATUS(status); | ||
278 | } | ||
279 | |||
280 | ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup) | ||
281 | |||
282 | /******************************************************************************* | ||
283 | * | ||
284 | * FUNCTION: acpi_enable_gpe | ||
285 | * | ||
286 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
287 | * gpe_number - GPE level within the GPE block | ||
288 | * | ||
289 | * RETURN: Status | ||
290 | * | ||
291 | * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is | ||
292 | * hardware-enabled. | ||
293 | * | ||
294 | ******************************************************************************/ | ||
295 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
296 | { | ||
297 | acpi_status status = AE_BAD_PARAMETER; | ||
298 | struct acpi_gpe_event_info *gpe_event_info; | ||
299 | acpi_cpu_flags flags; | ||
300 | |||
301 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | ||
302 | |||
303 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
304 | |||
305 | /* Ensure that we have a valid GPE number */ | ||
306 | |||
307 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
308 | if (gpe_event_info) { | ||
309 | status = acpi_raw_enable_gpe(gpe_event_info); | ||
310 | } | ||
311 | |||
312 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
313 | return_ACPI_STATUS(status); | ||
314 | } | ||
315 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | ||
316 | |||
317 | /******************************************************************************* | ||
318 | * | ||
319 | * FUNCTION: acpi_disable_gpe | ||
320 | * | ||
321 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
322 | * gpe_number - GPE level within the GPE block | ||
323 | * | ||
324 | * RETURN: Status | ||
325 | * | ||
326 | * DESCRIPTION: Remove a reference to a GPE. When the last reference is | ||
327 | * removed, only then is the GPE disabled (for runtime GPEs), or | ||
328 | * the GPE mask bit disabled (for wake GPEs) | ||
329 | * | ||
330 | ******************************************************************************/ | ||
331 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
332 | { | ||
333 | acpi_status status = AE_BAD_PARAMETER; | ||
334 | struct acpi_gpe_event_info *gpe_event_info; | ||
335 | acpi_cpu_flags flags; | ||
336 | |||
337 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | ||
338 | |||
339 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
340 | |||
341 | /* Ensure that we have a valid GPE number */ | ||
342 | |||
343 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
344 | if (gpe_event_info) { | ||
345 | status = acpi_raw_disable_gpe(gpe_event_info) ; | ||
346 | } | ||
347 | |||
348 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
349 | return_ACPI_STATUS(status); | ||
350 | } | ||
351 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | ||
352 | |||
353 | /******************************************************************************* | ||
354 | * | ||
355 | * FUNCTION: acpi_gpe_can_wake | ||
356 | * | ||
357 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
358 | * gpe_number - GPE level within the GPE block | ||
359 | * | ||
360 | * RETURN: Status | ||
361 | * | ||
362 | * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE | ||
363 | * has a corresponding method and is currently enabled, disable it | ||
364 | * (GPEs with corresponding methods are enabled unconditionally | ||
365 | * during initialization, but GPEs that can wake up are expected | ||
366 | * to be initially disabled). | ||
367 | * | ||
368 | ******************************************************************************/ | ||
369 | acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) | ||
370 | { | ||
371 | acpi_status status = AE_OK; | ||
372 | struct acpi_gpe_event_info *gpe_event_info; | ||
373 | acpi_cpu_flags flags; | ||
374 | |||
375 | ACPI_FUNCTION_TRACE(acpi_gpe_can_wake); | ||
376 | |||
377 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
378 | |||
379 | /* Ensure that we have a valid GPE number */ | ||
380 | |||
381 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
382 | if (gpe_event_info) { | ||
383 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
384 | } else { | ||
385 | status = AE_BAD_PARAMETER; | ||
386 | } | ||
387 | |||
388 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
389 | return_ACPI_STATUS(status); | ||
390 | } | ||
391 | ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake) | ||
392 | |||
393 | /******************************************************************************* | ||
394 | * | ||
395 | * FUNCTION: acpi_disable_event | 209 | * FUNCTION: acpi_disable_event |
396 | * | 210 | * |
397 | * PARAMETERS: Event - The fixed event to be disabled | 211 |
@@ -483,44 +297,6 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event) | |||
483 | 297 | ||
484 | /******************************************************************************* | 298 | /******************************************************************************* |
485 | * | 299 | * |
486 | * FUNCTION: acpi_clear_gpe | ||
487 | * | ||
488 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
489 | * gpe_number - GPE level within the GPE block | ||
490 | * | ||
491 | * RETURN: Status | ||
492 | * | ||
493 | * DESCRIPTION: Clear an ACPI event (general purpose) | ||
494 | * | ||
495 | ******************************************************************************/ | ||
496 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
497 | { | ||
498 | acpi_status status = AE_OK; | ||
499 | struct acpi_gpe_event_info *gpe_event_info; | ||
500 | acpi_cpu_flags flags; | ||
501 | |||
502 | ACPI_FUNCTION_TRACE(acpi_clear_gpe); | ||
503 | |||
504 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
505 | |||
506 | /* Ensure that we have a valid GPE number */ | ||
507 | |||
508 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
509 | if (!gpe_event_info) { | ||
510 | status = AE_BAD_PARAMETER; | ||
511 | goto unlock_and_exit; | ||
512 | } | ||
513 | |||
514 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
515 | |||
516 | unlock_and_exit: | ||
517 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
518 | return_ACPI_STATUS(status); | ||
519 | } | ||
520 | |||
521 | ACPI_EXPORT_SYMBOL(acpi_clear_gpe) | ||
522 | /******************************************************************************* | ||
523 | * | ||
524 | * FUNCTION: acpi_get_event_status | 300 | * FUNCTION: acpi_get_event_status |
525 | * | 301 | * |
526 | * PARAMETERS: Event - The fixed event | 302 | * PARAMETERS: Event - The fixed event |
@@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) | |||
575 | } | 351 | } |
576 | 352 | ||
577 | ACPI_EXPORT_SYMBOL(acpi_get_event_status) | 353 | ACPI_EXPORT_SYMBOL(acpi_get_event_status) |
578 | |||
579 | /******************************************************************************* | ||
580 | * | ||
581 | * FUNCTION: acpi_get_gpe_status | ||
582 | * | ||
583 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
584 | * gpe_number - GPE level within the GPE block | ||
585 | * event_status - Where the current status of the event will | ||
586 | * be returned | ||
587 | * | ||
588 | * RETURN: Status | ||
589 | * | ||
590 | * DESCRIPTION: Get status of an event (general purpose) | ||
591 | * | ||
592 | ******************************************************************************/ | ||
593 | acpi_status | ||
594 | acpi_get_gpe_status(acpi_handle gpe_device, | ||
595 | u32 gpe_number, acpi_event_status *event_status) | ||
596 | { | ||
597 | acpi_status status = AE_OK; | ||
598 | struct acpi_gpe_event_info *gpe_event_info; | ||
599 | acpi_cpu_flags flags; | ||
600 | |||
601 | ACPI_FUNCTION_TRACE(acpi_get_gpe_status); | ||
602 | |||
603 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
604 | |||
605 | /* Ensure that we have a valid GPE number */ | ||
606 | |||
607 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
608 | if (!gpe_event_info) { | ||
609 | status = AE_BAD_PARAMETER; | ||
610 | goto unlock_and_exit; | ||
611 | } | ||
612 | |||
613 | /* Obtain status on the requested GPE number */ | ||
614 | |||
615 | status = acpi_hw_get_gpe_status(gpe_event_info, event_status); | ||
616 | |||
617 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
618 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
619 | |||
620 | unlock_and_exit: | ||
621 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
622 | return_ACPI_STATUS(status); | ||
623 | } | ||
624 | |||
625 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | ||
626 | /******************************************************************************* | ||
627 | * | ||
628 | * FUNCTION: acpi_install_gpe_block | ||
629 | * | ||
630 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
631 | * gpe_block_address - Address and space_iD | ||
632 | * register_count - Number of GPE register pairs in the block | ||
633 | * interrupt_number - H/W interrupt for the block | ||
634 | * | ||
635 | * RETURN: Status | ||
636 | * | ||
637 | * DESCRIPTION: Create and Install a block of GPE registers | ||
638 | * | ||
639 | ******************************************************************************/ | ||
640 | acpi_status | ||
641 | acpi_install_gpe_block(acpi_handle gpe_device, | ||
642 | struct acpi_generic_address *gpe_block_address, | ||
643 | u32 register_count, u32 interrupt_number) | ||
644 | { | ||
645 | acpi_status status = AE_OK; | ||
646 | union acpi_operand_object *obj_desc; | ||
647 | struct acpi_namespace_node *node; | ||
648 | struct acpi_gpe_block_info *gpe_block; | ||
649 | |||
650 | ACPI_FUNCTION_TRACE(acpi_install_gpe_block); | ||
651 | |||
652 | if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { | ||
653 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
654 | } | ||
655 | |||
656 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
657 | if (ACPI_FAILURE(status)) { | ||
658 | return (status); | ||
659 | } | ||
660 | |||
661 | node = acpi_ns_validate_handle(gpe_device); | ||
662 | if (!node) { | ||
663 | status = AE_BAD_PARAMETER; | ||
664 | goto unlock_and_exit; | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * For user-installed GPE Block Devices, the gpe_block_base_number | ||
669 | * is always zero | ||
670 | */ | ||
671 | status = | ||
672 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | ||
673 | interrupt_number, &gpe_block); | ||
674 | if (ACPI_FAILURE(status)) { | ||
675 | goto unlock_and_exit; | ||
676 | } | ||
677 | |||
678 | /* Install block in the device_object attached to the node */ | ||
679 | |||
680 | obj_desc = acpi_ns_get_attached_object(node); | ||
681 | if (!obj_desc) { | ||
682 | |||
683 | /* | ||
684 | * No object, create a new one (Device nodes do not always have | ||
685 | * an attached object) | ||
686 | */ | ||
687 | obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); | ||
688 | if (!obj_desc) { | ||
689 | status = AE_NO_MEMORY; | ||
690 | goto unlock_and_exit; | ||
691 | } | ||
692 | |||
693 | status = | ||
694 | acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); | ||
695 | |||
696 | /* Remove local reference to the object */ | ||
697 | |||
698 | acpi_ut_remove_reference(obj_desc); | ||
699 | |||
700 | if (ACPI_FAILURE(status)) { | ||
701 | goto unlock_and_exit; | ||
702 | } | ||
703 | } | ||
704 | |||
705 | /* Now install the GPE block in the device_object */ | ||
706 | |||
707 | obj_desc->device.gpe_block = gpe_block; | ||
708 | |||
709 | unlock_and_exit: | ||
710 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
711 | return_ACPI_STATUS(status); | ||
712 | } | ||
713 | |||
714 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) | ||
715 | |||
716 | /******************************************************************************* | ||
717 | * | ||
718 | * FUNCTION: acpi_remove_gpe_block | ||
719 | * | ||
720 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
721 | * | ||
722 | * RETURN: Status | ||
723 | * | ||
724 | * DESCRIPTION: Remove a previously installed block of GPE registers | ||
725 | * | ||
726 | ******************************************************************************/ | ||
727 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | ||
728 | { | ||
729 | union acpi_operand_object *obj_desc; | ||
730 | acpi_status status; | ||
731 | struct acpi_namespace_node *node; | ||
732 | |||
733 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); | ||
734 | |||
735 | if (!gpe_device) { | ||
736 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
737 | } | ||
738 | |||
739 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
740 | if (ACPI_FAILURE(status)) { | ||
741 | return (status); | ||
742 | } | ||
743 | |||
744 | node = acpi_ns_validate_handle(gpe_device); | ||
745 | if (!node) { | ||
746 | status = AE_BAD_PARAMETER; | ||
747 | goto unlock_and_exit; | ||
748 | } | ||
749 | |||
750 | /* Get the device_object attached to the node */ | ||
751 | |||
752 | obj_desc = acpi_ns_get_attached_object(node); | ||
753 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
754 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
755 | } | ||
756 | |||
757 | /* Delete the GPE block (but not the device_object) */ | ||
758 | |||
759 | status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); | ||
760 | if (ACPI_SUCCESS(status)) { | ||
761 | obj_desc->device.gpe_block = NULL; | ||
762 | } | ||
763 | |||
764 | unlock_and_exit: | ||
765 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
766 | return_ACPI_STATUS(status); | ||
767 | } | ||
768 | |||
769 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | ||
770 | |||
771 | /******************************************************************************* | ||
772 | * | ||
773 | * FUNCTION: acpi_get_gpe_device | ||
774 | * | ||
775 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
776 | * gpe_device - Where the parent GPE Device is returned | ||
777 | * | ||
778 | * RETURN: Status | ||
779 | * | ||
780 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
781 | * gpe device indicates that the gpe number is contained in one of | ||
782 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | ||
783 | * | ||
784 | ******************************************************************************/ | ||
785 | acpi_status | ||
786 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
787 | { | ||
788 | struct acpi_gpe_device_info info; | ||
789 | acpi_status status; | ||
790 | |||
791 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
792 | |||
793 | if (!gpe_device) { | ||
794 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
795 | } | ||
796 | |||
797 | if (index >= acpi_current_gpe_count) { | ||
798 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
799 | } | ||
800 | |||
801 | /* Setup and walk the GPE list */ | ||
802 | |||
803 | info.index = index; | ||
804 | info.status = AE_NOT_EXIST; | ||
805 | info.gpe_device = NULL; | ||
806 | info.next_block_base_index = 0; | ||
807 | |||
808 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
809 | if (ACPI_FAILURE(status)) { | ||
810 | return_ACPI_STATUS(status); | ||
811 | } | ||
812 | |||
813 | *gpe_device = info.gpe_device; | ||
814 | return_ACPI_STATUS(info.status); | ||
815 | } | ||
816 | |||
817 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
818 | |||
819 | /******************************************************************************* | ||
820 | * | ||
821 | * FUNCTION: acpi_ev_get_gpe_device | ||
822 | * | ||
823 | * PARAMETERS: GPE_WALK_CALLBACK | ||
824 | * | ||
825 | * RETURN: Status | ||
826 | * | ||
827 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
828 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
829 | * | ||
830 | ******************************************************************************/ | ||
831 | static acpi_status | ||
832 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
833 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
834 | { | ||
835 | struct acpi_gpe_device_info *info = context; | ||
836 | |||
837 | /* Increment Index by the number of GPEs in this block */ | ||
838 | |||
839 | info->next_block_base_index += gpe_block->gpe_count; | ||
840 | |||
841 | if (info->index < info->next_block_base_index) { | ||
842 | /* | ||
843 | * The GPE index is within this block, get the node. Leave the node | ||
844 | * NULL for the FADT-defined GPEs | ||
845 | */ | ||
846 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
847 | info->gpe_device = gpe_block->node; | ||
848 | } | ||
849 | |||
850 | info->status = AE_OK; | ||
851 | return (AE_CTRL_END); | ||
852 | } | ||
853 | |||
854 | return (AE_OK); | ||
855 | } | ||
856 | |||
857 | /****************************************************************************** | ||
858 | * | ||
859 | * FUNCTION: acpi_disable_all_gpes | ||
860 | * | ||
861 | * PARAMETERS: None | ||
862 | * | ||
863 | * RETURN: Status | ||
864 | * | ||
865 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
866 | * | ||
867 | ******************************************************************************/ | ||
868 | |||
869 | acpi_status acpi_disable_all_gpes(void) | ||
870 | { | ||
871 | acpi_status status; | ||
872 | |||
873 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
874 | |||
875 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
876 | if (ACPI_FAILURE(status)) { | ||
877 | return_ACPI_STATUS(status); | ||
878 | } | ||
879 | |||
880 | status = acpi_hw_disable_all_gpes(); | ||
881 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
882 | |||
883 | return_ACPI_STATUS(status); | ||
884 | } | ||
885 | |||
886 | /****************************************************************************** | ||
887 | * | ||
888 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
889 | * | ||
890 | * PARAMETERS: None | ||
891 | * | ||
892 | * RETURN: Status | ||
893 | * | ||
894 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
895 | * | ||
896 | ******************************************************************************/ | ||
897 | |||
898 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
899 | { | ||
900 | acpi_status status; | ||
901 | |||
902 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
903 | |||
904 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
905 | if (ACPI_FAILURE(status)) { | ||
906 | return_ACPI_STATUS(status); | ||
907 | } | ||
908 | |||
909 | status = acpi_hw_enable_all_runtime_gpes(); | ||
910 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
911 | |||
912 | return_ACPI_STATUS(status); | ||
913 | } | ||
914 | |||
915 | /****************************************************************************** | ||
916 | * | ||
917 | * FUNCTION: acpi_update_gpes | ||
918 | * | ||
919 | * PARAMETERS: None | ||
920 | * | ||
921 | * RETURN: None | ||
922 | * | ||
923 | * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and | ||
924 | * are not pointed to by any device _PRW methods indicating that | ||
925 | * these GPEs are generally intended for system or device wakeup | ||
926 | * (such GPEs have to be enabled directly when the devices whose | ||
927 | * _PRW methods point to them are set up for wakeup signaling). | ||
928 | * | ||
929 | ******************************************************************************/ | ||
930 | |||
931 | acpi_status acpi_update_gpes(void) | ||
932 | { | ||
933 | acpi_status status; | ||
934 | |||
935 | ACPI_FUNCTION_TRACE(acpi_update_gpes); | ||
936 | |||
937 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
938 | if (ACPI_FAILURE(status)) { | ||
939 | return_ACPI_STATUS(status); | ||
940 | } else if (acpi_all_gpes_initialized) { | ||
941 | goto unlock; | ||
942 | } | ||
943 | |||
944 | status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); | ||
945 | if (ACPI_SUCCESS(status)) { | ||
946 | acpi_all_gpes_initialized = TRUE; | ||
947 | } | ||
948 | |||
949 | unlock: | ||
950 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
951 | |||
952 | return_ACPI_STATUS(status); | ||
953 | } | ||
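The GPE interfaces deleted from evxfevnt.c above are not dropped; they reappear (some renamed) in the new evxfgpe.c file added below. A minimal, hedged usage sketch of the reference-counted enable/disable pair as described there; MY_GPE is a hypothetical GPE number in a FADT-defined block, and error handling is elided.

	#define MY_GPE 0x16				/* hypothetical */

	void gpe_refcount_example(void)
	{
		/* First reference: the GPE is hardware-enabled */
		(void)acpi_enable_gpe(NULL, MY_GPE);
		(void)acpi_enable_gpe(NULL, MY_GPE);	/* second reference, no HW change */

		(void)acpi_disable_gpe(NULL, MY_GPE);	/* still one reference left */

		/* Last reference removed: only now is the GPE hardware-disabled */
		(void)acpi_disable_gpe(NULL, MY_GPE);
	}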
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c new file mode 100644 index 000000000000..416845bc9c1f --- /dev/null +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -0,0 +1,669 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2010, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | #include "acevents.h" | ||
47 | #include "acnamesp.h" | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evxfgpe") | ||
51 | |||
52 | /****************************************************************************** | ||
53 | * | ||
54 | * FUNCTION: acpi_update_all_gpes | ||
55 | * | ||
56 | * PARAMETERS: None | ||
57 | * | ||
58 | * RETURN: Status | ||
59 | * | ||
60 | * DESCRIPTION: Complete GPE initialization and enable all GPEs that have | ||
61 | * associated _Lxx or _Exx methods and are not pointed to by any | ||
62 | * device _PRW methods (this indicates that these GPEs are | ||
63 | * generally intended for system or device wakeup. Such GPEs | ||
64 | * have to be enabled directly when the devices whose _PRW | ||
65 | * methods point to them are set up for wakeup signaling.) | ||
66 | * | ||
67 | * NOTE: Should be called after any GPEs are added to the system. Primarily, | ||
68 | * after the system _PRW methods have been run, but also after a GPE Block | ||
69 | * Device has been added or if any new GPE methods have been added via a | ||
70 | * dynamic table load. | ||
71 | * | ||
72 | ******************************************************************************/ | ||
73 | |||
74 | acpi_status acpi_update_all_gpes(void) | ||
75 | { | ||
76 | acpi_status status; | ||
77 | |||
78 | ACPI_FUNCTION_TRACE(acpi_update_all_gpes); | ||
79 | |||
80 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
81 | if (ACPI_FAILURE(status)) { | ||
82 | return_ACPI_STATUS(status); | ||
83 | } | ||
84 | |||
85 | if (acpi_gbl_all_gpes_initialized) { | ||
86 | goto unlock_and_exit; | ||
87 | } | ||
88 | |||
89 | status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); | ||
90 | if (ACPI_SUCCESS(status)) { | ||
91 | acpi_gbl_all_gpes_initialized = TRUE; | ||
92 | } | ||
93 | |||
94 | unlock_and_exit: | ||
95 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
96 | |||
97 | return_ACPI_STATUS(status); | ||
98 | } | ||
99 | |||
100 | ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) | ||
101 | |||
102 | /******************************************************************************* | ||
103 | * | ||
104 | * FUNCTION: acpi_enable_gpe | ||
105 | * | ||
106 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
107 | * gpe_number - GPE level within the GPE block | ||
108 | * | ||
109 | * RETURN: Status | ||
110 | * | ||
111 | * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is | ||
112 | * hardware-enabled. | ||
113 | * | ||
114 | ******************************************************************************/ | ||
115 | |||
116 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
117 | { | ||
118 | acpi_status status = AE_BAD_PARAMETER; | ||
119 | struct acpi_gpe_event_info *gpe_event_info; | ||
120 | acpi_cpu_flags flags; | ||
121 | |||
122 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | ||
123 | |||
124 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
125 | |||
126 | /* Ensure that we have a valid GPE number */ | ||
127 | |||
128 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
129 | if (gpe_event_info) { | ||
130 | status = acpi_ev_add_gpe_reference(gpe_event_info); | ||
131 | } | ||
132 | |||
133 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
134 | return_ACPI_STATUS(status); | ||
135 | } | ||
136 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | ||
137 | |||
138 | /******************************************************************************* | ||
139 | * | ||
140 | * FUNCTION: acpi_disable_gpe | ||
141 | * | ||
142 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
143 | * gpe_number - GPE level within the GPE block | ||
144 | * | ||
145 | * RETURN: Status | ||
146 | * | ||
147 | * DESCRIPTION: Remove a reference to a GPE. When the last reference is | ||
148 | * removed, only then is the GPE disabled (for runtime GPEs), or | ||
149 | * the GPE mask bit disabled (for wake GPEs) | ||
150 | * | ||
151 | ******************************************************************************/ | ||
152 | |||
153 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
154 | { | ||
155 | acpi_status status = AE_BAD_PARAMETER; | ||
156 | struct acpi_gpe_event_info *gpe_event_info; | ||
157 | acpi_cpu_flags flags; | ||
158 | |||
159 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | ||
160 | |||
161 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
162 | |||
163 | /* Ensure that we have a valid GPE number */ | ||
164 | |||
165 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
166 | if (gpe_event_info) { | ||
167 | status = acpi_ev_remove_gpe_reference(gpe_event_info) ; | ||
168 | } | ||
169 | |||
170 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
171 | return_ACPI_STATUS(status); | ||
172 | } | ||
173 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | ||
174 | |||
175 | |||
176 | /******************************************************************************* | ||
177 | * | ||
178 | * FUNCTION: acpi_setup_gpe_for_wake | ||
179 | * | ||
180 | * PARAMETERS: wake_device - Device associated with the GPE (via _PRW) | ||
181 | * gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
182 | * gpe_number - GPE level within the GPE block | ||
183 | * | ||
184 | * RETURN: Status | ||
185 | * | ||
186 | * DESCRIPTION: Mark a GPE as having the ability to wake the system. This | ||
187 | * interface is intended to be used as the host executes the | ||
188 | * _PRW methods (Power Resources for Wake) in the system tables. | ||
189 | * Each _PRW appears under a Device Object (The wake_device), and | ||
190 | * contains the info for the wake GPE associated with the | ||
191 | * wake_device. | ||
192 | * | ||
193 | ******************************************************************************/ | ||
194 | acpi_status | ||
195 | acpi_setup_gpe_for_wake(acpi_handle wake_device, | ||
196 | acpi_handle gpe_device, u32 gpe_number) | ||
197 | { | ||
198 | acpi_status status = AE_BAD_PARAMETER; | ||
199 | struct acpi_gpe_event_info *gpe_event_info; | ||
200 | struct acpi_namespace_node *device_node; | ||
201 | acpi_cpu_flags flags; | ||
202 | |||
203 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); | ||
204 | |||
205 | /* Parameter Validation */ | ||
206 | |||
207 | if (!wake_device) { | ||
208 | /* | ||
209 | * By forcing wake_device to be valid, we automatically enable the | ||
210 | * implicit notify feature on all hosts. | ||
211 | */ | ||
212 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
213 | } | ||
214 | |||
215 | /* Validate wake_device is of type Device */ | ||
216 | |||
217 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
218 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
219 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
220 | } | ||
221 | |||
222 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
223 | |||
224 | /* Ensure that we have a valid GPE number */ | ||
225 | |||
226 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
227 | if (gpe_event_info) { | ||
228 | /* | ||
229 | * If there is no method or handler for this GPE, then the | ||
230 | * wake_device will be notified whenever this GPE fires (aka | ||
231 | * "implicit notify"). Note: The GPE is assumed to be | ||
232 | * level-triggered (for Windows compatibility). | ||
233 | */ | ||
234 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
235 | ACPI_GPE_DISPATCH_NONE) { | ||
236 | gpe_event_info->flags = | ||
237 | (ACPI_GPE_DISPATCH_NOTIFY | | ||
238 | ACPI_GPE_LEVEL_TRIGGERED); | ||
239 | gpe_event_info->dispatch.device_node = device_node; | ||
240 | } | ||
241 | |||
242 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
243 | status = AE_OK; | ||
244 | } | ||
245 | |||
246 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
247 | return_ACPI_STATUS(status); | ||
248 | } | ||
249 | ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) | ||
250 | |||
251 | /******************************************************************************* | ||
252 | * | ||
253 | * FUNCTION: acpi_set_gpe_wake_mask | ||
254 | * | ||
255 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
256 | * gpe_number - GPE level within the GPE block | ||
257 | * Action - Enable or Disable | ||
258 | * | ||
259 | * RETURN: Status | ||
260 | * | ||
261 | * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must | ||
262 | * already be marked as a WAKE GPE. | ||
263 | * | ||
264 | ******************************************************************************/ | ||
265 | |||
266 | acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
267 | { | ||
268 | acpi_status status = AE_OK; | ||
269 | struct acpi_gpe_event_info *gpe_event_info; | ||
270 | struct acpi_gpe_register_info *gpe_register_info; | ||
271 | acpi_cpu_flags flags; | ||
272 | u32 register_bit; | ||
273 | |||
274 | ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask); | ||
275 | |||
276 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
277 | |||
278 | /* | ||
279 | * Ensure that we have a valid GPE number and that this GPE is in | ||
280 | * fact a wake GPE | ||
281 | */ | ||
282 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
283 | if (!gpe_event_info) { | ||
284 | status = AE_BAD_PARAMETER; | ||
285 | goto unlock_and_exit; | ||
286 | } | ||
287 | |||
288 | if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
289 | status = AE_TYPE; | ||
290 | goto unlock_and_exit; | ||
291 | } | ||
292 | |||
293 | gpe_register_info = gpe_event_info->register_info; | ||
294 | if (!gpe_register_info) { | ||
295 | status = AE_NOT_EXIST; | ||
296 | goto unlock_and_exit; | ||
297 | } | ||
298 | |||
299 | register_bit = | ||
300 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
301 | |||
302 | /* Perform the action */ | ||
303 | |||
304 | switch (action) { | ||
305 | case ACPI_GPE_ENABLE: | ||
306 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, | ||
307 | (u8)register_bit); | ||
308 | break; | ||
309 | |||
310 | case ACPI_GPE_DISABLE: | ||
311 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
312 | (u8)register_bit); | ||
313 | break; | ||
314 | |||
315 | default: | ||
316 | ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); | ||
317 | status = AE_BAD_PARAMETER; | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | unlock_and_exit: | ||
322 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
323 | return_ACPI_STATUS(status); | ||
324 | } | ||
325 | |||
326 | ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask) | ||
327 | |||
328 | /******************************************************************************* | ||
329 | * | ||
330 | * FUNCTION: acpi_clear_gpe | ||
331 | * | ||
332 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
333 | * gpe_number - GPE level within the GPE block | ||
334 | * | ||
335 | * RETURN: Status | ||
336 | * | ||
337 | * DESCRIPTION: Clear an ACPI event (general purpose) | ||
338 | * | ||
339 | ******************************************************************************/ | ||
340 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
341 | { | ||
342 | acpi_status status = AE_OK; | ||
343 | struct acpi_gpe_event_info *gpe_event_info; | ||
344 | acpi_cpu_flags flags; | ||
345 | |||
346 | ACPI_FUNCTION_TRACE(acpi_clear_gpe); | ||
347 | |||
348 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
349 | |||
350 | /* Ensure that we have a valid GPE number */ | ||
351 | |||
352 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
353 | if (!gpe_event_info) { | ||
354 | status = AE_BAD_PARAMETER; | ||
355 | goto unlock_and_exit; | ||
356 | } | ||
357 | |||
358 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
359 | |||
360 | unlock_and_exit: | ||
361 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
362 | return_ACPI_STATUS(status); | ||
363 | } | ||
364 | |||
365 | ACPI_EXPORT_SYMBOL(acpi_clear_gpe) | ||
366 | |||
367 | /******************************************************************************* | ||
368 | * | ||
369 | * FUNCTION: acpi_get_gpe_status | ||
370 | * | ||
371 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
372 | * gpe_number - GPE level within the GPE block | ||
373 | * event_status - Where the current status of the event will | ||
374 | * be returned | ||
375 | * | ||
376 | * RETURN: Status | ||
377 | * | ||
378 | * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled) | ||
379 | * | ||
380 | ******************************************************************************/ | ||
381 | acpi_status | ||
382 | acpi_get_gpe_status(acpi_handle gpe_device, | ||
383 | u32 gpe_number, acpi_event_status *event_status) | ||
384 | { | ||
385 | acpi_status status = AE_OK; | ||
386 | struct acpi_gpe_event_info *gpe_event_info; | ||
387 | acpi_cpu_flags flags; | ||
388 | |||
389 | ACPI_FUNCTION_TRACE(acpi_get_gpe_status); | ||
390 | |||
391 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
392 | |||
393 | /* Ensure that we have a valid GPE number */ | ||
394 | |||
395 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
396 | if (!gpe_event_info) { | ||
397 | status = AE_BAD_PARAMETER; | ||
398 | goto unlock_and_exit; | ||
399 | } | ||
400 | |||
401 | /* Obtain status on the requested GPE number */ | ||
402 | |||
403 | status = acpi_hw_get_gpe_status(gpe_event_info, event_status); | ||
404 | |||
405 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
406 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
407 | |||
408 | unlock_and_exit: | ||
409 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
410 | return_ACPI_STATUS(status); | ||
411 | } | ||
412 | |||
413 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | ||
414 | |||
415 | /****************************************************************************** | ||
416 | * | ||
417 | * FUNCTION: acpi_disable_all_gpes | ||
418 | * | ||
419 | * PARAMETERS: None | ||
420 | * | ||
421 | * RETURN: Status | ||
422 | * | ||
423 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
424 | * | ||
425 | ******************************************************************************/ | ||
426 | |||
427 | acpi_status acpi_disable_all_gpes(void) | ||
428 | { | ||
429 | acpi_status status; | ||
430 | |||
431 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
432 | |||
433 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
434 | if (ACPI_FAILURE(status)) { | ||
435 | return_ACPI_STATUS(status); | ||
436 | } | ||
437 | |||
438 | status = acpi_hw_disable_all_gpes(); | ||
439 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
440 | |||
441 | return_ACPI_STATUS(status); | ||
442 | } | ||
443 | |||
444 | ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes) | ||
445 | |||
446 | /****************************************************************************** | ||
447 | * | ||
448 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
449 | * | ||
450 | * PARAMETERS: None | ||
451 | * | ||
452 | * RETURN: Status | ||
453 | * | ||
454 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
455 | * | ||
456 | ******************************************************************************/ | ||
457 | |||
458 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
459 | { | ||
460 | acpi_status status; | ||
461 | |||
462 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
463 | |||
464 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
465 | if (ACPI_FAILURE(status)) { | ||
466 | return_ACPI_STATUS(status); | ||
467 | } | ||
468 | |||
469 | status = acpi_hw_enable_all_runtime_gpes(); | ||
470 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
471 | |||
472 | return_ACPI_STATUS(status); | ||
473 | } | ||
474 | |||
475 | ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) | ||
476 | |||
477 | /******************************************************************************* | ||
478 | * | ||
479 | * FUNCTION: acpi_install_gpe_block | ||
480 | * | ||
481 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
482 | * gpe_block_address - Address and space_ID | ||
483 | * register_count - Number of GPE register pairs in the block | ||
484 | * interrupt_number - H/W interrupt for the block | ||
485 | * | ||
486 | * RETURN: Status | ||
487 | * | ||
488 | * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not | ||
489 | * enabled here. | ||
490 | * | ||
491 | ******************************************************************************/ | ||
492 | acpi_status | ||
493 | acpi_install_gpe_block(acpi_handle gpe_device, | ||
494 | struct acpi_generic_address *gpe_block_address, | ||
495 | u32 register_count, u32 interrupt_number) | ||
496 | { | ||
497 | acpi_status status; | ||
498 | union acpi_operand_object *obj_desc; | ||
499 | struct acpi_namespace_node *node; | ||
500 | struct acpi_gpe_block_info *gpe_block; | ||
501 | |||
502 | ACPI_FUNCTION_TRACE(acpi_install_gpe_block); | ||
503 | |||
504 | if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { | ||
505 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
506 | } | ||
507 | |||
508 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
509 | if (ACPI_FAILURE(status)) { | ||
510 | return (status); | ||
511 | } | ||
512 | |||
513 | node = acpi_ns_validate_handle(gpe_device); | ||
514 | if (!node) { | ||
515 | status = AE_BAD_PARAMETER; | ||
516 | goto unlock_and_exit; | ||
517 | } | ||
518 | |||
519 | /* | ||
520 | * For user-installed GPE Block Devices, the gpe_block_base_number | ||
521 | * is always zero | ||
522 | */ | ||
523 | status = | ||
524 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | ||
525 | interrupt_number, &gpe_block); | ||
526 | if (ACPI_FAILURE(status)) { | ||
527 | goto unlock_and_exit; | ||
528 | } | ||
529 | |||
530 | /* Install block in the device_object attached to the node */ | ||
531 | |||
532 | obj_desc = acpi_ns_get_attached_object(node); | ||
533 | if (!obj_desc) { | ||
534 | |||
535 | /* | ||
536 | * No object, create a new one (Device nodes do not always have | ||
537 | * an attached object) | ||
538 | */ | ||
539 | obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); | ||
540 | if (!obj_desc) { | ||
541 | status = AE_NO_MEMORY; | ||
542 | goto unlock_and_exit; | ||
543 | } | ||
544 | |||
545 | status = | ||
546 | acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); | ||
547 | |||
548 | /* Remove local reference to the object */ | ||
549 | |||
550 | acpi_ut_remove_reference(obj_desc); | ||
551 | |||
552 | if (ACPI_FAILURE(status)) { | ||
553 | goto unlock_and_exit; | ||
554 | } | ||
555 | } | ||
556 | |||
557 | /* Now install the GPE block in the device_object */ | ||
558 | |||
559 | obj_desc->device.gpe_block = gpe_block; | ||
560 | |||
561 | unlock_and_exit: | ||
562 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
563 | return_ACPI_STATUS(status); | ||
564 | } | ||
565 | |||
566 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) | ||
567 | |||
568 | /******************************************************************************* | ||
569 | * | ||
570 | * FUNCTION: acpi_remove_gpe_block | ||
571 | * | ||
572 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
573 | * | ||
574 | * RETURN: Status | ||
575 | * | ||
576 | * DESCRIPTION: Remove a previously installed block of GPE registers | ||
577 | * | ||
578 | ******************************************************************************/ | ||
579 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | ||
580 | { | ||
581 | union acpi_operand_object *obj_desc; | ||
582 | acpi_status status; | ||
583 | struct acpi_namespace_node *node; | ||
584 | |||
585 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); | ||
586 | |||
587 | if (!gpe_device) { | ||
588 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
589 | } | ||
590 | |||
591 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
592 | if (ACPI_FAILURE(status)) { | ||
593 | return (status); | ||
594 | } | ||
595 | |||
596 | node = acpi_ns_validate_handle(gpe_device); | ||
597 | if (!node) { | ||
598 | status = AE_BAD_PARAMETER; | ||
599 | goto unlock_and_exit; | ||
600 | } | ||
601 | |||
602 | /* Get the device_object attached to the node */ | ||
603 | |||
604 | obj_desc = acpi_ns_get_attached_object(node); | ||
605 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
606 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
607 | } | ||
608 | |||
609 | /* Delete the GPE block (but not the device_object) */ | ||
610 | |||
611 | status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); | ||
612 | if (ACPI_SUCCESS(status)) { | ||
613 | obj_desc->device.gpe_block = NULL; | ||
614 | } | ||
615 | |||
616 | unlock_and_exit: | ||
617 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
618 | return_ACPI_STATUS(status); | ||
619 | } | ||
620 | |||
621 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | ||
622 | |||
623 | /******************************************************************************* | ||
624 | * | ||
625 | * FUNCTION: acpi_get_gpe_device | ||
626 | * | ||
627 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
628 | * gpe_device - Where the parent GPE Device is returned | ||
629 | * | ||
630 | * RETURN: Status | ||
631 | * | ||
632 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
633 | * gpe device indicates that the gpe number is contained in one of | ||
634 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | ||
635 | * | ||
636 | ******************************************************************************/ | ||
637 | acpi_status | ||
638 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
639 | { | ||
640 | struct acpi_gpe_device_info info; | ||
641 | acpi_status status; | ||
642 | |||
643 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
644 | |||
645 | if (!gpe_device) { | ||
646 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
647 | } | ||
648 | |||
649 | if (index >= acpi_current_gpe_count) { | ||
650 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
651 | } | ||
652 | |||
653 | /* Setup and walk the GPE list */ | ||
654 | |||
655 | info.index = index; | ||
656 | info.status = AE_NOT_EXIST; | ||
657 | info.gpe_device = NULL; | ||
658 | info.next_block_base_index = 0; | ||
659 | |||
660 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
661 | if (ACPI_FAILURE(status)) { | ||
662 | return_ACPI_STATUS(status); | ||
663 | } | ||
664 | |||
665 | *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device); | ||
666 | return_ACPI_STATUS(info.status); | ||
667 | } | ||
668 | |||
669 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
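The three entry points above (acpi_install_gpe_block(), acpi_remove_gpe_block(), acpi_get_gpe_device()) make up the exported GPE block device interface. A minimal, hedged sketch of how a caller might pair install/remove follows; the handle is assumed to name a GPE block device node, and the I/O address, register count and interrupt number are illustrative values, not taken from this patch.

#include <linux/acpi.h>

static acpi_status example_attach_gpe_block(acpi_handle gpe_device)
{
	/* Describe the register block; all numbers here are assumptions. */
	struct acpi_generic_address gpe_blk = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
		.address  = 0x1000,
	};

	/* Two GPE registers (16 events), delivered on interrupt 9. */
	return acpi_install_gpe_block(gpe_device, &gpe_blk, 2, 9);
}

static acpi_status example_detach_gpe_block(acpi_handle gpe_device)
{
	/* Deletes the GPE block but leaves the namespace node alone. */
	return acpi_remove_gpe_block(gpe_device);
}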
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 14750db2a1b8..85c3cbd4304d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
62 | * PARAMETERS: gpe_event_info - Info block for the GPE | 62 | * PARAMETERS: gpe_event_info - Info block for the GPE |
63 | * gpe_register_info - Info block for the GPE register | 63 | * gpe_register_info - Info block for the GPE register |
64 | * | 64 | * |
65 | * RETURN: Status | 65 | * RETURN: Register mask with a one in the GPE bit position |
66 | * | 66 | * |
67 | * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given | 67 | * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the |
68 | * GPE set. | 68 | * correct position for the input GPE. |
69 | * | 69 | * |
70 | ******************************************************************************/ | 70 | ******************************************************************************/ |
71 | 71 | ||
@@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, | |||
85 | * | 85 | * |
86 | * RETURN: Status | 86 | * RETURN: Status |
87 | * | 87 | * |
88 | * DESCRIPTION: Enable or disable a single GPE in its enable register. | 88 | * DESCRIPTION: Enable or disable a single GPE in the parent enable register. |
89 | * | 89 | * |
90 | ******************************************************************************/ | 90 | ******************************************************************************/ |
91 | 91 | ||
92 | acpi_status | 92 | acpi_status |
93 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | 93 | acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) |
94 | { | 94 | { |
95 | struct acpi_gpe_register_info *gpe_register_info; | 95 | struct acpi_gpe_register_info *gpe_register_info; |
96 | acpi_status status; | 96 | acpi_status status; |
@@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | |||
113 | return (status); | 113 | return (status); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Set ot clear just the bit that corresponds to this GPE */ | 116 | /* Set or clear just the bit that corresponds to this GPE */ |
117 | 117 | ||
118 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, | 118 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, |
119 | gpe_register_info); | 119 | gpe_register_info); |
120 | switch (action) { | 120 | switch (action) { |
121 | case ACPI_GPE_COND_ENABLE: | 121 | case ACPI_GPE_CONDITIONAL_ENABLE: |
122 | if (!(register_bit & gpe_register_info->enable_for_run)) | 122 | |
123 | /* Only enable if the enable_for_run bit is set */ | ||
124 | |||
125 | if (!(register_bit & gpe_register_info->enable_for_run)) { | ||
123 | return (AE_BAD_PARAMETER); | 126 | return (AE_BAD_PARAMETER); |
127 | } | ||
128 | |||
129 | /*lint -fallthrough */ | ||
124 | 130 | ||
125 | case ACPI_GPE_ENABLE: | 131 | case ACPI_GPE_ENABLE: |
126 | ACPI_SET_BIT(enable_mask, register_bit); | 132 | ACPI_SET_BIT(enable_mask, register_bit); |
@@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) | |||
131 | break; | 137 | break; |
132 | 138 | ||
133 | default: | 139 | default: |
134 | ACPI_ERROR((AE_INFO, "Invalid action\n")); | 140 | ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); |
135 | return (AE_BAD_PARAMETER); | 141 | return (AE_BAD_PARAMETER); |
136 | } | 142 | } |
137 | 143 | ||
@@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) | |||
168 | return (AE_NOT_EXIST); | 174 | return (AE_NOT_EXIST); |
169 | } | 175 | } |
170 | 176 | ||
171 | register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, | ||
172 | gpe_register_info); | ||
173 | |||
174 | /* | 177 | /* |
175 | * Write a one to the appropriate bit in the status register to | 178 | * Write a one to the appropriate bit in the status register to |
176 | * clear this GPE. | 179 | * clear this GPE. |
177 | */ | 180 | */ |
181 | register_bit = | ||
182 | acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); | ||
183 | |||
178 | status = acpi_hw_write(register_bit, | 184 | status = acpi_hw_write(register_bit, |
179 | &gpe_register_info->status_address); | 185 | &gpe_register_info->status_address); |
180 | 186 | ||
@@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, | |||
201 | u32 in_byte; | 207 | u32 in_byte; |
202 | u32 register_bit; | 208 | u32 register_bit; |
203 | struct acpi_gpe_register_info *gpe_register_info; | 209 | struct acpi_gpe_register_info *gpe_register_info; |
204 | acpi_status status; | ||
205 | acpi_event_status local_event_status = 0; | 210 | acpi_event_status local_event_status = 0; |
211 | acpi_status status; | ||
206 | 212 | ||
207 | ACPI_FUNCTION_ENTRY(); | 213 | ACPI_FUNCTION_ENTRY(); |
208 | 214 | ||
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index e87bc6760be6..508537f884ac 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
@@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void) | |||
768 | acpi_gbl_gpe_fadt_blocks[0] = NULL; | 768 | acpi_gbl_gpe_fadt_blocks[0] = NULL; |
769 | acpi_gbl_gpe_fadt_blocks[1] = NULL; | 769 | acpi_gbl_gpe_fadt_blocks[1] = NULL; |
770 | acpi_current_gpe_count = 0; | 770 | acpi_current_gpe_count = 0; |
771 | acpi_all_gpes_initialized = FALSE; | 771 | acpi_gbl_all_gpes_initialized = FALSE; |
772 | 772 | ||
773 | /* Global handlers */ | 773 | /* Global handlers */ |
774 | 774 | ||
@@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void) | |||
778 | acpi_gbl_init_handler = NULL; | 778 | acpi_gbl_init_handler = NULL; |
779 | acpi_gbl_table_handler = NULL; | 779 | acpi_gbl_table_handler = NULL; |
780 | acpi_gbl_interface_handler = NULL; | 780 | acpi_gbl_interface_handler = NULL; |
781 | acpi_gbl_global_event_handler = NULL; | ||
781 | 782 | ||
782 | /* Global Lock support */ | 783 | /* Global Lock support */ |
783 | 784 | ||
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 1211c03149e8..5850d320404c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -86,7 +86,7 @@ static struct erst_erange { | |||
86 | * It is used to provide exclusive accessing for ERST Error Log | 86 | * It is used to provide exclusive accessing for ERST Error Log |
87 | * Address Range too. | 87 | * Address Range too. |
88 | */ | 88 | */ |
89 | static DEFINE_SPINLOCK(erst_lock); | 89 | static DEFINE_RAW_SPINLOCK(erst_lock); |
90 | 90 | ||
91 | static inline int erst_errno(int command_status) | 91 | static inline int erst_errno(int command_status) |
92 | { | 92 | { |
@@ -421,9 +421,9 @@ ssize_t erst_get_record_count(void) | |||
421 | if (erst_disable) | 421 | if (erst_disable) |
422 | return -ENODEV; | 422 | return -ENODEV; |
423 | 423 | ||
424 | spin_lock_irqsave(&erst_lock, flags); | 424 | raw_spin_lock_irqsave(&erst_lock, flags); |
425 | count = __erst_get_record_count(); | 425 | count = __erst_get_record_count(); |
426 | spin_unlock_irqrestore(&erst_lock, flags); | 426 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
427 | 427 | ||
428 | return count; | 428 | return count; |
429 | } | 429 | } |
@@ -456,9 +456,9 @@ int erst_get_next_record_id(u64 *record_id) | |||
456 | if (erst_disable) | 456 | if (erst_disable) |
457 | return -ENODEV; | 457 | return -ENODEV; |
458 | 458 | ||
459 | spin_lock_irqsave(&erst_lock, flags); | 459 | raw_spin_lock_irqsave(&erst_lock, flags); |
460 | rc = __erst_get_next_record_id(record_id); | 460 | rc = __erst_get_next_record_id(record_id); |
461 | spin_unlock_irqrestore(&erst_lock, flags); | 461 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
462 | 462 | ||
463 | return rc; | 463 | return rc; |
464 | } | 464 | } |
@@ -624,17 +624,17 @@ int erst_write(const struct cper_record_header *record) | |||
624 | return -EINVAL; | 624 | return -EINVAL; |
625 | 625 | ||
626 | if (erst_erange.attr & ERST_RANGE_NVRAM) { | 626 | if (erst_erange.attr & ERST_RANGE_NVRAM) { |
627 | if (!spin_trylock_irqsave(&erst_lock, flags)) | 627 | if (!raw_spin_trylock_irqsave(&erst_lock, flags)) |
628 | return -EBUSY; | 628 | return -EBUSY; |
629 | rc = __erst_write_to_nvram(record); | 629 | rc = __erst_write_to_nvram(record); |
630 | spin_unlock_irqrestore(&erst_lock, flags); | 630 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
631 | return rc; | 631 | return rc; |
632 | } | 632 | } |
633 | 633 | ||
634 | if (record->record_length > erst_erange.size) | 634 | if (record->record_length > erst_erange.size) |
635 | return -EINVAL; | 635 | return -EINVAL; |
636 | 636 | ||
637 | if (!spin_trylock_irqsave(&erst_lock, flags)) | 637 | if (!raw_spin_trylock_irqsave(&erst_lock, flags)) |
638 | return -EBUSY; | 638 | return -EBUSY; |
639 | memcpy(erst_erange.vaddr, record, record->record_length); | 639 | memcpy(erst_erange.vaddr, record, record->record_length); |
640 | rcd_erange = erst_erange.vaddr; | 640 | rcd_erange = erst_erange.vaddr; |
@@ -642,7 +642,7 @@ int erst_write(const struct cper_record_header *record) | |||
642 | memcpy(&rcd_erange->persistence_information, "ER", 2); | 642 | memcpy(&rcd_erange->persistence_information, "ER", 2); |
643 | 643 | ||
644 | rc = __erst_write_to_storage(0); | 644 | rc = __erst_write_to_storage(0); |
645 | spin_unlock_irqrestore(&erst_lock, flags); | 645 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
646 | 646 | ||
647 | return rc; | 647 | return rc; |
648 | } | 648 | } |
@@ -696,9 +696,9 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record, | |||
696 | if (erst_disable) | 696 | if (erst_disable) |
697 | return -ENODEV; | 697 | return -ENODEV; |
698 | 698 | ||
699 | spin_lock_irqsave(&erst_lock, flags); | 699 | raw_spin_lock_irqsave(&erst_lock, flags); |
700 | len = __erst_read(record_id, record, buflen); | 700 | len = __erst_read(record_id, record, buflen); |
701 | spin_unlock_irqrestore(&erst_lock, flags); | 701 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
702 | return len; | 702 | return len; |
703 | } | 703 | } |
704 | EXPORT_SYMBOL_GPL(erst_read); | 704 | EXPORT_SYMBOL_GPL(erst_read); |
@@ -719,20 +719,20 @@ ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) | |||
719 | if (erst_disable) | 719 | if (erst_disable) |
720 | return -ENODEV; | 720 | return -ENODEV; |
721 | 721 | ||
722 | spin_lock_irqsave(&erst_lock, flags); | 722 | raw_spin_lock_irqsave(&erst_lock, flags); |
723 | rc = __erst_get_next_record_id(&record_id); | 723 | rc = __erst_get_next_record_id(&record_id); |
724 | if (rc) { | 724 | if (rc) { |
725 | spin_unlock_irqrestore(&erst_lock, flags); | 725 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
726 | return rc; | 726 | return rc; |
727 | } | 727 | } |
728 | /* no more record */ | 728 | /* no more record */ |
729 | if (record_id == APEI_ERST_INVALID_RECORD_ID) { | 729 | if (record_id == APEI_ERST_INVALID_RECORD_ID) { |
730 | spin_unlock_irqrestore(&erst_lock, flags); | 730 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
731 | return 0; | 731 | return 0; |
732 | } | 732 | } |
733 | 733 | ||
734 | len = __erst_read(record_id, record, buflen); | 734 | len = __erst_read(record_id, record, buflen); |
735 | spin_unlock_irqrestore(&erst_lock, flags); | 735 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
736 | 736 | ||
737 | return len; | 737 | return len; |
738 | } | 738 | } |
@@ -746,12 +746,12 @@ int erst_clear(u64 record_id) | |||
746 | if (erst_disable) | 746 | if (erst_disable) |
747 | return -ENODEV; | 747 | return -ENODEV; |
748 | 748 | ||
749 | spin_lock_irqsave(&erst_lock, flags); | 749 | raw_spin_lock_irqsave(&erst_lock, flags); |
750 | if (erst_erange.attr & ERST_RANGE_NVRAM) | 750 | if (erst_erange.attr & ERST_RANGE_NVRAM) |
751 | rc = __erst_clear_from_nvram(record_id); | 751 | rc = __erst_clear_from_nvram(record_id); |
752 | else | 752 | else |
753 | rc = __erst_clear_from_storage(record_id); | 753 | rc = __erst_clear_from_storage(record_id); |
754 | spin_unlock_irqrestore(&erst_lock, flags); | 754 | raw_spin_unlock_irqrestore(&erst_lock, flags); |
755 | 755 | ||
756 | return rc; | 756 | return rc; |
757 | } | 757 | } |
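erst_lock becomes a raw spinlock, so it keeps busy-waiting semantics even in configurations where ordinary spinlocks may sleep; the atomic error-reporting paths that call into ERST rely on that. A hedged sketch of the locking pattern now used throughout the file:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static int example_read_under_lock(void)
{
	unsigned long flags;
	int value;

	raw_spin_lock_irqsave(&example_lock, flags);
	value = 42;	/* stand-in for the guarded register/NVRAM access */
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return value;
}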
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 1a3508a7fe03..daa7bc63f1d4 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c | |||
@@ -46,9 +46,9 @@ EXPORT_SYMBOL_GPL(hest_disable); | |||
46 | 46 | ||
47 | /* HEST table parsing */ | 47 | /* HEST table parsing */ |
48 | 48 | ||
49 | static struct acpi_table_hest *hest_tab; | 49 | static struct acpi_table_hest *__read_mostly hest_tab; |
50 | 50 | ||
51 | static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { | 51 | static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { |
52 | [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ | 52 | [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ |
53 | [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, | 53 | [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, |
54 | [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), | 54 | [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), |
@@ -126,7 +126,7 @@ struct ghes_arr { | |||
126 | unsigned int count; | 126 | unsigned int count; |
127 | }; | 127 | }; |
128 | 128 | ||
129 | static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) | 129 | static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) |
130 | { | 130 | { |
131 | int *count = data; | 131 | int *count = data; |
132 | 132 | ||
@@ -135,7 +135,7 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) | 138 | static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) |
139 | { | 139 | { |
140 | struct platform_device *ghes_dev; | 140 | struct platform_device *ghes_dev; |
141 | struct ghes_arr *ghes_arr = data; | 141 | struct ghes_arr *ghes_arr = data; |
@@ -165,7 +165,7 @@ err: | |||
165 | return rc; | 165 | return rc; |
166 | } | 166 | } |
167 | 167 | ||
168 | static int hest_ghes_dev_register(unsigned int ghes_count) | 168 | static int __init hest_ghes_dev_register(unsigned int ghes_count) |
169 | { | 169 | { |
170 | int rc, i; | 170 | int rc, i; |
171 | struct ghes_arr ghes_arr; | 171 | struct ghes_arr ghes_arr; |
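The HEST parsing helpers only run at boot, so they gain __init (and the tables __read_mostly/const). A hedged sketch of the same annotation pattern on made-up names:

#include <linux/init.h>
#include <linux/cache.h>

static int __read_mostly example_source_count;	/* written once during boot */

static int __init example_parse_table(void)
{
	/* Runs once at boot; its text is freed with other __init code. */
	example_source_count = 4;
	return 0;
}
device_initcall(example_parse_table);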
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 95649d373071..2a31421e0d75 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -868,6 +868,8 @@ static int acpi_battery_add_fs(struct acpi_device *device) | |||
868 | struct proc_dir_entry *entry = NULL; | 868 | struct proc_dir_entry *entry = NULL; |
869 | int i; | 869 | int i; |
870 | 870 | ||
871 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," | ||
872 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
871 | if (!acpi_device_dir(device)) { | 873 | if (!acpi_device_dir(device)) { |
872 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | 874 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), |
873 | acpi_battery_dir); | 875 | acpi_battery_dir); |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d68bd61072bb..7ced61f39492 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir); | |||
52 | 52 | ||
53 | #define STRUCT_TO_INT(s) (*((int*)&s)) | 53 | #define STRUCT_TO_INT(s) (*((int*)&s)) |
54 | 54 | ||
55 | static int set_power_nocheck(const struct dmi_system_id *id) | ||
56 | { | ||
57 | printk(KERN_NOTICE PREFIX "%s detected - " | ||
58 | "disable power check in power transition\n", id->ident); | ||
59 | acpi_power_nocheck = 1; | ||
60 | return 0; | ||
61 | } | ||
62 | static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = { | ||
63 | { | ||
64 | set_power_nocheck, "HP Pavilion 05", { | ||
65 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), | ||
66 | DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"), | ||
67 | DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL}, | ||
68 | {}, | ||
69 | }; | ||
70 | |||
71 | 55 | ||
72 | #ifdef CONFIG_X86 | 56 | #ifdef CONFIG_X86 |
73 | static int set_copy_dsdt(const struct dmi_system_id *id) | 57 | static int set_copy_dsdt(const struct dmi_system_id *id) |
@@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); | |||
196 | Power Management | 180 | Power Management |
197 | -------------------------------------------------------------------------- */ | 181 | -------------------------------------------------------------------------- */ |
198 | 182 | ||
199 | int acpi_bus_get_power(acpi_handle handle, int *state) | 183 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) |
200 | { | 184 | { |
201 | int result = 0; | 185 | int result = 0; |
202 | acpi_status status = 0; | 186 | acpi_status status = 0; |
203 | struct acpi_device *device = NULL; | ||
204 | unsigned long long psc = 0; | 187 | unsigned long long psc = 0; |
205 | 188 | ||
206 | 189 | if (!device || !state) | |
207 | result = acpi_bus_get_device(handle, &device); | 190 | return -EINVAL; |
208 | if (result) | ||
209 | return result; | ||
210 | 191 | ||
211 | *state = ACPI_STATE_UNKNOWN; | 192 | *state = ACPI_STATE_UNKNOWN; |
212 | 193 | ||
213 | if (!device->flags.power_manageable) { | 194 | if (device->flags.power_manageable) { |
214 | /* TBD: Non-recursive algorithm for walking up hierarchy */ | ||
215 | if (device->parent) | ||
216 | *state = device->parent->power.state; | ||
217 | else | ||
218 | *state = ACPI_STATE_D0; | ||
219 | } else { | ||
220 | /* | 195 | /* |
221 | * Get the device's power state either directly (via _PSC) or | 196 | * Get the device's power state either directly (via _PSC) or |
222 | * indirectly (via power resources). | 197 | * indirectly (via power resources). |
223 | */ | 198 | */ |
224 | if (device->power.flags.power_resources) { | 199 | if (device->power.flags.power_resources) { |
225 | result = acpi_power_get_inferred_state(device); | 200 | result = acpi_power_get_inferred_state(device, state); |
226 | if (result) | 201 | if (result) |
227 | return result; | 202 | return result; |
228 | } else if (device->power.flags.explicit_get) { | 203 | } else if (device->power.flags.explicit_get) { |
@@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state) | |||
230 | NULL, &psc); | 205 | NULL, &psc); |
231 | if (ACPI_FAILURE(status)) | 206 | if (ACPI_FAILURE(status)) |
232 | return -ENODEV; | 207 | return -ENODEV; |
233 | device->power.state = (int)psc; | 208 | *state = (int)psc; |
234 | } | 209 | } |
235 | 210 | } else { | |
236 | *state = device->power.state; | 211 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
212 | *state = device->parent ? | ||
213 | device->parent->power.state : ACPI_STATE_D0; | ||
237 | } | 214 | } |
238 | 215 | ||
239 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", | 216 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", |
240 | device->pnp.bus_id, device->power.state)); | 217 | device->pnp.bus_id, *state)); |
241 | 218 | ||
242 | return 0; | 219 | return 0; |
243 | } | 220 | } |
244 | 221 | ||
245 | EXPORT_SYMBOL(acpi_bus_get_power); | ||
246 | 222 | ||
247 | int acpi_bus_set_power(acpi_handle handle, int state) | 223 | static int __acpi_bus_set_power(struct acpi_device *device, int state) |
248 | { | 224 | { |
249 | int result = 0; | 225 | int result = 0; |
250 | acpi_status status = AE_OK; | 226 | acpi_status status = AE_OK; |
251 | struct acpi_device *device = NULL; | ||
252 | char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; | 227 | char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; |
253 | 228 | ||
254 | 229 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | |
255 | result = acpi_bus_get_device(handle, &device); | ||
256 | if (result) | ||
257 | return result; | ||
258 | |||
259 | if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | ||
260 | return -EINVAL; | 230 | return -EINVAL; |
261 | 231 | ||
262 | /* Make sure this is a valid target state */ | 232 | /* Make sure this is a valid target state */ |
263 | 233 | ||
264 | if (!device->flags.power_manageable) { | 234 | if (state == device->power.state) { |
265 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", | ||
266 | kobject_name(&device->dev.kobj))); | ||
267 | return -ENODEV; | ||
268 | } | ||
269 | /* | ||
270 | * Get device's current power state | ||
271 | */ | ||
272 | if (!acpi_power_nocheck) { | ||
273 | /* | ||
274 | * Maybe the incorrect power state is returned on the bogus | ||
275 | * bios, which is different with the real power state. | ||
276 | * For example: the bios returns D0 state and the real power | ||
277 | * state is D3. OS expects to set the device to D0 state. In | ||
278 | * such case if OS uses the power state returned by the BIOS, | ||
279 | * the device can't be transisted to the correct power state. | ||
280 | * So if the acpi_power_nocheck is set, it is unnecessary to | ||
281 | * get the power state by calling acpi_bus_get_power. | ||
282 | */ | ||
283 | acpi_bus_get_power(device->handle, &device->power.state); | ||
284 | } | ||
285 | if ((state == device->power.state) && !device->flags.force_power_state) { | ||
286 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", | 235 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", |
287 | state)); | 236 | state)); |
288 | return 0; | 237 | return 0; |
@@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state) | |||
351 | return result; | 300 | return result; |
352 | } | 301 | } |
353 | 302 | ||
303 | |||
304 | int acpi_bus_set_power(acpi_handle handle, int state) | ||
305 | { | ||
306 | struct acpi_device *device; | ||
307 | int result; | ||
308 | |||
309 | result = acpi_bus_get_device(handle, &device); | ||
310 | if (result) | ||
311 | return result; | ||
312 | |||
313 | if (!device->flags.power_manageable) { | ||
314 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
315 | "Device [%s] is not power manageable\n", | ||
316 | dev_name(&device->dev))); | ||
317 | return -ENODEV; | ||
318 | } | ||
319 | |||
320 | return __acpi_bus_set_power(device, state); | ||
321 | } | ||
354 | EXPORT_SYMBOL(acpi_bus_set_power); | 322 | EXPORT_SYMBOL(acpi_bus_set_power); |
355 | 323 | ||
324 | |||
325 | int acpi_bus_init_power(struct acpi_device *device) | ||
326 | { | ||
327 | int state; | ||
328 | int result; | ||
329 | |||
330 | if (!device) | ||
331 | return -EINVAL; | ||
332 | |||
333 | device->power.state = ACPI_STATE_UNKNOWN; | ||
334 | |||
335 | result = __acpi_bus_get_power(device, &state); | ||
336 | if (result) | ||
337 | return result; | ||
338 | |||
339 | if (device->power.flags.power_resources) | ||
340 | result = acpi_power_on_resources(device, state); | ||
341 | |||
342 | if (!result) | ||
343 | device->power.state = state; | ||
344 | |||
345 | return result; | ||
346 | } | ||
347 | |||
348 | |||
349 | int acpi_bus_update_power(acpi_handle handle, int *state_p) | ||
350 | { | ||
351 | struct acpi_device *device; | ||
352 | int state; | ||
353 | int result; | ||
354 | |||
355 | result = acpi_bus_get_device(handle, &device); | ||
356 | if (result) | ||
357 | return result; | ||
358 | |||
359 | result = __acpi_bus_get_power(device, &state); | ||
360 | if (result) | ||
361 | return result; | ||
362 | |||
363 | result = __acpi_bus_set_power(device, state); | ||
364 | if (!result && state_p) | ||
365 | *state_p = state; | ||
366 | |||
367 | return result; | ||
368 | } | ||
369 | EXPORT_SYMBOL_GPL(acpi_bus_update_power); | ||
370 | |||
371 | |||
356 | bool acpi_bus_power_manageable(acpi_handle handle) | 372 | bool acpi_bus_power_manageable(acpi_handle handle) |
357 | { | 373 | { |
358 | struct acpi_device *device; | 374 | struct acpi_device *device; |
@@ -1023,15 +1039,8 @@ static int __init acpi_init(void) | |||
1023 | if (acpi_disabled) | 1039 | if (acpi_disabled) |
1024 | return result; | 1040 | return result; |
1025 | 1041 | ||
1026 | /* | ||
1027 | * If the laptop falls into the DMI check table, the power state check | ||
1028 | * will be disabled in the course of device power transition. | ||
1029 | */ | ||
1030 | dmi_check_system(power_nocheck_dmi_table); | ||
1031 | |||
1032 | acpi_scan_init(); | 1042 | acpi_scan_init(); |
1033 | acpi_ec_init(); | 1043 | acpi_ec_init(); |
1034 | acpi_power_init(); | ||
1035 | acpi_debugfs_init(); | 1044 | acpi_debugfs_init(); |
1036 | acpi_sleep_proc_init(); | 1045 | acpi_sleep_proc_init(); |
1037 | acpi_wakeup_device_init(); | 1046 | acpi_wakeup_device_init(); |
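acpi_bus_get_power() is no longer exported; callers are expected to move to acpi_bus_update_power(), which reads the current ACPI state and re-applies it in one step (acpi_bus_init_power() does the equivalent at enumeration time). A hedged sketch of a caller; the resume callback and the message are illustrative, not part of this patch:

#include <linux/device.h>
#include <acpi/acpi_bus.h>

static int example_driver_resume(struct acpi_device *device)
{
	int state;
	int result;

	/*
	 * One call replaces the old acpi_bus_get_power() +
	 * acpi_bus_set_power() pair.  Passing NULL instead of &state is
	 * fine when the caller does not need the resulting state (see the
	 * fan driver conversion below).
	 */
	result = acpi_bus_update_power(device->handle, &state);
	if (result)
		return result;

	dev_dbg(&device->dev, "resumed in D%d\n", state);
	return 0;
}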
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 71ef9cd0735f..76bbb78a5ad9 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device) | |||
279 | input_report_switch(button->input, SW_LID, !state); | 279 | input_report_switch(button->input, SW_LID, !state); |
280 | input_sync(button->input); | 280 | input_sync(button->input); |
281 | 281 | ||
282 | if (state) | ||
283 | pm_wakeup_event(&device->dev, 0); | ||
284 | |||
282 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); | 285 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); |
283 | if (ret == NOTIFY_DONE) | 286 | if (ret == NOTIFY_DONE) |
284 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, | 287 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, |
@@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) | |||
314 | input_sync(input); | 317 | input_sync(input); |
315 | input_report_key(input, keycode, 0); | 318 | input_report_key(input, keycode, 0); |
316 | input_sync(input); | 319 | input_sync(input); |
320 | |||
321 | pm_wakeup_event(&device->dev, 0); | ||
317 | } | 322 | } |
318 | 323 | ||
319 | acpi_bus_generate_proc_event(device, event, ++button->pushed); | 324 | acpi_bus_generate_proc_event(device, event, ++button->pushed); |
@@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device) | |||
426 | acpi_enable_gpe(device->wakeup.gpe_device, | 431 | acpi_enable_gpe(device->wakeup.gpe_device, |
427 | device->wakeup.gpe_number); | 432 | device->wakeup.gpe_number); |
428 | device->wakeup.run_wake_count++; | 433 | device->wakeup.run_wake_count++; |
429 | device->wakeup.state.enabled = 1; | 434 | device_set_wakeup_enable(&device->dev, true); |
430 | } | 435 | } |
431 | 436 | ||
432 | printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); | 437 | printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); |
@@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type) | |||
449 | acpi_disable_gpe(device->wakeup.gpe_device, | 454 | acpi_disable_gpe(device->wakeup.gpe_device, |
450 | device->wakeup.gpe_number); | 455 | device->wakeup.gpe_number); |
451 | device->wakeup.run_wake_count--; | 456 | device->wakeup.run_wake_count--; |
452 | device->wakeup.state.enabled = 0; | 457 | device_set_wakeup_enable(&device->dev, false); |
453 | } | 458 | } |
454 | 459 | ||
455 | acpi_button_remove_fs(device); | 460 | acpi_button_remove_fs(device); |
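The button driver now signals the PM core whenever the lid opens or a button is pressed, so an in-progress suspend can be aborted instead of racing with the user. A hedged sketch of the call added above:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static void example_report_user_activity(struct device *dev)
{
	/*
	 * 0 ms means "no extra processing time needed"; the PM core just
	 * records that a wakeup event happened on this device.
	 */
	pm_wakeup_event(dev, 0);
}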
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 81514a4918cc..1864ad3cf895 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
@@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) | |||
725 | complete_dock(ds); | 725 | complete_dock(ds); |
726 | dock_event(ds, event, DOCK_EVENT); | 726 | dock_event(ds, event, DOCK_EVENT); |
727 | dock_lock(ds, 1); | 727 | dock_lock(ds, 1); |
728 | acpi_update_gpes(); | 728 | acpi_update_all_gpes(); |
729 | break; | 729 | break; |
730 | } | 730 | } |
731 | if (dock_present(ds) || dock_in_progress(ds)) | 731 | if (dock_present(ds) || dock_in_progress(ds)) |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 372ff80b7b0c..fa848c4116a8 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) | |||
606 | return 0; | 606 | return 0; |
607 | } | 607 | } |
608 | 608 | ||
609 | static u32 acpi_ec_gpe_handler(void *data) | 609 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, |
610 | u32 gpe_number, void *data) | ||
610 | { | 611 | { |
611 | struct acpi_ec *ec = data; | 612 | struct acpi_ec *ec = data; |
612 | 613 | ||
@@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data) | |||
618 | wake_up(&ec->wait); | 619 | wake_up(&ec->wait); |
619 | ec_check_sci(ec, acpi_ec_read_status(ec)); | 620 | ec_check_sci(ec, acpi_ec_read_status(ec)); |
620 | } | 621 | } |
621 | return ACPI_INTERRUPT_HANDLED; | 622 | return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; |
622 | } | 623 | } |
623 | 624 | ||
624 | /* -------------------------------------------------------------------------- | 625 | /* -------------------------------------------------------------------------- |
@@ -934,6 +935,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { | |||
934 | ec_flag_msi, "MSI hardware", { | 935 | ec_flag_msi, "MSI hardware", { |
935 | DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, | 936 | DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, |
936 | { | 937 | { |
938 | ec_flag_msi, "MSI hardware", { | ||
939 | DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, | ||
940 | { | ||
937 | ec_validate_ecdt, "ASUS hardware", { | 941 | ec_validate_ecdt, "ASUS hardware", { |
938 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, | 942 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, |
939 | {}, | 943 | {}, |
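The EC's GPE handler is switched to the new three-argument prototype and now returns ACPI_REENABLE_GPE so that ACPICA re-enables the GPE on the handler's behalf. A hedged sketch of a handler written against that prototype (the body is illustrative):

#include <linux/acpi.h>

static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	/* 'context' is whatever the driver passed when installing the handler. */
	(void)context;

	/* ... acknowledge the hardware, kick a workqueue, etc. ... */

	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}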
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 60049080c869..467479f07c1f 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long | |||
86 | if (!device) | 86 | if (!device) |
87 | return -EINVAL; | 87 | return -EINVAL; |
88 | 88 | ||
89 | result = acpi_bus_get_power(device->handle, &acpi_state); | 89 | result = acpi_bus_update_power(device->handle, &acpi_state); |
90 | if (result) | 90 | if (result) |
91 | return result; | 91 | return result; |
92 | 92 | ||
@@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = { | |||
123 | static int acpi_fan_add(struct acpi_device *device) | 123 | static int acpi_fan_add(struct acpi_device *device) |
124 | { | 124 | { |
125 | int result = 0; | 125 | int result = 0; |
126 | int state = 0; | ||
127 | struct thermal_cooling_device *cdev; | 126 | struct thermal_cooling_device *cdev; |
128 | 127 | ||
129 | if (!device) | 128 | if (!device) |
@@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device) | |||
132 | strcpy(acpi_device_name(device), "Fan"); | 131 | strcpy(acpi_device_name(device), "Fan"); |
133 | strcpy(acpi_device_class(device), ACPI_FAN_CLASS); | 132 | strcpy(acpi_device_class(device), ACPI_FAN_CLASS); |
134 | 133 | ||
135 | result = acpi_bus_get_power(device->handle, &state); | 134 | result = acpi_bus_update_power(device->handle, NULL); |
136 | if (result) { | 135 | if (result) { |
137 | printk(KERN_ERR PREFIX "Reading power state\n"); | 136 | printk(KERN_ERR PREFIX "Setting initial power state\n"); |
138 | goto end; | 137 | goto end; |
139 | } | 138 | } |
140 | 139 | ||
141 | device->flags.force_power_state = 1; | ||
142 | acpi_bus_set_power(device->handle, state); | ||
143 | device->flags.force_power_state = 0; | ||
144 | |||
145 | cdev = thermal_cooling_device_register("Fan", device, | 140 | cdev = thermal_cooling_device_register("Fan", device, |
146 | &fan_cooling_ops); | 141 | &fan_cooling_ops); |
147 | if (IS_ERR(cdev)) { | 142 | if (IS_ERR(cdev)) { |
@@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) | |||
200 | 195 | ||
201 | static int acpi_fan_resume(struct acpi_device *device) | 196 | static int acpi_fan_resume(struct acpi_device *device) |
202 | { | 197 | { |
203 | int result = 0; | 198 | int result; |
204 | int power_state = 0; | ||
205 | 199 | ||
206 | if (!device) | 200 | if (!device) |
207 | return -EINVAL; | 201 | return -EINVAL; |
208 | 202 | ||
209 | result = acpi_bus_get_power(device->handle, &power_state); | 203 | result = acpi_bus_update_power(device->handle, NULL); |
210 | if (result) { | 204 | if (result) |
211 | printk(KERN_ERR PREFIX | 205 | printk(KERN_ERR PREFIX "Error updating fan power state\n"); |
212 | "Error reading fan power state\n"); | ||
213 | return result; | ||
214 | } | ||
215 | |||
216 | device->flags.force_power_state = 1; | ||
217 | acpi_bus_set_power(device->handle, power_state); | ||
218 | device->flags.force_power_state = 0; | ||
219 | 206 | ||
220 | return result; | 207 | return result; |
221 | } | 208 | } |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 78b0164c35b2..7c47ed55e528 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
167 | "firmware_node"); | 167 | "firmware_node"); |
168 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 168 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
169 | "physical_node"); | 169 | "physical_node"); |
170 | if (acpi_dev->wakeup.flags.valid) { | 170 | if (acpi_dev->wakeup.flags.valid) |
171 | device_set_wakeup_capable(dev, true); | 171 | device_set_wakeup_capable(dev, true); |
172 | device_set_wakeup_enable(dev, | ||
173 | acpi_dev->wakeup.state.enabled); | ||
174 | } | ||
175 | } | 172 | } |
176 | 173 | ||
177 | return 0; | 174 | return 0; |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a212bfeddf8c..bc428a9607df 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; } | |||
41 | int acpi_power_init(void); | 41 | int acpi_power_init(void); |
42 | int acpi_device_sleep_wake(struct acpi_device *dev, | 42 | int acpi_device_sleep_wake(struct acpi_device *dev, |
43 | int enable, int sleep_state, int dev_state); | 43 | int enable, int sleep_state, int dev_state); |
44 | int acpi_power_get_inferred_state(struct acpi_device *device); | 44 | int acpi_power_get_inferred_state(struct acpi_device *device, int *state); |
45 | int acpi_power_on_resources(struct acpi_device *device, int state); | ||
45 | int acpi_power_transition(struct acpi_device *device, int state); | 46 | int acpi_power_transition(struct acpi_device *device, int state); |
46 | extern int acpi_power_nocheck; | 47 | int acpi_bus_init_power(struct acpi_device *device); |
47 | 48 | ||
48 | int acpi_wakeup_device_init(void); | 49 | int acpi_wakeup_device_init(void); |
49 | void acpi_early_processor_set_pdc(void); | 50 | void acpi_early_processor_set_pdc(void); |
@@ -82,8 +83,16 @@ extern int acpi_sleep_init(void); | |||
82 | 83 | ||
83 | #ifdef CONFIG_ACPI_SLEEP | 84 | #ifdef CONFIG_ACPI_SLEEP |
84 | int acpi_sleep_proc_init(void); | 85 | int acpi_sleep_proc_init(void); |
86 | int suspend_nvs_alloc(void); | ||
87 | void suspend_nvs_free(void); | ||
88 | int suspend_nvs_save(void); | ||
89 | void suspend_nvs_restore(void); | ||
85 | #else | 90 | #else |
86 | static inline int acpi_sleep_proc_init(void) { return 0; } | 91 | static inline int acpi_sleep_proc_init(void) { return 0; } |
92 | static inline int suspend_nvs_alloc(void) { return 0; } | ||
93 | static inline void suspend_nvs_free(void) {} | ||
94 | static inline int suspend_nvs_save(void) {} | ||
95 | static inline void suspend_nvs_restore(void) {} | ||
87 | #endif | 96 | #endif |
88 | 97 | ||
89 | #endif /* _ACPI_INTERNAL_H_ */ | 98 | #endif /* _ACPI_INTERNAL_H_ */ |
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c new file mode 100644 index 000000000000..54b6ab8040a6 --- /dev/null +++ b/drivers/acpi/nvs.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * nvs.c - Routines for saving and restoring ACPI NVS memory region | ||
3 | * | ||
4 | * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/io.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <acpi/acpiosxf.h> | ||
16 | |||
17 | /* | ||
18 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
19 | * suspend and to restore the contents of this memory during the subsequent | ||
20 | * resume. The code below implements a mechanism allowing us to do that. | ||
21 | */ | ||
22 | |||
23 | struct nvs_page { | ||
24 | unsigned long phys_start; | ||
25 | unsigned int size; | ||
26 | void *kaddr; | ||
27 | void *data; | ||
28 | struct list_head node; | ||
29 | }; | ||
30 | |||
31 | static LIST_HEAD(nvs_list); | ||
32 | |||
33 | /** | ||
34 | * suspend_nvs_register - register platform NVS memory region to save | ||
35 | * @start - physical address of the region | ||
36 | * @size - size of the region | ||
37 | * | ||
38 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
39 | * things so that the data from page-aligned addresses in this region will | ||
40 | * be copied into separate RAM pages. | ||
41 | */ | ||
42 | int suspend_nvs_register(unsigned long start, unsigned long size) | ||
43 | { | ||
44 | struct nvs_page *entry, *next; | ||
45 | |||
46 | while (size > 0) { | ||
47 | unsigned int nr_bytes; | ||
48 | |||
49 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
50 | if (!entry) | ||
51 | goto Error; | ||
52 | |||
53 | list_add_tail(&entry->node, &nvs_list); | ||
54 | entry->phys_start = start; | ||
55 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
56 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
57 | |||
58 | start += entry->size; | ||
59 | size -= entry->size; | ||
60 | } | ||
61 | return 0; | ||
62 | |||
63 | Error: | ||
64 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
65 | list_del(&entry->node); | ||
66 | kfree(entry); | ||
67 | } | ||
68 | return -ENOMEM; | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * suspend_nvs_free - free data pages allocated for saving NVS regions | ||
73 | */ | ||
74 | void suspend_nvs_free(void) | ||
75 | { | ||
76 | struct nvs_page *entry; | ||
77 | |||
78 | list_for_each_entry(entry, &nvs_list, node) | ||
79 | if (entry->data) { | ||
80 | free_page((unsigned long)entry->data); | ||
81 | entry->data = NULL; | ||
82 | if (entry->kaddr) { | ||
83 | acpi_os_unmap_memory(entry->kaddr, entry->size); | ||
84 | entry->kaddr = NULL; | ||
85 | } | ||
86 | } | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * suspend_nvs_alloc - allocate memory necessary for saving NVS regions | ||
91 | */ | ||
92 | int suspend_nvs_alloc(void) | ||
93 | { | ||
94 | struct nvs_page *entry; | ||
95 | |||
96 | list_for_each_entry(entry, &nvs_list, node) { | ||
97 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
98 | if (!entry->data) { | ||
99 | suspend_nvs_free(); | ||
100 | return -ENOMEM; | ||
101 | } | ||
102 | } | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * suspend_nvs_save - save NVS memory regions | ||
108 | */ | ||
109 | int suspend_nvs_save(void) | ||
110 | { | ||
111 | struct nvs_page *entry; | ||
112 | |||
113 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
114 | |||
115 | list_for_each_entry(entry, &nvs_list, node) | ||
116 | if (entry->data) { | ||
117 | entry->kaddr = acpi_os_map_memory(entry->phys_start, | ||
118 | entry->size); | ||
119 | if (!entry->kaddr) { | ||
120 | suspend_nvs_free(); | ||
121 | return -ENOMEM; | ||
122 | } | ||
123 | memcpy(entry->data, entry->kaddr, entry->size); | ||
124 | } | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * suspend_nvs_restore - restore NVS memory regions | ||
131 | * | ||
132 | * This function is going to be called with interrupts disabled, so it | ||
133 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
134 | */ | ||
135 | void suspend_nvs_restore(void) | ||
136 | { | ||
137 | struct nvs_page *entry; | ||
138 | |||
139 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
140 | |||
141 | list_for_each_entry(entry, &nvs_list, node) | ||
142 | if (entry->data) | ||
143 | memcpy(entry->kaddr, entry->data, entry->size); | ||
144 | } | ||
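nvs.c is new: it keeps a list of firmware NVS ranges and copies them to spare pages across suspend/resume. A hedged sketch of the intended call sequence (the callers and error handling are illustrative; the real hooks live in the platform suspend/hibernate code):

/* Prototypes as introduced above (drivers/acpi/nvs.c, internal.h). */
int suspend_nvs_register(unsigned long start, unsigned long size);
int suspend_nvs_alloc(void);
int suspend_nvs_save(void);
void suspend_nvs_restore(void);
void suspend_nvs_free(void);

int example_prepare_sleep(unsigned long nvs_phys, unsigned long nvs_len)
{
	int error;

	/* Usually done once, when the firmware memory map is parsed. */
	error = suspend_nvs_register(nvs_phys, nvs_len);
	if (error)
		return error;

	/* Shortly before entering the sleep state. */
	error = suspend_nvs_alloc();
	if (error)
		return error;

	error = suspend_nvs_save();
	if (error)
		suspend_nvs_free();

	return error;
}

void example_finish_resume(void)
{
	/* Early in resume, while interrupts are still disabled. */
	suspend_nvs_restore();
	/* Later, from a context that may unmap and free memory again. */
	suspend_nvs_free();
}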
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 966feddf6b1b..3a7b4879fd99 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -110,9 +110,6 @@ struct acpi_ioremap { | |||
110 | static LIST_HEAD(acpi_ioremaps); | 110 | static LIST_HEAD(acpi_ioremaps); |
111 | static DEFINE_SPINLOCK(acpi_ioremap_lock); | 111 | static DEFINE_SPINLOCK(acpi_ioremap_lock); |
112 | 112 | ||
113 | #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ | ||
114 | static char osi_setup_string[OSI_STRING_LENGTH_MAX]; | ||
115 | |||
116 | static void __init acpi_osi_setup_late(void); | 113 | static void __init acpi_osi_setup_late(void); |
117 | 114 | ||
118 | /* | 115 | /* |
@@ -152,8 +149,7 @@ static struct osi_linux { | |||
152 | unsigned int enable:1; | 149 | unsigned int enable:1; |
153 | unsigned int dmi:1; | 150 | unsigned int dmi:1; |
154 | unsigned int cmdline:1; | 151 | unsigned int cmdline:1; |
155 | unsigned int known:1; | 152 | } osi_linux = {0, 0, 0}; |
156 | } osi_linux = { 0, 0, 0, 0}; | ||
157 | 153 | ||
158 | static u32 acpi_osi_handler(acpi_string interface, u32 supported) | 154 | static u32 acpi_osi_handler(acpi_string interface, u32 supported) |
159 | { | 155 | { |
@@ -324,7 +320,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
324 | 320 | ||
325 | pg_off = round_down(phys, PAGE_SIZE); | 321 | pg_off = round_down(phys, PAGE_SIZE); |
326 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; | 322 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; |
327 | virt = ioremap(pg_off, pg_sz); | 323 | virt = ioremap_cache(pg_off, pg_sz); |
328 | if (!virt) { | 324 | if (!virt) { |
329 | kfree(map); | 325 | kfree(map); |
330 | return NULL; | 326 | return NULL; |
@@ -646,7 +642,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
646 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 642 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
647 | rcu_read_unlock(); | 643 | rcu_read_unlock(); |
648 | if (!virt_addr) { | 644 | if (!virt_addr) { |
649 | virt_addr = ioremap(phys_addr, size); | 645 | virt_addr = ioremap_cache(phys_addr, size); |
650 | unmap = 1; | 646 | unmap = 1; |
651 | } | 647 | } |
652 | if (!value) | 648 | if (!value) |
@@ -682,7 +678,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | |||
682 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 678 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
683 | rcu_read_unlock(); | 679 | rcu_read_unlock(); |
684 | if (!virt_addr) { | 680 | if (!virt_addr) { |
685 | virt_addr = ioremap(phys_addr, size); | 681 | virt_addr = ioremap_cache(phys_addr, size); |
686 | unmap = 1; | 682 | unmap = 1; |
687 | } | 683 | } |
688 | 684 | ||
@@ -1055,13 +1051,53 @@ static int __init acpi_os_name_setup(char *str) | |||
1055 | 1051 | ||
1056 | __setup("acpi_os_name=", acpi_os_name_setup); | 1052 | __setup("acpi_os_name=", acpi_os_name_setup); |
1057 | 1053 | ||
1054 | #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ | ||
1055 | #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */ | ||
1056 | |||
1057 | struct osi_setup_entry { | ||
1058 | char string[OSI_STRING_LENGTH_MAX]; | ||
1059 | bool enable; | ||
1060 | }; | ||
1061 | |||
1062 | static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; | ||
1063 | |||
1064 | void __init acpi_osi_setup(char *str) | ||
1065 | { | ||
1066 | struct osi_setup_entry *osi; | ||
1067 | bool enable = true; | ||
1068 | int i; | ||
1069 | |||
1070 | if (!acpi_gbl_create_osi_method) | ||
1071 | return; | ||
1072 | |||
1073 | if (str == NULL || *str == '\0') { | ||
1074 | printk(KERN_INFO PREFIX "_OSI method disabled\n"); | ||
1075 | acpi_gbl_create_osi_method = FALSE; | ||
1076 | return; | ||
1077 | } | ||
1078 | |||
1079 | if (*str == '!') { | ||
1080 | str++; | ||
1081 | enable = false; | ||
1082 | } | ||
1083 | |||
1084 | for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { | ||
1085 | osi = &osi_setup_entries[i]; | ||
1086 | if (!strcmp(osi->string, str)) { | ||
1087 | osi->enable = enable; | ||
1088 | break; | ||
1089 | } else if (osi->string[0] == '\0') { | ||
1090 | osi->enable = enable; | ||
1091 | strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); | ||
1092 | break; | ||
1093 | } | ||
1094 | } | ||
1095 | } | ||
1096 | |||
1058 | static void __init set_osi_linux(unsigned int enable) | 1097 | static void __init set_osi_linux(unsigned int enable) |
1059 | { | 1098 | { |
1060 | if (osi_linux.enable != enable) { | 1099 | if (osi_linux.enable != enable) |
1061 | osi_linux.enable = enable; | 1100 | osi_linux.enable = enable; |
1062 | printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n", | ||
1063 | enable ? "Add": "Delet"); | ||
1064 | } | ||
1065 | 1101 | ||
1066 | if (osi_linux.enable) | 1102 | if (osi_linux.enable) |
1067 | acpi_osi_setup("Linux"); | 1103 | acpi_osi_setup("Linux"); |
@@ -1073,7 +1109,8 @@ static void __init set_osi_linux(unsigned int enable) | |||
1073 | 1109 | ||
1074 | static void __init acpi_cmdline_osi_linux(unsigned int enable) | 1110 | static void __init acpi_cmdline_osi_linux(unsigned int enable) |
1075 | { | 1111 | { |
1076 | osi_linux.cmdline = 1; /* cmdline set the default */ | 1112 | osi_linux.cmdline = 1; /* cmdline set the default and override DMI */ |
1113 | osi_linux.dmi = 0; | ||
1077 | set_osi_linux(enable); | 1114 | set_osi_linux(enable); |
1078 | 1115 | ||
1079 | return; | 1116 | return; |
@@ -1081,15 +1118,12 @@ static void __init acpi_cmdline_osi_linux(unsigned int enable) | |||
1081 | 1118 | ||
1082 | void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) | 1119 | void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) |
1083 | { | 1120 | { |
1084 | osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ | ||
1085 | |||
1086 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); | 1121 | printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); |
1087 | 1122 | ||
1088 | if (enable == -1) | 1123 | if (enable == -1) |
1089 | return; | 1124 | return; |
1090 | 1125 | ||
1091 | osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */ | 1126 | osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ |
1092 | |||
1093 | set_osi_linux(enable); | 1127 | set_osi_linux(enable); |
1094 | 1128 | ||
1095 | return; | 1129 | return; |
@@ -1104,37 +1138,44 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) | |||
1104 | */ | 1138 | */ |
1105 | static void __init acpi_osi_setup_late(void) | 1139 | static void __init acpi_osi_setup_late(void) |
1106 | { | 1140 | { |
1107 | char *str = osi_setup_string; | 1141 | struct osi_setup_entry *osi; |
1142 | char *str; | ||
1143 | int i; | ||
1144 | acpi_status status; | ||
1108 | 1145 | ||
1109 | if (*str == '\0') | 1146 | for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { |
1110 | return; | 1147 | osi = &osi_setup_entries[i]; |
1148 | str = osi->string; | ||
1111 | 1149 | ||
1112 | if (!strcmp("!Linux", str)) { | 1150 | if (*str == '\0') |
1113 | acpi_cmdline_osi_linux(0); /* !enable */ | 1151 | break; |
1114 | } else if (*str == '!') { | 1152 | if (osi->enable) { |
1115 | if (acpi_remove_interface(++str) == AE_OK) | 1153 | status = acpi_install_interface(str); |
1116 | printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); | 1154 | |
1117 | } else if (!strcmp("Linux", str)) { | 1155 | if (ACPI_SUCCESS(status)) |
1118 | acpi_cmdline_osi_linux(1); /* enable */ | 1156 | printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); |
1119 | } else { | 1157 | } else { |
1120 | if (acpi_install_interface(str) == AE_OK) | 1158 | status = acpi_remove_interface(str); |
1121 | printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); | 1159 | |
1160 | if (ACPI_SUCCESS(status)) | ||
1161 | printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); | ||
1162 | } | ||
1122 | } | 1163 | } |
1123 | } | 1164 | } |
1124 | 1165 | ||
1125 | int __init acpi_osi_setup(char *str) | 1166 | static int __init osi_setup(char *str) |
1126 | { | 1167 | { |
1127 | if (str == NULL || *str == '\0') { | 1168 | if (str && !strcmp("Linux", str)) |
1128 | printk(KERN_INFO PREFIX "_OSI method disabled\n"); | 1169 | acpi_cmdline_osi_linux(1); |
1129 | acpi_gbl_create_osi_method = FALSE; | 1170 | else if (str && !strcmp("!Linux", str)) |
1130 | } else { | 1171 | acpi_cmdline_osi_linux(0); |
1131 | strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX); | 1172 | else |
1132 | } | 1173 | acpi_osi_setup(str); |
1133 | 1174 | ||
1134 | return 1; | 1175 | return 1; |
1135 | } | 1176 | } |
1136 | 1177 | ||
1137 | __setup("acpi_osi=", acpi_osi_setup); | 1178 | __setup("acpi_osi=", osi_setup); |
1138 | 1179 | ||
1139 | /* enable serialization to combat AE_ALREADY_EXISTS errors */ | 1180 | /* enable serialization to combat AE_ALREADY_EXISTS errors */ |
1140 | static int __init acpi_serialize_setup(char *str) | 1181 | static int __init acpi_serialize_setup(char *str) |
@@ -1530,7 +1571,7 @@ acpi_status __init acpi_os_initialize(void) | |||
1530 | return AE_OK; | 1571 | return AE_OK; |
1531 | } | 1572 | } |
1532 | 1573 | ||
1533 | acpi_status acpi_os_initialize1(void) | 1574 | acpi_status __init acpi_os_initialize1(void) |
1534 | { | 1575 | { |
1535 | kacpid_wq = create_workqueue("kacpid"); | 1576 | kacpid_wq = create_workqueue("kacpid"); |
1536 | kacpi_notify_wq = create_workqueue("kacpi_notify"); | 1577 | kacpi_notify_wq = create_workqueue("kacpi_notify"); |
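acpi_osi= handling changes from a single saved string to a table of up to 16 enable/disable entries that are applied late via acpi_install_interface()/acpi_remove_interface(). A small self-contained model of the new bookkeeping (userspace stand-in, not the kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define STR_MAX  64
#define ENTRIES  16

struct osi_entry { char string[STR_MAX]; bool enable; };
static struct osi_entry entries[ENTRIES];

static void osi_setup_model(const char *str)
{
	bool enable = true;
	int i;

	if (*str == '!') {		/* "acpi_osi=!Foo" requests removal */
		str++;
		enable = false;
	}
	for (i = 0; i < ENTRIES; i++) {
		if (!strcmp(entries[i].string, str)) {
			entries[i].enable = enable;	/* later settings win */
			break;
		} else if (entries[i].string[0] == '\0') {
			entries[i].enable = enable;
			strncpy(entries[i].string, str, STR_MAX - 1);
			break;
		}
	}
}

int main(void)
{
	/* e.g. booting with: acpi_osi="Windows 2006" acpi_osi=!Linux */
	osi_setup_model("Windows 2006");
	osi_setup_model("!Linux");
	for (int i = 0; i < ENTRIES && entries[i].string[0]; i++)
		printf("_OSI(%s) -> %s\n", entries[i].string,
		       entries[i].enable ? "install" : "remove");
	return 0;
}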
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 67dedeed144c..0003f1009885 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power"); | |||
56 | #define ACPI_POWER_RESOURCE_STATE_ON 0x01 | 56 | #define ACPI_POWER_RESOURCE_STATE_ON 0x01 |
57 | #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF | 57 | #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF |
58 | 58 | ||
59 | int acpi_power_nocheck; | ||
60 | module_param_named(power_nocheck, acpi_power_nocheck, bool, 000); | ||
61 | |||
62 | static int acpi_power_add(struct acpi_device *device); | 59 | static int acpi_power_add(struct acpi_device *device); |
63 | static int acpi_power_remove(struct acpi_device *device, int type); | 60 | static int acpi_power_remove(struct acpi_device *device, int type); |
64 | static int acpi_power_resume(struct acpi_device *device); | 61 | static int acpi_power_resume(struct acpi_device *device); |
@@ -213,11 +210,13 @@ static int acpi_power_on(acpi_handle handle) | |||
213 | resource->name)); | 210 | resource->name)); |
214 | } else { | 211 | } else { |
215 | result = __acpi_power_on(resource); | 212 | result = __acpi_power_on(resource); |
213 | if (result) | ||
214 | resource->ref_count--; | ||
216 | } | 215 | } |
217 | 216 | ||
218 | mutex_unlock(&resource->resource_lock); | 217 | mutex_unlock(&resource->resource_lock); |
219 | 218 | ||
220 | return 0; | 219 | return result; |
221 | } | 220 | } |
222 | 221 | ||
223 | static int acpi_power_off_device(acpi_handle handle) | 222 | static int acpi_power_off_device(acpi_handle handle) |
@@ -264,6 +263,35 @@ static int acpi_power_off_device(acpi_handle handle) | |||
264 | return result; | 263 | return result; |
265 | } | 264 | } |
266 | 265 | ||
266 | static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) | ||
267 | { | ||
268 | int i; | ||
269 | |||
270 | for (i = num_res - 1; i >= 0 ; i--) | ||
271 | acpi_power_off_device(list->handles[i]); | ||
272 | } | ||
273 | |||
274 | static void acpi_power_off_list(struct acpi_handle_list *list) | ||
275 | { | ||
276 | __acpi_power_off_list(list, list->count); | ||
277 | } | ||
278 | |||
279 | static int acpi_power_on_list(struct acpi_handle_list *list) | ||
280 | { | ||
281 | int result = 0; | ||
282 | int i; | ||
283 | |||
284 | for (i = 0; i < list->count; i++) { | ||
285 | result = acpi_power_on(list->handles[i]); | ||
286 | if (result) { | ||
287 | __acpi_power_off_list(list, i); | ||
288 | break; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | return result; | ||
293 | } | ||
294 | |||
267 | /** | 295 | /** |
268 | * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in | 296 | * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in |
269 | * ACPI 3.0) _PSW (Power State Wake) | 297 | * ACPI 3.0) _PSW (Power State Wake) |
@@ -421,19 +449,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) | |||
421 | Device Power Management | 449 | Device Power Management |
422 | -------------------------------------------------------------------------- */ | 450 | -------------------------------------------------------------------------- */ |
423 | 451 | ||
424 | int acpi_power_get_inferred_state(struct acpi_device *device) | 452 | int acpi_power_get_inferred_state(struct acpi_device *device, int *state) |
425 | { | 453 | { |
426 | int result = 0; | 454 | int result = 0; |
427 | struct acpi_handle_list *list = NULL; | 455 | struct acpi_handle_list *list = NULL; |
428 | int list_state = 0; | 456 | int list_state = 0; |
429 | int i = 0; | 457 | int i = 0; |
430 | 458 | ||
431 | 459 | if (!device || !state) | |
432 | if (!device) | ||
433 | return -EINVAL; | 460 | return -EINVAL; |
434 | 461 | ||
435 | device->power.state = ACPI_STATE_UNKNOWN; | ||
436 | |||
437 | /* | 462 | /* |
438 | * We know a device's inferred power state when all the resources | 463 | * We know a device's inferred power state when all the resources |
439 | * required for a given D-state are 'on'. | 464 | * required for a given D-state are 'on'. |
@@ -448,66 +473,51 @@ int acpi_power_get_inferred_state(struct acpi_device *device) | |||
448 | return result; | 473 | return result; |
449 | 474 | ||
450 | if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { | 475 | if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { |
451 | device->power.state = i; | 476 | *state = i; |
452 | return 0; | 477 | return 0; |
453 | } | 478 | } |
454 | } | 479 | } |
455 | 480 | ||
456 | device->power.state = ACPI_STATE_D3; | 481 | *state = ACPI_STATE_D3; |
457 | |||
458 | return 0; | 482 | return 0; |
459 | } | 483 | } |
460 | 484 | ||
461 | int acpi_power_transition(struct acpi_device *device, int state) | 485 | int acpi_power_on_resources(struct acpi_device *device, int state) |
462 | { | 486 | { |
463 | int result = 0; | 487 | if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) |
464 | struct acpi_handle_list *cl = NULL; /* Current Resources */ | 488 | return -EINVAL; |
465 | struct acpi_handle_list *tl = NULL; /* Target Resources */ | ||
466 | int i = 0; | ||
467 | 489 | ||
490 | return acpi_power_on_list(&device->power.states[state].resources); | ||
491 | } | ||
492 | |||
493 | int acpi_power_transition(struct acpi_device *device, int state) | ||
494 | { | ||
495 | int result; | ||
468 | 496 | ||
469 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) | 497 | if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) |
470 | return -EINVAL; | 498 | return -EINVAL; |
471 | 499 | ||
500 | if (device->power.state == state) | ||
501 | return 0; | ||
502 | |||
472 | if ((device->power.state < ACPI_STATE_D0) | 503 | if ((device->power.state < ACPI_STATE_D0) |
473 | || (device->power.state > ACPI_STATE_D3)) | 504 | || (device->power.state > ACPI_STATE_D3)) |
474 | return -ENODEV; | 505 | return -ENODEV; |
475 | 506 | ||
476 | cl = &device->power.states[device->power.state].resources; | ||
477 | tl = &device->power.states[state].resources; | ||
478 | |||
479 | /* TBD: Resources must be ordered. */ | 507 | /* TBD: Resources must be ordered. */ |
480 | 508 | ||
481 | /* | 509 | /* |
482 | * First we reference all power resources required in the target list | 510 | * First we reference all power resources required in the target list |
483 | * (e.g. so the device doesn't lose power while transitioning). | 511 | * (e.g. so the device doesn't lose power while transitioning). Then, |
512 | * we dereference all power resources used in the current list. | ||
484 | */ | 513 | */ |
485 | for (i = 0; i < tl->count; i++) { | 514 | result = acpi_power_on_list(&device->power.states[state].resources); |
486 | result = acpi_power_on(tl->handles[i]); | 515 | if (!result) |
487 | if (result) | 516 | acpi_power_off_list( |
488 | goto end; | 517 | &device->power.states[device->power.state].resources); |
489 | } | ||
490 | |||
491 | if (device->power.state == state) { | ||
492 | goto end; | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * Then we dereference all power resources used in the current list. | ||
497 | */ | ||
498 | for (i = 0; i < cl->count; i++) { | ||
499 | result = acpi_power_off_device(cl->handles[i]); | ||
500 | if (result) | ||
501 | goto end; | ||
502 | } | ||
503 | 518 | ||
504 | end: | 519 | /* We shouldn't change the state unless the above operations succeed. */ |
505 | if (result) | 520 | device->power.state = result ? ACPI_STATE_UNKNOWN : state; |
506 | device->power.state = ACPI_STATE_UNKNOWN; | ||
507 | else { | ||
508 | /* We shouldn't change the state till all above operations succeed */ | ||
509 | device->power.state = state; | ||
510 | } | ||
511 | 521 | ||
512 | return result; | 522 | return result; |
513 | } | 523 | } |
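A standalone sketch (plain C, not kernel code) of the power-list pattern introduced above: the resources needed by the target state are referenced in order, a failure part-way through releases only the resources already referenced, in reverse order, and the device state is updated only when the whole transition succeeds. The resource type, the names and the simulated failure flag are illustrative assumptions.

/*
 * Model of acpi_power_on_list()/__acpi_power_off_list() semantics.
 */
#include <stdio.h>

struct resource {
	const char *name;
	int ref_count;
	int fail;		/* simulate a resource whose _ON fails */
};

static int power_on(struct resource *r)
{
	if (r->ref_count++)		/* already on: just take a reference */
		return 0;
	if (r->fail) {			/* _ON failed: drop the reference again */
		r->ref_count--;
		return -1;
	}
	printf("%s: turned on\n", r->name);
	return 0;
}

static void power_off(struct resource *r)
{
	if (r->ref_count && !--r->ref_count)
		printf("%s: turned off\n", r->name);
}

/* Mirror of __acpi_power_off_list(): release the first @num entries in reverse. */
static void power_off_list(struct resource **list, int num)
{
	int i;

	for (i = num - 1; i >= 0; i--)
		power_off(list[i]);
}

/* Mirror of acpi_power_on_list(): all-or-nothing referencing of a list. */
static int power_on_list(struct resource **list, int count)
{
	int i, result = 0;

	for (i = 0; i < count; i++) {
		result = power_on(list[i]);
		if (result) {
			power_off_list(list, i);	/* roll back partial work */
			break;
		}
	}
	return result;
}

int main(void)
{
	struct resource a = { "PRA", 0, 0 }, b = { "PRB", 0, 1 };
	struct resource *target[] = { &a, &b };

	if (power_on_list(target, 2))
		printf("transition failed, device state left unchanged\n");
	return 0;
}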
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index afad67769db6..f5f986991b52 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
@@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
311 | dev->pnp.bus_id, | 311 | dev->pnp.bus_id, |
312 | (u32) dev->wakeup.sleep_state, | 312 | (u32) dev->wakeup.sleep_state, |
313 | dev->wakeup.flags.run_wake ? '*' : ' ', | 313 | dev->wakeup.flags.run_wake ? '*' : ' ', |
314 | dev->wakeup.state.enabled ? "enabled" : "disabled"); | 314 | (device_may_wakeup(&dev->dev) |
315 | || (ldev && device_may_wakeup(ldev))) ? | ||
316 | "enabled" : "disabled"); | ||
315 | if (ldev) | 317 | if (ldev) |
316 | seq_printf(seq, "%s:%s", | 318 | seq_printf(seq, "%s:%s", |
317 | ldev->bus ? ldev->bus->name : "no-bus", | 319 | ldev->bus ? ldev->bus->name : "no-bus", |
@@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) | |||
328 | { | 330 | { |
329 | struct device *dev = acpi_get_physical_device(adev->handle); | 331 | struct device *dev = acpi_get_physical_device(adev->handle); |
330 | 332 | ||
331 | if (dev && device_can_wakeup(dev)) | 333 | if (dev && device_can_wakeup(dev)) { |
332 | device_set_wakeup_enable(dev, adev->wakeup.state.enabled); | 334 | bool enable = !device_may_wakeup(dev); |
335 | device_set_wakeup_enable(dev, enable); | ||
336 | } | ||
333 | } | 337 | } |
334 | 338 | ||
335 | static ssize_t | 339 | static ssize_t |
@@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file, | |||
341 | char strbuf[5]; | 345 | char strbuf[5]; |
342 | char str[5] = ""; | 346 | char str[5] = ""; |
343 | unsigned int len = count; | 347 | unsigned int len = count; |
344 | struct acpi_device *found_dev = NULL; | ||
345 | 348 | ||
346 | if (len > 4) | 349 | if (len > 4) |
347 | len = 4; | 350 | len = 4; |
@@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file, | |||
361 | continue; | 364 | continue; |
362 | 365 | ||
363 | if (!strncmp(dev->pnp.bus_id, str, 4)) { | 366 | if (!strncmp(dev->pnp.bus_id, str, 4)) { |
364 | dev->wakeup.state.enabled = | 367 | if (device_can_wakeup(&dev->dev)) { |
365 | dev->wakeup.state.enabled ? 0 : 1; | 368 | bool enable = !device_may_wakeup(&dev->dev); |
366 | found_dev = dev; | 369 | device_set_wakeup_enable(&dev->dev, enable); |
367 | break; | 370 | } else { |
368 | } | ||
369 | } | ||
370 | if (found_dev) { | ||
371 | physical_device_enable_wakeup(found_dev); | ||
372 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | ||
373 | struct acpi_device *dev = container_of(node, | ||
374 | struct | ||
375 | acpi_device, | ||
376 | wakeup_list); | ||
377 | |||
378 | if ((dev != found_dev) && | ||
379 | (dev->wakeup.gpe_number == | ||
380 | found_dev->wakeup.gpe_number) | ||
381 | && (dev->wakeup.gpe_device == | ||
382 | found_dev->wakeup.gpe_device)) { | ||
383 | printk(KERN_WARNING | ||
384 | "ACPI: '%s' and '%s' have the same GPE, " | ||
385 | "can't disable/enable one separately\n", | ||
386 | dev->pnp.bus_id, found_dev->pnp.bus_id); | ||
387 | dev->wakeup.state.enabled = | ||
388 | found_dev->wakeup.state.enabled; | ||
389 | physical_device_enable_wakeup(dev); | 371 | physical_device_enable_wakeup(dev); |
390 | } | 372 | } |
373 | break; | ||
391 | } | 374 | } |
392 | } | 375 | } |
393 | mutex_unlock(&acpi_device_lock); | 376 | mutex_unlock(&acpi_device_lock); |
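A minimal user-space model of the rewritten /proc/acpi/wakeup write path shown above: the first device whose bus_id matches the first four characters of the written string has its wakeup setting toggled, and only wakeup-capable devices are touched. The device table and field names below are stand-ins, not real ACPI data.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct wake_dev {
	const char *bus_id;
	bool can_wakeup;
	bool may_wakeup;
};

static void write_wakeup(struct wake_dev *devs, int n, const char *str)
{
	char buf[5] = "";
	int i;

	strncpy(buf, str, 4);			/* only the first 4 characters matter */
	for (i = 0; i < n; i++) {
		if (strncmp(devs[i].bus_id, buf, 4))
			continue;
		if (devs[i].can_wakeup)
			devs[i].may_wakeup = !devs[i].may_wakeup;
		break;				/* only the first match is toggled */
	}
}

int main(void)
{
	struct wake_dev devs[] = {
		{ "PBTN", true, false },
		{ "LID0", true, true },
	};

	write_wakeup(devs, 2, "PBTN\n");
	printf("PBTN %s\n", devs[0].may_wakeup ? "enabled" : "disabled");
	return 0;
}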
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 85e48047d7b0..360a74e6add0 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -40,10 +40,6 @@ | |||
40 | #include <linux/pm.h> | 40 | #include <linux/pm.h> |
41 | #include <linux/cpufreq.h> | 41 | #include <linux/cpufreq.h> |
42 | #include <linux/cpu.h> | 42 | #include <linux/cpu.h> |
43 | #ifdef CONFIG_ACPI_PROCFS | ||
44 | #include <linux/proc_fs.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #endif | ||
47 | #include <linux/dmi.h> | 43 | #include <linux/dmi.h> |
48 | #include <linux/moduleparam.h> | 44 | #include <linux/moduleparam.h> |
49 | #include <linux/cpuidle.h> | 45 | #include <linux/cpuidle.h> |
@@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr) | |||
246 | return result; | 242 | return result; |
247 | } | 243 | } |
248 | 244 | ||
249 | #ifdef CONFIG_ACPI_PROCFS | ||
250 | static struct proc_dir_entry *acpi_processor_dir = NULL; | ||
251 | |||
252 | static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) | ||
253 | { | ||
254 | struct proc_dir_entry *entry = NULL; | ||
255 | |||
256 | |||
257 | if (!acpi_device_dir(device)) { | ||
258 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | ||
259 | acpi_processor_dir); | ||
260 | if (!acpi_device_dir(device)) | ||
261 | return -ENODEV; | ||
262 | } | ||
263 | |||
264 | /* 'throttling' [R/W] */ | ||
265 | entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, | ||
266 | S_IFREG | S_IRUGO | S_IWUSR, | ||
267 | acpi_device_dir(device), | ||
268 | &acpi_processor_throttling_fops, | ||
269 | acpi_driver_data(device)); | ||
270 | if (!entry) | ||
271 | return -EIO; | ||
272 | return 0; | ||
273 | } | ||
274 | static int acpi_processor_remove_fs(struct acpi_device *device) | ||
275 | { | ||
276 | |||
277 | if (acpi_device_dir(device)) { | ||
278 | remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, | ||
279 | acpi_device_dir(device)); | ||
280 | remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); | ||
281 | acpi_device_dir(device) = NULL; | ||
282 | } | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | #else | ||
287 | static inline int acpi_processor_add_fs(struct acpi_device *device) | ||
288 | { | ||
289 | return 0; | ||
290 | } | ||
291 | static inline int acpi_processor_remove_fs(struct acpi_device *device) | ||
292 | { | ||
293 | return 0; | ||
294 | } | ||
295 | #endif | ||
296 | /* -------------------------------------------------------------------------- | 245 | /* -------------------------------------------------------------------------- |
297 | Driver Interface | 246 | Driver Interface |
298 | -------------------------------------------------------------------------- */ | 247 | -------------------------------------------------------------------------- */ |
@@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
478 | if (action == CPU_ONLINE && pr) { | 427 | if (action == CPU_ONLINE && pr) { |
479 | acpi_processor_ppc_has_changed(pr, 0); | 428 | acpi_processor_ppc_has_changed(pr, 0); |
480 | acpi_processor_cst_has_changed(pr); | 429 | acpi_processor_cst_has_changed(pr); |
430 | acpi_processor_reevaluate_tstate(pr, action); | ||
481 | acpi_processor_tstate_has_changed(pr); | 431 | acpi_processor_tstate_has_changed(pr); |
482 | } | 432 | } |
433 | if (action == CPU_DEAD && pr) { | ||
434 | /* invalidate pr->flags.throttling after a CPU goes offline */ | ||
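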
435 | acpi_processor_reevaluate_tstate(pr, action); | ||
436 | } | ||
483 | return NOTIFY_OK; | 437 | return NOTIFY_OK; |
484 | } | 438 | } |
485 | 439 | ||
@@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
537 | 491 | ||
538 | per_cpu(processors, pr->id) = pr; | 492 | per_cpu(processors, pr->id) = pr; |
539 | 493 | ||
540 | result = acpi_processor_add_fs(device); | ||
541 | if (result) | ||
542 | goto err_free_cpumask; | ||
543 | |||
544 | sysdev = get_cpu_sysdev(pr->id); | 494 | sysdev = get_cpu_sysdev(pr->id); |
545 | if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { | 495 | if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { |
546 | result = -EFAULT; | 496 | result = -EFAULT; |
547 | goto err_remove_fs; | 497 | goto err_free_cpumask; |
548 | } | 498 | } |
549 | 499 | ||
550 | #ifdef CONFIG_CPU_FREQ | 500 | #ifdef CONFIG_CPU_FREQ |
@@ -590,8 +540,6 @@ err_thermal_unregister: | |||
590 | thermal_cooling_device_unregister(pr->cdev); | 540 | thermal_cooling_device_unregister(pr->cdev); |
591 | err_power_exit: | 541 | err_power_exit: |
592 | acpi_processor_power_exit(pr, device); | 542 | acpi_processor_power_exit(pr, device); |
593 | err_remove_fs: | ||
594 | acpi_processor_remove_fs(device); | ||
595 | err_free_cpumask: | 543 | err_free_cpumask: |
596 | free_cpumask_var(pr->throttling.shared_cpu_map); | 544 | free_cpumask_var(pr->throttling.shared_cpu_map); |
597 | 545 | ||
@@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type) | |||
620 | 568 | ||
621 | sysfs_remove_link(&device->dev.kobj, "sysdev"); | 569 | sysfs_remove_link(&device->dev.kobj, "sysdev"); |
622 | 570 | ||
623 | acpi_processor_remove_fs(device); | ||
624 | |||
625 | if (pr->cdev) { | 571 | if (pr->cdev) { |
626 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); | 572 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); |
627 | sysfs_remove_link(&pr->cdev->device.kobj, "device"); | 573 | sysfs_remove_link(&pr->cdev->device.kobj, "device"); |
@@ -854,12 +800,6 @@ static int __init acpi_processor_init(void) | |||
854 | 800 | ||
855 | memset(&errata, 0, sizeof(errata)); | 801 | memset(&errata, 0, sizeof(errata)); |
856 | 802 | ||
857 | #ifdef CONFIG_ACPI_PROCFS | ||
858 | acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
859 | if (!acpi_processor_dir) | ||
860 | return -ENOMEM; | ||
861 | #endif | ||
862 | |||
863 | if (!cpuidle_register_driver(&acpi_idle_driver)) { | 803 | if (!cpuidle_register_driver(&acpi_idle_driver)) { |
864 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | 804 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", |
865 | acpi_idle_driver.name); | 805 | acpi_idle_driver.name); |
@@ -885,10 +825,6 @@ static int __init acpi_processor_init(void) | |||
885 | out_cpuidle: | 825 | out_cpuidle: |
886 | cpuidle_unregister_driver(&acpi_idle_driver); | 826 | cpuidle_unregister_driver(&acpi_idle_driver); |
887 | 827 | ||
888 | #ifdef CONFIG_ACPI_PROCFS | ||
889 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
890 | #endif | ||
891 | |||
892 | return result; | 828 | return result; |
893 | } | 829 | } |
894 | 830 | ||
@@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void) | |||
907 | 843 | ||
908 | cpuidle_unregister_driver(&acpi_idle_driver); | 844 | cpuidle_unregister_driver(&acpi_idle_driver); |
909 | 845 | ||
910 | #ifdef CONFIG_ACPI_PROCFS | ||
911 | remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); | ||
912 | #endif | ||
913 | |||
914 | return; | 846 | return; |
915 | } | 847 | } |
916 | 848 | ||
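The hotplug notifier above now calls acpi_processor_reevaluate_tstate() for both CPU_ONLINE and CPU_DEAD; the function itself is added later in this patch, in processor_throttling.c. The compact model below, in plain C rather than kernel code, illustrates the decision flow under stated assumptions: a dead CPU or an empty T-state table invalidates throttling, otherwise throttling is marked valid again and any residual throttle state is cleared back to T0.

#include <stdbool.h>
#include <stdio.h>

struct proc_model {
	bool throttling_valid;
	int state_count;	/* number of supported T-states */
	int state;		/* current T-state, 0 == full speed */
	bool cpu_dead;
};

static void reevaluate_tstate(struct proc_model *pr)
{
	if (pr->cpu_dead || !pr->state_count) {
		pr->throttling_valid = false;	/* offline CPU or no T-states */
		return;
	}
	pr->throttling_valid = true;
	if (pr->state)
		pr->state = 0;	/* crank back up to full speed, as with T0 above */
}

int main(void)
{
	struct proc_model pr = { .state_count = 8, .state = 3 };

	reevaluate_tstate(&pr);
	printf("valid=%d state=T%d\n", pr.throttling_valid, pr.state);
	return 0;
}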
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index fde49b9b1d99..79cb65332894 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c | |||
@@ -156,15 +156,6 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) | |||
156 | return 0; | 156 | return 0; |
157 | } | 157 | } |
158 | 158 | ||
159 | static int acpi_thermal_cpufreq_increase(unsigned int cpu) | ||
160 | { | ||
161 | return -ENODEV; | ||
162 | } | ||
163 | static int acpi_thermal_cpufreq_decrease(unsigned int cpu) | ||
164 | { | ||
165 | return -ENODEV; | ||
166 | } | ||
167 | |||
168 | #endif | 159 | #endif |
169 | 160 | ||
170 | int acpi_processor_get_limit_info(struct acpi_processor *pr) | 161 | int acpi_processor_get_limit_info(struct acpi_processor *pr) |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index ff3632717c51..fa84e9744330 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -32,10 +32,6 @@ | |||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/cpufreq.h> | 34 | #include <linux/cpufreq.h> |
35 | #ifdef CONFIG_ACPI_PROCFS | ||
36 | #include <linux/proc_fs.h> | ||
37 | #include <linux/seq_file.h> | ||
38 | #endif | ||
39 | 35 | ||
40 | #include <asm/io.h> | 36 | #include <asm/io.h> |
41 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
@@ -370,6 +366,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) | |||
370 | } | 366 | } |
371 | 367 | ||
372 | /* | 368 | /* |
369 | * This function reevaluates whether the T-state is still valid | ||
370 | * after a CPU is brought online or taken offline. | ||
371 | * Note that it does not reevaluate the following T-state | ||
372 | * properties: | ||
373 | * 1. the control method | ||
374 | * 2. the number of supported T-states | ||
375 | * 3. the _TSD domain | ||
376 | */ | ||
377 | void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, | ||
378 | unsigned long action) | ||
379 | { | ||
380 | int result = 0; | ||
381 | |||
382 | if (action == CPU_DEAD) { | ||
383 | /* When a CPU goes offline, T-state throttling | ||
384 | * is invalidated. | ||
385 | */ | ||
386 | pr->flags.throttling = 0; | ||
387 | return; | ||
388 | } | ||
389 | /* Recheck whether the T-state is still valid for the CPU that | ||
390 | * has just come online. | ||
391 | */ | ||
392 | if (!pr->throttling.state_count) { | ||
393 | /* If no T-states are reported, throttling cannot be used | ||
394 | * and is invalidated. | ||
395 | */ | ||
396 | pr->flags.throttling = 0; | ||
397 | return; | ||
398 | } | ||
399 | pr->flags.throttling = 1; | ||
400 | |||
401 | /* Disable throttling (if enabled). We'll let subsequent | ||
402 | * policy (e.g. thermal) decide to lower performance if it | ||
403 | * so chooses, but for now we'll crank up the speed. | ||
404 | */ | ||
405 | |||
406 | result = acpi_processor_get_throttling(pr); | ||
407 | if (result) | ||
408 | goto end; | ||
409 | |||
410 | if (pr->throttling.state) { | ||
411 | result = acpi_processor_set_throttling(pr, 0, false); | ||
412 | if (result) | ||
413 | goto end; | ||
414 | } | ||
415 | |||
416 | end: | ||
417 | if (result) | ||
418 | pr->flags.throttling = 0; | ||
419 | } | ||
420 | /* | ||
373 | * _PTC - Processor Throttling Control (and status) register location | 421 | * _PTC - Processor Throttling Control (and status) register location |
374 | */ | 422 | */ |
375 | static int acpi_processor_get_throttling_control(struct acpi_processor *pr) | 423 | static int acpi_processor_get_throttling_control(struct acpi_processor *pr) |
@@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) | |||
876 | */ | 924 | */ |
877 | cpumask_copy(saved_mask, ¤t->cpus_allowed); | 925 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
878 | /* FIXME: use work_on_cpu() */ | 926 | /* FIXME: use work_on_cpu() */ |
879 | set_cpus_allowed_ptr(current, cpumask_of(pr->id)); | 927 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
928 | /* Can't migrate to the target pr->id CPU. Exit */ | ||
929 | free_cpumask_var(saved_mask); | ||
930 | return -ENODEV; | ||
931 | } | ||
880 | ret = pr->throttling.acpi_processor_get_throttling(pr); | 932 | ret = pr->throttling.acpi_processor_get_throttling(pr); |
881 | /* restore the previous state */ | 933 | /* restore the previous state */ |
882 | set_cpus_allowed_ptr(current, saved_mask); | 934 | set_cpus_allowed_ptr(current, saved_mask); |
@@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
1051 | return -ENOMEM; | 1103 | return -ENOMEM; |
1052 | } | 1104 | } |
1053 | 1105 | ||
1106 | if (cpu_is_offline(pr->id)) { | ||
1107 | /* | ||
1108 | * The CPU pointed to by pr->id is offline; there is no need | ||
1109 | * to change the throttling state any more. | ||
1110 | */ | ||
1111 | return -ENODEV; | ||
1112 | } | ||
1113 | |||
1054 | cpumask_copy(saved_mask, ¤t->cpus_allowed); | 1114 | cpumask_copy(saved_mask, ¤t->cpus_allowed); |
1055 | t_state.target_state = state; | 1115 | t_state.target_state = state; |
1056 | p_throttling = &(pr->throttling); | 1116 | p_throttling = &(pr->throttling); |
@@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
1074 | */ | 1134 | */ |
1075 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { | 1135 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { |
1076 | /* FIXME: use work_on_cpu() */ | 1136 | /* FIXME: use work_on_cpu() */ |
1077 | set_cpus_allowed_ptr(current, cpumask_of(pr->id)); | 1137 | if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { |
1138 | /* Can't migrate to the pr->id CPU. Exit */ | ||
1139 | ret = -ENODEV; | ||
1140 | goto exit; | ||
1141 | } | ||
1078 | ret = p_throttling->acpi_processor_set_throttling(pr, | 1142 | ret = p_throttling->acpi_processor_set_throttling(pr, |
1079 | t_state.target_state, force); | 1143 | t_state.target_state, force); |
1080 | } else { | 1144 | } else { |
@@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
1106 | } | 1170 | } |
1107 | t_state.cpu = i; | 1171 | t_state.cpu = i; |
1108 | /* FIXME: use work_on_cpu() */ | 1172 | /* FIXME: use work_on_cpu() */ |
1109 | set_cpus_allowed_ptr(current, cpumask_of(i)); | 1173 | if (set_cpus_allowed_ptr(current, cpumask_of(i))) |
1174 | continue; | ||
1110 | ret = match_pr->throttling. | 1175 | ret = match_pr->throttling. |
1111 | acpi_processor_set_throttling( | 1176 | acpi_processor_set_throttling( |
1112 | match_pr, t_state.target_state, force); | 1177 | match_pr, t_state.target_state, force); |
@@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, | |||
1126 | /* restore the previous state */ | 1191 | /* restore the previous state */ |
1127 | /* FIXME: use work_on_cpu() */ | 1192 | /* FIXME: use work_on_cpu() */ |
1128 | set_cpus_allowed_ptr(current, saved_mask); | 1193 | set_cpus_allowed_ptr(current, saved_mask); |
1194 | exit: | ||
1129 | free_cpumask_var(online_throttling_cpus); | 1195 | free_cpumask_var(online_throttling_cpus); |
1130 | free_cpumask_var(saved_mask); | 1196 | free_cpumask_var(saved_mask); |
1131 | return ret; | 1197 | return ret; |
@@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) | |||
1216 | return result; | 1282 | return result; |
1217 | } | 1283 | } |
1218 | 1284 | ||
1219 | #ifdef CONFIG_ACPI_PROCFS | ||
1220 | /* proc interface */ | ||
1221 | static int acpi_processor_throttling_seq_show(struct seq_file *seq, | ||
1222 | void *offset) | ||
1223 | { | ||
1224 | struct acpi_processor *pr = seq->private; | ||
1225 | int i = 0; | ||
1226 | int result = 0; | ||
1227 | |||
1228 | if (!pr) | ||
1229 | goto end; | ||
1230 | |||
1231 | if (!(pr->throttling.state_count > 0)) { | ||
1232 | seq_puts(seq, "<not supported>\n"); | ||
1233 | goto end; | ||
1234 | } | ||
1235 | |||
1236 | result = acpi_processor_get_throttling(pr); | ||
1237 | |||
1238 | if (result) { | ||
1239 | seq_puts(seq, | ||
1240 | "Could not determine current throttling state.\n"); | ||
1241 | goto end; | ||
1242 | } | ||
1243 | |||
1244 | seq_printf(seq, "state count: %d\n" | ||
1245 | "active state: T%d\n" | ||
1246 | "state available: T%d to T%d\n", | ||
1247 | pr->throttling.state_count, pr->throttling.state, | ||
1248 | pr->throttling_platform_limit, | ||
1249 | pr->throttling.state_count - 1); | ||
1250 | |||
1251 | seq_puts(seq, "states:\n"); | ||
1252 | if (pr->throttling.acpi_processor_get_throttling == | ||
1253 | acpi_processor_get_throttling_fadt) { | ||
1254 | for (i = 0; i < pr->throttling.state_count; i++) | ||
1255 | seq_printf(seq, " %cT%d: %02d%%\n", | ||
1256 | (i == pr->throttling.state ? '*' : ' '), i, | ||
1257 | (pr->throttling.states[i].performance ? pr-> | ||
1258 | throttling.states[i].performance / 10 : 0)); | ||
1259 | } else { | ||
1260 | for (i = 0; i < pr->throttling.state_count; i++) | ||
1261 | seq_printf(seq, " %cT%d: %02d%%\n", | ||
1262 | (i == pr->throttling.state ? '*' : ' '), i, | ||
1263 | (int)pr->throttling.states_tss[i]. | ||
1264 | freqpercentage); | ||
1265 | } | ||
1266 | |||
1267 | end: | ||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | static int acpi_processor_throttling_open_fs(struct inode *inode, | ||
1272 | struct file *file) | ||
1273 | { | ||
1274 | return single_open(file, acpi_processor_throttling_seq_show, | ||
1275 | PDE(inode)->data); | ||
1276 | } | ||
1277 | |||
1278 | static ssize_t acpi_processor_write_throttling(struct file *file, | ||
1279 | const char __user * buffer, | ||
1280 | size_t count, loff_t * data) | ||
1281 | { | ||
1282 | int result = 0; | ||
1283 | struct seq_file *m = file->private_data; | ||
1284 | struct acpi_processor *pr = m->private; | ||
1285 | char state_string[5] = ""; | ||
1286 | char *charp = NULL; | ||
1287 | size_t state_val = 0; | ||
1288 | char tmpbuf[5] = ""; | ||
1289 | |||
1290 | if (!pr || (count > sizeof(state_string) - 1)) | ||
1291 | return -EINVAL; | ||
1292 | |||
1293 | if (copy_from_user(state_string, buffer, count)) | ||
1294 | return -EFAULT; | ||
1295 | |||
1296 | state_string[count] = '\0'; | ||
1297 | if ((count > 0) && (state_string[count-1] == '\n')) | ||
1298 | state_string[count-1] = '\0'; | ||
1299 | |||
1300 | charp = state_string; | ||
1301 | if ((state_string[0] == 't') || (state_string[0] == 'T')) | ||
1302 | charp++; | ||
1303 | |||
1304 | state_val = simple_strtoul(charp, NULL, 0); | ||
1305 | if (state_val >= pr->throttling.state_count) | ||
1306 | return -EINVAL; | ||
1307 | |||
1308 | snprintf(tmpbuf, 5, "%zu", state_val); | ||
1309 | |||
1310 | if (strcmp(tmpbuf, charp) != 0) | ||
1311 | return -EINVAL; | ||
1312 | |||
1313 | result = acpi_processor_set_throttling(pr, state_val, false); | ||
1314 | if (result) | ||
1315 | return result; | ||
1316 | |||
1317 | return count; | ||
1318 | } | ||
1319 | |||
1320 | const struct file_operations acpi_processor_throttling_fops = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .open = acpi_processor_throttling_open_fs, | ||
1323 | .read = seq_read, | ||
1324 | .write = acpi_processor_write_throttling, | ||
1325 | .llseek = seq_lseek, | ||
1326 | .release = single_release, | ||
1327 | }; | ||
1328 | #endif | ||
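A user-space analogue of the error handling added around set_cpus_allowed_ptr() in this file: the affinity change is checked, and the per-CPU work is skipped (or the whole operation aborted) when the target CPU cannot be reached, rather than assuming the migration succeeded. sched_setaffinity() stands in for the kernel helper, and the loop bound of four CPUs is arbitrary.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int run_on_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set)) {
		/* Can't migrate to the target CPU (offline, invalid, ...). */
		return -1;
	}
	/* ... per-CPU work would run here ... */
	return 0;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (run_on_cpu(cpu)) {
			printf("cpu%d unreachable, skipping\n", cpu);
			continue;	/* mirrors the 'continue' in the SW_ALL loop above */
		}
		printf("cpu%d handled\n", cpu);
	}
	return 0;
}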
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index e5dbedb16bbf..51ae3794ec7f 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir, | |||
484 | const struct file_operations *state_fops, | 484 | const struct file_operations *state_fops, |
485 | const struct file_operations *alarm_fops, void *data) | 485 | const struct file_operations *alarm_fops, void *data) |
486 | { | 486 | { |
487 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded," | ||
488 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
487 | if (!*dir) { | 489 | if (!*dir) { |
488 | *dir = proc_mkdir(dir_name, parent_dir); | 490 | *dir = proc_mkdir(dir_name, parent_dir); |
489 | if (!*dir) { | 491 | if (!*dir) { |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 2b6c21d86b98..64d4da0d6d52 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -705,54 +705,85 @@ static int acpi_bus_get_perf_flags(struct acpi_device *device) | |||
705 | } | 705 | } |
706 | 706 | ||
707 | static acpi_status | 707 | static acpi_status |
708 | acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, | 708 | acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, |
709 | union acpi_object *package) | 709 | struct acpi_device_wakeup *wakeup) |
710 | { | 710 | { |
711 | int i = 0; | 711 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
712 | union acpi_object *package = NULL; | ||
712 | union acpi_object *element = NULL; | 713 | union acpi_object *element = NULL; |
714 | acpi_status status; | ||
715 | int i = 0; | ||
713 | 716 | ||
714 | if (!device || !package || (package->package.count < 2)) | 717 | if (!wakeup) |
715 | return AE_BAD_PARAMETER; | 718 | return AE_BAD_PARAMETER; |
716 | 719 | ||
720 | /* _PRW */ | ||
721 | status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); | ||
722 | if (ACPI_FAILURE(status)) { | ||
723 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); | ||
724 | return status; | ||
725 | } | ||
726 | |||
727 | package = (union acpi_object *)buffer.pointer; | ||
728 | |||
729 | if (!package || (package->package.count < 2)) { | ||
730 | status = AE_BAD_DATA; | ||
731 | goto out; | ||
732 | } | ||
733 | |||
717 | element = &(package->package.elements[0]); | 734 | element = &(package->package.elements[0]); |
718 | if (!element) | 735 | if (!element) { |
719 | return AE_BAD_PARAMETER; | 736 | status = AE_BAD_DATA; |
737 | goto out; | ||
738 | } | ||
720 | if (element->type == ACPI_TYPE_PACKAGE) { | 739 | if (element->type == ACPI_TYPE_PACKAGE) { |
721 | if ((element->package.count < 2) || | 740 | if ((element->package.count < 2) || |
722 | (element->package.elements[0].type != | 741 | (element->package.elements[0].type != |
723 | ACPI_TYPE_LOCAL_REFERENCE) | 742 | ACPI_TYPE_LOCAL_REFERENCE) |
724 | || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) | 743 | || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) { |
725 | return AE_BAD_DATA; | 744 | status = AE_BAD_DATA; |
726 | device->wakeup.gpe_device = | 745 | goto out; |
746 | } | ||
747 | wakeup->gpe_device = | ||
727 | element->package.elements[0].reference.handle; | 748 | element->package.elements[0].reference.handle; |
728 | device->wakeup.gpe_number = | 749 | wakeup->gpe_number = |
729 | (u32) element->package.elements[1].integer.value; | 750 | (u32) element->package.elements[1].integer.value; |
730 | } else if (element->type == ACPI_TYPE_INTEGER) { | 751 | } else if (element->type == ACPI_TYPE_INTEGER) { |
731 | device->wakeup.gpe_number = element->integer.value; | 752 | wakeup->gpe_device = NULL; |
732 | } else | 753 | wakeup->gpe_number = element->integer.value; |
733 | return AE_BAD_DATA; | 754 | } else { |
755 | status = AE_BAD_DATA; | ||
756 | goto out; | ||
757 | } | ||
734 | 758 | ||
735 | element = &(package->package.elements[1]); | 759 | element = &(package->package.elements[1]); |
736 | if (element->type != ACPI_TYPE_INTEGER) { | 760 | if (element->type != ACPI_TYPE_INTEGER) { |
737 | return AE_BAD_DATA; | 761 | status = AE_BAD_DATA; |
762 | goto out; | ||
738 | } | 763 | } |
739 | device->wakeup.sleep_state = element->integer.value; | 764 | wakeup->sleep_state = element->integer.value; |
740 | 765 | ||
741 | if ((package->package.count - 2) > ACPI_MAX_HANDLES) { | 766 | if ((package->package.count - 2) > ACPI_MAX_HANDLES) { |
742 | return AE_NO_MEMORY; | 767 | status = AE_NO_MEMORY; |
768 | goto out; | ||
743 | } | 769 | } |
744 | device->wakeup.resources.count = package->package.count - 2; | 770 | wakeup->resources.count = package->package.count - 2; |
745 | for (i = 0; i < device->wakeup.resources.count; i++) { | 771 | for (i = 0; i < wakeup->resources.count; i++) { |
746 | element = &(package->package.elements[i + 2]); | 772 | element = &(package->package.elements[i + 2]); |
747 | if (element->type != ACPI_TYPE_LOCAL_REFERENCE) | 773 | if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { |
748 | return AE_BAD_DATA; | 774 | status = AE_BAD_DATA; |
775 | goto out; | ||
776 | } | ||
749 | 777 | ||
750 | device->wakeup.resources.handles[i] = element->reference.handle; | 778 | wakeup->resources.handles[i] = element->reference.handle; |
751 | } | 779 | } |
752 | 780 | ||
753 | acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number); | 781 | acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number); |
754 | 782 | ||
755 | return AE_OK; | 783 | out: |
784 | kfree(buffer.pointer); | ||
785 | |||
786 | return status; | ||
756 | } | 787 | } |
757 | 788 | ||
758 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | 789 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) |
@@ -772,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | |||
772 | /* Power button, Lid switch always enable wakeup */ | 803 | /* Power button, Lid switch always enable wakeup */ |
773 | if (!acpi_match_device_ids(device, button_device_ids)) { | 804 | if (!acpi_match_device_ids(device, button_device_ids)) { |
774 | device->wakeup.flags.run_wake = 1; | 805 | device->wakeup.flags.run_wake = 1; |
775 | device->wakeup.flags.always_enabled = 1; | 806 | device_set_wakeup_capable(&device->dev, true); |
776 | return; | 807 | return; |
777 | } | 808 | } |
778 | 809 | ||
@@ -787,26 +818,15 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | |||
787 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | 818 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) |
788 | { | 819 | { |
789 | acpi_status status = 0; | 820 | acpi_status status = 0; |
790 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
791 | union acpi_object *package = NULL; | ||
792 | int psw_error; | 821 | int psw_error; |
793 | 822 | ||
794 | /* _PRW */ | 823 | status = acpi_bus_extract_wakeup_device_power_package(device->handle, |
795 | status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); | 824 | &device->wakeup); |
796 | if (ACPI_FAILURE(status)) { | ||
797 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); | ||
798 | goto end; | ||
799 | } | ||
800 | |||
801 | package = (union acpi_object *)buffer.pointer; | ||
802 | status = acpi_bus_extract_wakeup_device_power_package(device, package); | ||
803 | if (ACPI_FAILURE(status)) { | 825 | if (ACPI_FAILURE(status)) { |
804 | ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); | 826 | ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); |
805 | goto end; | 827 | goto end; |
806 | } | 828 | } |
807 | 829 | ||
808 | kfree(buffer.pointer); | ||
809 | |||
810 | device->wakeup.flags.valid = 1; | 830 | device->wakeup.flags.valid = 1; |
811 | device->wakeup.prepare_count = 0; | 831 | device->wakeup.prepare_count = 0; |
812 | acpi_bus_set_run_wake_flags(device); | 832 | acpi_bus_set_run_wake_flags(device); |
@@ -827,6 +847,8 @@ end: | |||
827 | return 0; | 847 | return 0; |
828 | } | 848 | } |
829 | 849 | ||
850 | static void acpi_bus_add_power_resource(acpi_handle handle); | ||
851 | |||
830 | static int acpi_bus_get_power_flags(struct acpi_device *device) | 852 | static int acpi_bus_get_power_flags(struct acpi_device *device) |
831 | { | 853 | { |
832 | acpi_status status = 0; | 854 | acpi_status status = 0; |
@@ -855,8 +877,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
855 | acpi_evaluate_reference(device->handle, object_name, NULL, | 877 | acpi_evaluate_reference(device->handle, object_name, NULL, |
856 | &ps->resources); | 878 | &ps->resources); |
857 | if (ps->resources.count) { | 879 | if (ps->resources.count) { |
880 | int j; | ||
881 | |||
858 | device->power.flags.power_resources = 1; | 882 | device->power.flags.power_resources = 1; |
859 | ps->flags.valid = 1; | 883 | ps->flags.valid = 1; |
884 | for (j = 0; j < ps->resources.count; j++) | ||
885 | acpi_bus_add_power_resource(ps->resources.handles[j]); | ||
860 | } | 886 | } |
861 | 887 | ||
862 | /* Evaluate "_PSx" to see if we can do explicit sets */ | 888 | /* Evaluate "_PSx" to see if we can do explicit sets */ |
@@ -881,10 +907,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
881 | device->power.states[ACPI_STATE_D3].flags.valid = 1; | 907 | device->power.states[ACPI_STATE_D3].flags.valid = 1; |
882 | device->power.states[ACPI_STATE_D3].power = 0; | 908 | device->power.states[ACPI_STATE_D3].power = 0; |
883 | 909 | ||
884 | /* TBD: System wake support and resource requirements. */ | 910 | acpi_bus_init_power(device); |
885 | |||
886 | device->power.state = ACPI_STATE_UNKNOWN; | ||
887 | acpi_bus_get_power(device->handle, &(device->power.state)); | ||
888 | 911 | ||
889 | return 0; | 912 | return 0; |
890 | } | 913 | } |
@@ -1306,6 +1329,20 @@ end: | |||
1306 | #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ | 1329 | #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ |
1307 | ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) | 1330 | ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) |
1308 | 1331 | ||
1332 | static void acpi_bus_add_power_resource(acpi_handle handle) | ||
1333 | { | ||
1334 | struct acpi_bus_ops ops = { | ||
1335 | .acpi_op_add = 1, | ||
1336 | .acpi_op_start = 1, | ||
1337 | }; | ||
1338 | struct acpi_device *device = NULL; | ||
1339 | |||
1340 | acpi_bus_get_device(handle, &device); | ||
1341 | if (!device) | ||
1342 | acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER, | ||
1343 | ACPI_STA_DEFAULT, &ops); | ||
1344 | } | ||
1345 | |||
1309 | static int acpi_bus_type_and_status(acpi_handle handle, int *type, | 1346 | static int acpi_bus_type_and_status(acpi_handle handle, int *type, |
1310 | unsigned long long *sta) | 1347 | unsigned long long *sta) |
1311 | { | 1348 | { |
@@ -1351,6 +1388,7 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
1351 | struct acpi_bus_ops *ops = context; | 1388 | struct acpi_bus_ops *ops = context; |
1352 | int type; | 1389 | int type; |
1353 | unsigned long long sta; | 1390 | unsigned long long sta; |
1391 | struct acpi_device_wakeup wakeup; | ||
1354 | struct acpi_device *device; | 1392 | struct acpi_device *device; |
1355 | acpi_status status; | 1393 | acpi_status status; |
1356 | int result; | 1394 | int result; |
@@ -1360,8 +1398,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
1360 | return AE_OK; | 1398 | return AE_OK; |
1361 | 1399 | ||
1362 | if (!(sta & ACPI_STA_DEVICE_PRESENT) && | 1400 | if (!(sta & ACPI_STA_DEVICE_PRESENT) && |
1363 | !(sta & ACPI_STA_DEVICE_FUNCTIONING)) | 1401 | !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { |
1402 | acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); | ||
1364 | return AE_CTRL_DEPTH; | 1403 | return AE_CTRL_DEPTH; |
1404 | } | ||
1365 | 1405 | ||
1366 | /* | 1406 | /* |
1367 | * We may already have an acpi_device from a previous enumeration. If | 1407 | * We may already have an acpi_device from a previous enumeration. If |
@@ -1444,7 +1484,7 @@ int acpi_bus_start(struct acpi_device *device) | |||
1444 | 1484 | ||
1445 | result = acpi_bus_scan(device->handle, &ops, NULL); | 1485 | result = acpi_bus_scan(device->handle, &ops, NULL); |
1446 | 1486 | ||
1447 | acpi_update_gpes(); | 1487 | acpi_update_all_gpes(); |
1448 | 1488 | ||
1449 | return result; | 1489 | return result; |
1450 | } | 1490 | } |
@@ -1550,6 +1590,8 @@ int __init acpi_scan_init(void) | |||
1550 | printk(KERN_ERR PREFIX "Could not register bus type\n"); | 1590 | printk(KERN_ERR PREFIX "Could not register bus type\n"); |
1551 | } | 1591 | } |
1552 | 1592 | ||
1593 | acpi_power_init(); | ||
1594 | |||
1553 | /* | 1595 | /* |
1554 | * Enumerate devices in the ACPI namespace. | 1596 | * Enumerate devices in the ACPI namespace. |
1555 | */ | 1597 | */ |
@@ -1561,7 +1603,7 @@ int __init acpi_scan_init(void) | |||
1561 | if (result) | 1603 | if (result) |
1562 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); | 1604 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); |
1563 | else | 1605 | else |
1564 | acpi_update_gpes(); | 1606 | acpi_update_all_gpes(); |
1565 | 1607 | ||
1566 | return result; | 1608 | return result; |
1567 | } | 1609 | } |
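A toy parser, in ordinary C rather than ACPICA types, illustrating the _PRW package layout handled by the reworked acpi_bus_extract_wakeup_device_power_package() above: element 0 carries the wake GPE (either a {gpe_device, gpe_number} pair or a bare integer for the FADT GPE block), element 1 is the deepest sleep state the device can wake the system from, and any remaining elements reference power resources. The element encoding is a simplified stand-in for union acpi_object.

#include <stdio.h>

enum elem_type { ELEM_INT, ELEM_GPE_PAIR, ELEM_RESOURCE };

struct elem {
	enum elem_type type;
	long value;		/* integer value or GPE number */
	const char *ref;	/* GPE device / power resource name */
};

struct wakeup_info {
	const char *gpe_device;
	long gpe_number;
	long sleep_state;
	int resource_count;
};

static int extract_prw(const struct elem *pkg, int count, struct wakeup_info *w)
{
	if (count < 2)
		return -1;		/* malformed package */

	if (pkg[0].type == ELEM_GPE_PAIR) {
		w->gpe_device = pkg[0].ref;
		w->gpe_number = pkg[0].value;
	} else if (pkg[0].type == ELEM_INT) {
		w->gpe_device = NULL;	/* GPE lives in the FADT GPE block */
		w->gpe_number = pkg[0].value;
	} else {
		return -1;
	}

	if (pkg[1].type != ELEM_INT)
		return -1;
	w->sleep_state = pkg[1].value;
	w->resource_count = count - 2;	/* trailing power resource references */
	return 0;
}

int main(void)
{
	struct elem prw[] = {
		{ ELEM_GPE_PAIR, 0x1d, "\\_GPE" },
		{ ELEM_INT, 3, NULL },
		{ ELEM_RESOURCE, 0, "PWRR" },
	};
	struct wakeup_info w = { 0 };

	if (!extract_prw(prw, 3, &w))
		printf("GPE 0x%02lx, wake from S%ld, %d resource(s)\n",
		       w.gpe_number, w.sleep_state, w.resource_count);
	return 0;
}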
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 721d93b3ceee..75c232084740 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -27,8 +27,6 @@ | |||
27 | 27 | ||
28 | static u8 sleep_states[ACPI_S_STATE_COUNT]; | 28 | static u8 sleep_states[ACPI_S_STATE_COUNT]; |
29 | 29 | ||
30 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | ||
31 | |||
32 | static void acpi_sleep_tts_switch(u32 acpi_state) | 30 | static void acpi_sleep_tts_switch(u32 acpi_state) |
33 | { | 31 | { |
34 | union acpi_object in_arg = { ACPI_TYPE_INTEGER }; | 32 | union acpi_object in_arg = { ACPI_TYPE_INTEGER }; |
@@ -81,6 +79,8 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
81 | } | 79 | } |
82 | 80 | ||
83 | #ifdef CONFIG_ACPI_SLEEP | 81 | #ifdef CONFIG_ACPI_SLEEP |
82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | ||
83 | |||
84 | /* | 84 | /* |
85 | * The ACPI specification wants us to save NVS memory regions during hibernation | 85 | * The ACPI specification wants us to save NVS memory regions during hibernation |
86 | * and to restore them during the subsequent resume. Windows does that also for | 86 | * and to restore them during the subsequent resume. Windows does that also for |
@@ -124,8 +124,7 @@ static int acpi_pm_freeze(void) | |||
124 | static int acpi_pm_pre_suspend(void) | 124 | static int acpi_pm_pre_suspend(void) |
125 | { | 125 | { |
126 | acpi_pm_freeze(); | 126 | acpi_pm_freeze(); |
127 | suspend_nvs_save(); | 127 | return suspend_nvs_save(); |
128 | return 0; | ||
129 | } | 128 | } |
130 | 129 | ||
131 | /** | 130 | /** |
@@ -151,7 +150,7 @@ static int acpi_pm_prepare(void) | |||
151 | { | 150 | { |
152 | int error = __acpi_pm_prepare(); | 151 | int error = __acpi_pm_prepare(); |
153 | if (!error) | 152 | if (!error) |
154 | acpi_pm_pre_suspend(); | 153 | error = acpi_pm_pre_suspend(); |
155 | 154 | ||
156 | return error; | 155 | return error; |
157 | } | 156 | } |
@@ -427,6 +426,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
427 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), | 426 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), |
428 | }, | 427 | }, |
429 | }, | 428 | }, |
429 | { | ||
430 | .callback = init_nvs_nosave, | ||
431 | .ident = "Sony Vaio VGN-NW130D", | ||
432 | .matches = { | ||
433 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
434 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), | ||
435 | }, | ||
436 | }, | ||
437 | { | ||
438 | .callback = init_nvs_nosave, | ||
439 | .ident = "Averatec AV1020-ED2", | ||
440 | .matches = { | ||
441 | DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), | ||
442 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), | ||
443 | }, | ||
444 | }, | ||
430 | {}, | 445 | {}, |
431 | }; | 446 | }; |
432 | #endif /* CONFIG_SUSPEND */ | 447 | #endif /* CONFIG_SUSPEND */ |
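The two DMI entries added above follow the kernel's quirk-table pattern. The sketch below is a self-contained model of that pattern, not the dmi_system_id machinery itself: a sentinel-terminated array of matches, each carrying a callback that flips a behaviour flag, here the decision to skip NVS saving on suspend.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool nvs_nosave;

static int init_nvs_nosave(void)
{
	nvs_nosave = true;
	return 0;
}

struct quirk {
	const char *ident;
	const char *vendor;
	const char *product;
	int (*callback)(void);
};

static const struct quirk quirks[] = {
	{ "Sony Vaio VGN-NW130D", "Sony Corporation", "VGN-NW130D", init_nvs_nosave },
	{ "Averatec AV1020-ED2", "AVERATEC", "1000 Series", init_nvs_nosave },
	{ NULL, NULL, NULL, NULL },		/* sentinel, like the trailing {} above */
};

static void check_quirks(const char *vendor, const char *product)
{
	const struct quirk *q;

	for (q = quirks; q->ident; q++) {
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product)) {
			printf("applying quirk: %s\n", q->ident);
			q->callback();
		}
	}
}

int main(void)
{
	check_quirks("Sony Corporation", "VGN-NW130D");
	printf("nvs_nosave=%d\n", nvs_nosave);
	return 0;
}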
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index f8588f81048a..61891e75583d 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void) | |||
438 | return; | 438 | return; |
439 | } | 439 | } |
440 | 440 | ||
441 | void acpi_os_gpe_count(u32 gpe_number) | 441 | static void gpe_count(u32 gpe_number) |
442 | { | 442 | { |
443 | acpi_gpe_count++; | 443 | acpi_gpe_count++; |
444 | 444 | ||
@@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number) | |||
454 | return; | 454 | return; |
455 | } | 455 | } |
456 | 456 | ||
457 | void acpi_os_fixed_event_count(u32 event_number) | 457 | static void fixed_event_count(u32 event_number) |
458 | { | 458 | { |
459 | if (!all_counters) | 459 | if (!all_counters) |
460 | return; | 460 | return; |
@@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number) | |||
468 | return; | 468 | return; |
469 | } | 469 | } |
470 | 470 | ||
471 | static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, | ||
472 | u32 event_number, void *context) | ||
473 | { | ||
474 | if (event_type == ACPI_EVENT_TYPE_GPE) | ||
475 | gpe_count(event_number); | ||
476 | |||
477 | if (event_type == ACPI_EVENT_TYPE_FIXED) | ||
478 | fixed_event_count(event_number); | ||
479 | } | ||
480 | |||
471 | static int get_status(u32 index, acpi_event_status *status, | 481 | static int get_status(u32 index, acpi_event_status *status, |
472 | acpi_handle *handle) | 482 | acpi_handle *handle) |
473 | { | 483 | { |
@@ -601,6 +611,7 @@ end: | |||
601 | 611 | ||
602 | void acpi_irq_stats_init(void) | 612 | void acpi_irq_stats_init(void) |
603 | { | 613 | { |
614 | acpi_status status; | ||
604 | int i; | 615 | int i; |
605 | 616 | ||
606 | if (all_counters) | 617 | if (all_counters) |
@@ -619,6 +630,10 @@ void acpi_irq_stats_init(void) | |||
619 | if (all_counters == NULL) | 630 | if (all_counters == NULL) |
620 | goto fail; | 631 | goto fail; |
621 | 632 | ||
633 | status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); | ||
634 | if (ACPI_FAILURE(status)) | ||
635 | goto fail; | ||
636 | |||
622 | counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), | 637 | counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), |
623 | GFP_KERNEL); | 638 | GFP_KERNEL); |
624 | if (counter_attrs == NULL) | 639 | if (counter_attrs == NULL) |
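acpi_irq_stats_init() above now installs a single global event handler instead of exporting separate acpi_os_* counting hooks. A minimal plain-C model of that dispatch, using an illustrative enum and counters rather than the ACPICA definitions:

#include <stdio.h>

enum event_type { EVENT_TYPE_GPE, EVENT_TYPE_FIXED };

static unsigned int gpe_count;
static unsigned int fixed_event_count;

/* One handler receives both classes of events, as with acpi_gbl_event_handler(). */
static void global_event_handler(enum event_type type, unsigned int number)
{
	if (type == EVENT_TYPE_GPE)
		gpe_count++;
	if (type == EVENT_TYPE_FIXED)
		fixed_event_count++;
	(void)number;	/* a real handler would also index a per-event counter */
}

int main(void)
{
	global_event_handler(EVENT_TYPE_GPE, 0x1d);
	global_event_handler(EVENT_TYPE_FIXED, 2);
	printf("gpe=%u fixed=%u\n", gpe_count, fixed_event_count);
	return 0;
}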
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 5a27b0a31315..2607e17b520f 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device) | |||
1059 | break; | 1059 | break; |
1060 | tz->trips.active[i].flags.enabled = 1; | 1060 | tz->trips.active[i].flags.enabled = 1; |
1061 | for (j = 0; j < tz->trips.active[i].devices.count; j++) { | 1061 | for (j = 0; j < tz->trips.active[i].devices.count; j++) { |
1062 | result = acpi_bus_get_power(tz->trips.active[i].devices. | 1062 | result = acpi_bus_update_power( |
1063 | handles[j], &power_state); | 1063 | tz->trips.active[i].devices.handles[j], |
1064 | &power_state); | ||
1064 | if (result || (power_state != ACPI_STATE_D0)) { | 1065 | if (result || (power_state != ACPI_STATE_D0)) { |
1065 | tz->trips.active[i].flags.enabled = 0; | 1066 | tz->trips.active[i].flags.enabled = 0; |
1066 | break; | 1067 | break; |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 5cd0228d2daa..177b4ddc3479 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/input.h> | 33 | #include <linux/input.h> |
34 | #include <linux/backlight.h> | 34 | #include <linux/backlight.h> |
35 | #include <linux/thermal.h> | 35 | #include <linux/thermal.h> |
36 | #include <linux/video_output.h> | ||
37 | #include <linux/sort.h> | 36 | #include <linux/sort.h> |
38 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
39 | #include <linux/pci_ids.h> | 38 | #include <linux/pci_ids.h> |
@@ -172,9 +171,6 @@ struct acpi_video_device_cap { | |||
172 | u8 _BQC:1; /* Get current brightness level */ | 171 | u8 _BQC:1; /* Get current brightness level */ |
173 | u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ | 172 | u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ |
174 | u8 _DDC:1; /*Return the EDID for this device */ | 173 | u8 _DDC:1; /*Return the EDID for this device */ |
175 | u8 _DCS:1; /*Return status of output device */ | ||
176 | u8 _DGS:1; /*Query graphics state */ | ||
177 | u8 _DSS:1; /*Device state set */ | ||
178 | }; | 174 | }; |
179 | 175 | ||
180 | struct acpi_video_brightness_flags { | 176 | struct acpi_video_brightness_flags { |
@@ -202,7 +198,6 @@ struct acpi_video_device { | |||
202 | struct acpi_video_device_brightness *brightness; | 198 | struct acpi_video_device_brightness *brightness; |
203 | struct backlight_device *backlight; | 199 | struct backlight_device *backlight; |
204 | struct thermal_cooling_device *cooling_dev; | 200 | struct thermal_cooling_device *cooling_dev; |
205 | struct output_device *output_dev; | ||
206 | }; | 201 | }; |
207 | 202 | ||
208 | static const char device_decode[][30] = { | 203 | static const char device_decode[][30] = { |
@@ -226,10 +221,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, | |||
226 | u32 level_current, u32 event); | 221 | u32 level_current, u32 event); |
227 | static int acpi_video_switch_brightness(struct acpi_video_device *device, | 222 | static int acpi_video_switch_brightness(struct acpi_video_device *device, |
228 | int event); | 223 | int event); |
229 | static int acpi_video_device_get_state(struct acpi_video_device *device, | ||
230 | unsigned long long *state); | ||
231 | static int acpi_video_output_get(struct output_device *od); | ||
232 | static int acpi_video_device_set_state(struct acpi_video_device *device, int state); | ||
233 | 224 | ||
234 | /*backlight device sysfs support*/ | 225 | /*backlight device sysfs support*/ |
235 | static int acpi_video_get_brightness(struct backlight_device *bd) | 226 | static int acpi_video_get_brightness(struct backlight_device *bd) |
@@ -265,30 +256,6 @@ static struct backlight_ops acpi_backlight_ops = { | |||
265 | .update_status = acpi_video_set_brightness, | 256 | .update_status = acpi_video_set_brightness, |
266 | }; | 257 | }; |
267 | 258 | ||
268 | /*video output device sysfs support*/ | ||
269 | static int acpi_video_output_get(struct output_device *od) | ||
270 | { | ||
271 | unsigned long long state; | ||
272 | struct acpi_video_device *vd = | ||
273 | (struct acpi_video_device *)dev_get_drvdata(&od->dev); | ||
274 | acpi_video_device_get_state(vd, &state); | ||
275 | return (int)state; | ||
276 | } | ||
277 | |||
278 | static int acpi_video_output_set(struct output_device *od) | ||
279 | { | ||
280 | unsigned long state = od->request_state; | ||
281 | struct acpi_video_device *vd= | ||
282 | (struct acpi_video_device *)dev_get_drvdata(&od->dev); | ||
283 | return acpi_video_device_set_state(vd, state); | ||
284 | } | ||
285 | |||
286 | static struct output_properties acpi_output_properties = { | ||
287 | .set_state = acpi_video_output_set, | ||
288 | .get_status = acpi_video_output_get, | ||
289 | }; | ||
290 | |||
291 | |||
292 | /* thermal cooling device callbacks */ | 259 | /* thermal cooling device callbacks */ |
293 | static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned | 260 | static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned |
294 | long *state) | 261 | long *state) |
@@ -344,34 +311,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = { | |||
344 | Video Management | 311 | Video Management |
345 | -------------------------------------------------------------------------- */ | 312 | -------------------------------------------------------------------------- */ |
346 | 313 | ||
347 | /* device */ | ||
348 | |||
349 | static int | ||
350 | acpi_video_device_get_state(struct acpi_video_device *device, | ||
351 | unsigned long long *state) | ||
352 | { | ||
353 | int status; | ||
354 | |||
355 | status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state); | ||
356 | |||
357 | return status; | ||
358 | } | ||
359 | |||
360 | static int | ||
361 | acpi_video_device_set_state(struct acpi_video_device *device, int state) | ||
362 | { | ||
363 | int status; | ||
364 | union acpi_object arg0 = { ACPI_TYPE_INTEGER }; | ||
365 | struct acpi_object_list args = { 1, &arg0 }; | ||
366 | unsigned long long ret; | ||
367 | |||
368 | |||
369 | arg0.integer.value = state; | ||
370 | status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret); | ||
371 | |||
372 | return status; | ||
373 | } | ||
374 | |||
375 | static int | 314 | static int |
376 | acpi_video_device_lcd_query_levels(struct acpi_video_device *device, | 315 | acpi_video_device_lcd_query_levels(struct acpi_video_device *device, |
377 | union acpi_object **levels) | 316 | union acpi_object **levels) |
@@ -831,15 +770,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
831 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { | 770 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { |
832 | device->cap._DDC = 1; | 771 | device->cap._DDC = 1; |
833 | } | 772 | } |
834 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) { | ||
835 | device->cap._DCS = 1; | ||
836 | } | ||
837 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) { | ||
838 | device->cap._DGS = 1; | ||
839 | } | ||
840 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) { | ||
841 | device->cap._DSS = 1; | ||
842 | } | ||
843 | 773 | ||
844 | if (acpi_video_backlight_support()) { | 774 | if (acpi_video_backlight_support()) { |
845 | struct backlight_properties props; | 775 | struct backlight_properties props; |
@@ -904,21 +834,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
904 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 834 | printk(KERN_ERR PREFIX "Create sysfs link\n"); |
905 | 835 | ||
906 | } | 836 | } |
907 | |||
908 | if (acpi_video_display_switch_support()) { | ||
909 | |||
910 | if (device->cap._DCS && device->cap._DSS) { | ||
911 | static int count; | ||
912 | char *name; | ||
913 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
914 | if (!name) | ||
915 | return; | ||
916 | count++; | ||
917 | device->output_dev = video_output_register(name, | ||
918 | NULL, device, &acpi_output_properties); | ||
919 | kfree(name); | ||
920 | } | ||
921 | } | ||
922 | } | 837 | } |
923 | 838 | ||
924 | /* | 839 | /* |
@@ -1360,6 +1275,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, | |||
1360 | if (!video_device) | 1275 | if (!video_device) |
1361 | continue; | 1276 | continue; |
1362 | 1277 | ||
1278 | if (!video_device->cap._DDC) | ||
1279 | continue; | ||
1280 | |||
1363 | if (type) { | 1281 | if (type) { |
1364 | switch (type) { | 1282 | switch (type) { |
1365 | case ACPI_VIDEO_DISPLAY_CRT: | 1283 | case ACPI_VIDEO_DISPLAY_CRT: |
@@ -1452,7 +1370,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
1452 | thermal_cooling_device_unregister(device->cooling_dev); | 1370 | thermal_cooling_device_unregister(device->cooling_dev); |
1453 | device->cooling_dev = NULL; | 1371 | device->cooling_dev = NULL; |
1454 | } | 1372 | } |
1455 | video_output_unregister(device->output_dev); | ||
1456 | 1373 | ||
1457 | return 0; | 1374 | return 0; |
1458 | } | 1375 | } |
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b83676126598..42d3d72dae85 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -17,15 +17,14 @@ | |||
17 | * capabilities the graphics cards plugged in support. The check for general | 17 | * capabilities the graphics cards plugged in support. The check for general |
18 | * video capabilities will be triggered by the first caller of | 18 | * video capabilities will be triggered by the first caller of |
19 | * acpi_video_get_capabilities(NULL); which will happen when the first | 19 | * acpi_video_get_capabilities(NULL); which will happen when the first |
20 | * backlight (or display output) switching supporting driver calls: | 20 | * backlight switching supporting driver calls: |
21 | * acpi_video_backlight_support(); | 21 | * acpi_video_backlight_support(); |
22 | * | 22 | * |
23 | * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) | 23 | * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) |
24 | * are available, video.ko should be used to handle the device. | 24 | * are available, video.ko should be used to handle the device. |
25 | * | 25 | * |
26 | * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, | 26 | * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, |
27 | * sony_acpi,... can take care about backlight brightness and display output | 27 | * sony_acpi,... can take care about backlight brightness. |
28 | * switching. | ||
29 | * | 28 | * |
30 | * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) | 29 | * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) |
31 | * this file will not be compiled, acpi_video_get_capabilities() and | 30 | * this file will not be compiled, acpi_video_get_capabilities() and |
@@ -83,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device) | |||
83 | if (!device) | 82 | if (!device) |
84 | return 0; | 83 | return 0; |
85 | 84 | ||
86 | /* Is this device able to support video switching ? */ | ||
87 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || | ||
88 | ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) | ||
89 | video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; | ||
90 | |||
91 | /* Is this device able to retrieve a video ROM ? */ | 85 | /* Is this device able to retrieve a video ROM ? */ |
92 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) | 86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) |
93 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; | 87 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; |
@@ -161,8 +155,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle) | |||
161 | * | 155 | * |
162 | * if (dmi_name_in_vendors("XY")) { | 156 | * if (dmi_name_in_vendors("XY")) { |
163 | * acpi_video_support |= | 157 | * acpi_video_support |= |
164 | * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR; | ||
165 | * acpi_video_support |= | ||
166 | * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; | 158 | * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; |
167 | *} | 159 | *} |
168 | */ | 160 | */ |
@@ -212,33 +204,8 @@ int acpi_video_backlight_support(void) | |||
212 | EXPORT_SYMBOL(acpi_video_backlight_support); | 204 | EXPORT_SYMBOL(acpi_video_backlight_support); |
213 | 205 | ||
214 | /* | 206 | /* |
215 | * Returns true if video.ko can do display output switching. | 207 | * Use acpi_backlight=vendor/video to force that backlight switching |
216 | * This does not work well/at all with binary graphics drivers | 208 | * is processed by vendor specific acpi drivers or video.ko driver. |
217 | * which disable system io ranges and do it on their own. | ||
218 | */ | ||
219 | int acpi_video_display_switch_support(void) | ||
220 | { | ||
221 | if (!acpi_video_caps_checked) | ||
222 | acpi_video_get_capabilities(NULL); | ||
223 | |||
224 | if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR) | ||
225 | return 0; | ||
226 | else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO) | ||
227 | return 1; | ||
228 | |||
229 | if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR) | ||
230 | return 0; | ||
231 | else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO) | ||
232 | return 1; | ||
233 | |||
234 | return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING; | ||
235 | } | ||
236 | EXPORT_SYMBOL(acpi_video_display_switch_support); | ||
237 | |||
238 | /* | ||
239 | * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video | ||
240 | * To force that backlight or display output switching is processed by vendor | ||
241 | * specific acpi drivers or video.ko driver. | ||
242 | */ | 209 | */ |
243 | static int __init acpi_backlight(char *str) | 210 | static int __init acpi_backlight(char *str) |
244 | { | 211 | { |
@@ -255,19 +222,3 @@ static int __init acpi_backlight(char *str) | |||
255 | return 1; | 222 | return 1; |
256 | } | 223 | } |
257 | __setup("acpi_backlight=", acpi_backlight); | 224 | __setup("acpi_backlight=", acpi_backlight); |
258 | |||
259 | static int __init acpi_display_output(char *str) | ||
260 | { | ||
261 | if (str == NULL || *str == '\0') | ||
262 | return 1; | ||
263 | else { | ||
264 | if (!strcmp("vendor", str)) | ||
265 | acpi_video_support |= | ||
266 | ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR; | ||
267 | if (!strcmp("video", str)) | ||
268 | acpi_video_support |= | ||
269 | ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; | ||
270 | } | ||
271 | return 1; | ||
272 | } | ||
273 | __setup("acpi_display_output=", acpi_display_output); | ||
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index f62a50c3ed34..ed6501452507 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state) | |||
37 | container_of(node, struct acpi_device, wakeup_list); | 37 | container_of(node, struct acpi_device, wakeup_list); |
38 | 38 | ||
39 | if (!dev->wakeup.flags.valid | 39 | if (!dev->wakeup.flags.valid |
40 | || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) | 40 | || sleep_state > (u32) dev->wakeup.sleep_state |
41 | || sleep_state > (u32) dev->wakeup.sleep_state) | 41 | || !(device_may_wakeup(&dev->dev) |
42 | || dev->wakeup.prepare_count)) | ||
42 | continue; | 43 | continue; |
43 | 44 | ||
44 | if (dev->wakeup.state.enabled) | 45 | if (device_may_wakeup(&dev->dev)) |
45 | acpi_enable_wakeup_device_power(dev, sleep_state); | 46 | acpi_enable_wakeup_device_power(dev, sleep_state); |
46 | 47 | ||
47 | /* The wake-up power should have been enabled already. */ | 48 | /* The wake-up power should have been enabled already. */ |
48 | acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, | 49 | acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
49 | ACPI_GPE_ENABLE); | 50 | ACPI_GPE_ENABLE); |
50 | } | 51 | } |
51 | } | 52 | } |
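The multi-line continue test above is easier to read when inverted. An illustrative helper, not part of the patch, stating when a device's wakeup GPE is actually configured under the new device_may_wakeup()-based check (struct acpi_device comes from <acpi/acpi_bus.h>, device_may_wakeup() from <linux/pm_wakeup.h>):

	/* Illustrative only: equivalent of the negated continue test above. */
	static bool acpi_device_wants_wakeup(struct acpi_device *adev,
					     u8 sleep_state)
	{
		return adev->wakeup.flags.valid
			&& sleep_state <= (u32) adev->wakeup.sleep_state
			&& (device_may_wakeup(&adev->dev)
			    || adev->wakeup.prepare_count);
	}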
@@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state) | |||
63 | container_of(node, struct acpi_device, wakeup_list); | 64 | container_of(node, struct acpi_device, wakeup_list); |
64 | 65 | ||
65 | if (!dev->wakeup.flags.valid | 66 | if (!dev->wakeup.flags.valid |
66 | || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) | 67 | || sleep_state > (u32) dev->wakeup.sleep_state |
67 | || (sleep_state > (u32) dev->wakeup.sleep_state)) | 68 | || !(device_may_wakeup(&dev->dev) |
69 | || dev->wakeup.prepare_count)) | ||
68 | continue; | 70 | continue; |
69 | 71 | ||
70 | acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, | 72 | acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
71 | ACPI_GPE_DISABLE); | 73 | ACPI_GPE_DISABLE); |
72 | 74 | ||
73 | if (dev->wakeup.state.enabled) | 75 | if (device_may_wakeup(&dev->dev)) |
74 | acpi_disable_wakeup_device_power(dev); | 76 | acpi_disable_wakeup_device_power(dev); |
75 | } | 77 | } |
76 | } | 78 | } |
@@ -84,8 +86,8 @@ int __init acpi_wakeup_device_init(void) | |||
84 | struct acpi_device *dev = container_of(node, | 86 | struct acpi_device *dev = container_of(node, |
85 | struct acpi_device, | 87 | struct acpi_device, |
86 | wakeup_list); | 88 | wakeup_list); |
87 | if (dev->wakeup.flags.always_enabled) | 89 | if (device_can_wakeup(&dev->dev)) |
88 | dev->wakeup.state.enabled = 1; | 90 | device_set_wakeup_enable(&dev->dev, true); |
89 | } | 91 | } |
90 | mutex_unlock(&acpi_device_lock); | 92 | mutex_unlock(&acpi_device_lock); |
91 | return 0; | 93 | return 0; |
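The final hunk seeds the driver-model wakeup flag from the ACPI capability bit instead of the private always_enabled/state.enabled pair. A minimal sketch of that conversion, with the list walk omitted; the helper name is made up, the two calls are the ones used above:

	/* Sketch of the init-time conversion performed above. */
	#include <linux/pm_wakeup.h>	/* device_can_wakeup(), device_set_wakeup_enable() */

	static void example_seed_wakeup_flag(struct device *dev)
	{
		if (device_can_wakeup(dev))
			device_set_wakeup_enable(dev, true);
	}

Routing the state through device_set_wakeup_enable() lets the per-device /sys/devices/.../power/wakeup attribute veto the wakeup later, since acpi_enable_wakeup_devices() now consults device_may_wakeup() rather than the ACPI-private flag.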