aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 14:22:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 14:22:45 -0400
commit02b9735c12892e04d3e101b06e4c6d64a814f566 (patch)
tree7907deb1cbfd1599d4f34d414873170d3266f164
parent75acebf2423ab13ff6198daa6e17ef7a2543bfe4 (diff)
parentf1728fd1599112239ed5cebc7be9810264db6792 (diff)
Merge tag 'pm+acpi-fixes-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management fixes from Rafael Wysocki: "All of these commits are fixes that have emerged recently and some of them fix bugs introduced during this merge window. Specifics: 1) ACPI-based PCI hotplug (ACPIPHP) fixes related to spurious events After the recent ACPIPHP changes we've seen some interesting breakage on a system that triggers device check notifications during boot for non-existing devices. Although those notifications are really spurious, we should be able to deal with them nevertheless and that shouldn't introduce too much overhead. Four commits to make that work properly. 2) Memory hotplug and hibernation mutual exclusion rework This was meant to be a cleanup, but it happens to fix a classical ABBA deadlock between system suspend/hibernation and ACPI memory hotplug which is possible if they are started roughly at the same time. Three commits rework memory hotplug so that it doesn't acquire pm_mutex and make hibernation use device_hotplug_lock which prevents it from racing with memory hotplug. 3) ACPI Intel LPSS (Low-Power Subsystem) driver crash fix The ACPI LPSS driver crashes during boot on Apple Macbook Air with Haswell that has slightly unusual BIOS configuration in which one of the LPSS device's _CRS method doesn't return all of the information expected by the driver. Fix from Mika Westerberg, for stable. 4) ACPICA fix related to Store->ArgX operation AML interpreter fix for obscure breakage that causes AML to be executed incorrectly on some machines (observed in practice). From Bob Moore. 5) ACPI core fix for PCI ACPI device objects lookup There still are cases in which there is more than one ACPI device object matching a given PCI device and we don't choose the one that the BIOS expects us to choose, so this makes the lookup take more criteria into account in those cases. 
6) Fix to prevent cpuidle from crashing in some rare cases If the result of cpuidle_get_driver() is NULL, which can happen on some systems, cpuidle_driver_ref() will crash trying to use that pointer and Daniel Fu's fix prevents that from happening. 7) cpufreq fixes related to CPU hotplug Stephen Boyd reported a number of concurrency problems with cpufreq related to CPU hotplug which are addressed by a series of fixes from Srivatsa S Bhat and Viresh Kumar. 8) cpufreq fix for time conversion in time_in_state attribute Time conversion carried out by cpufreq when user space attempts to read /sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state won't work correctly if cputime_t doesn't map directly to jiffies. Fix from Andreas Schwab. 9) Revert of a troublesome cpufreq commit Commit 7c30ed5 (cpufreq: make sure frequency transitions are serialized) was intended to address some known concurrency problems in cpufreq related to the ordering of transitions, but unfortunately it introduced several problems of its own, so I decided to revert it now and address the original problems later in a more robust way. 10) Intel Haswell CPU models for intel_pstate from Nell Hardcastle. 11) cpufreq fixes related to system suspend/resume The recent cpufreq changes that made it preserve CPU sysfs attributes over suspend/resume cycles introduced a possible NULL pointer dereference that caused it to crash during the second attempt to suspend. Three commits from Srivatsa S Bhat fix that problem and a couple of related issues. 12) cpufreq locking fix cpufreq_policy_restore() should acquire the lock for reading, but it acquires it for writing. 
Fix from Lan Tianyu" * tag 'pm+acpi-fixes-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (25 commits) cpufreq: Acquire the lock in cpufreq_policy_restore() for reading cpufreq: Prevent problems in update_policy_cpu() if last_cpu == new_cpu cpufreq: Restructure if/else block to avoid unintended behavior cpufreq: Fix crash in cpufreq-stats during suspend/resume intel_pstate: Add Haswell CPU models Revert "cpufreq: make sure frequency transitions are serialized" cpufreq: Use signed type for 'ret' variable, to store negative error values cpufreq: Remove temporary fix for race between CPU hotplug and sysfs-writes cpufreq: Synchronize the cpufreq store_*() routines with CPU hotplug cpufreq: Invoke __cpufreq_remove_dev_finish() after releasing cpu_hotplug.lock cpufreq: Split __cpufreq_remove_dev() into two parts cpufreq: Fix wrong time unit conversion cpufreq: serialize calls to __cpufreq_governor() cpufreq: don't allow governor limits to be changed when it is disabled ACPI / bind: Prefer device objects with _STA to those without it ACPI / hotplug / PCI: Avoid parent bus rescans on spurious device checks ACPI / hotplug / PCI: Use _OST to notify firmware about notify status ACPI / hotplug / PCI: Avoid doing too much for spurious notifies ACPICA: Fix for a Store->ArgX when ArgX contains a reference to a field. ACPI / hotplug / PCI: Don't trim devices before scanning the namespace ...
-rw-r--r--drivers/acpi/acpi_lpss.c3
-rw-r--r--drivers/acpi/acpica/exstore.c166
-rw-r--r--drivers/acpi/glue.c35
-rw-r--r--drivers/acpi/scan.c15
-rw-r--r--drivers/cpufreq/cpufreq.c152
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c61
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--kernel/power/hibernate.c45
-rw-r--r--kernel/power/user.c24
-rw-r--r--mm/memory_hotplug.c4
13 files changed, 328 insertions, 188 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6a382188fa20..fb78bb9ad8f6 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -257,12 +257,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
257 pdata->mmio_size = resource_size(&rentry->res); 257 pdata->mmio_size = resource_size(&rentry->res);
258 pdata->mmio_base = ioremap(rentry->res.start, 258 pdata->mmio_base = ioremap(rentry->res.start,
259 pdata->mmio_size); 259 pdata->mmio_size);
260 pdata->dev_desc = dev_desc;
261 break; 260 break;
262 } 261 }
263 262
264 acpi_dev_free_resource_list(&resource_list); 263 acpi_dev_free_resource_list(&resource_list);
265 264
265 pdata->dev_desc = dev_desc;
266
266 if (dev_desc->clk_required) { 267 if (dev_desc->clk_required) {
267 ret = register_device_clock(adev, pdata); 268 ret = register_device_clock(adev, pdata);
268 if (ret) { 269 if (ret) {
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 2bdba6f7d762..f0b09bf9887d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -57,6 +57,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
57 union acpi_operand_object *dest_desc, 57 union acpi_operand_object *dest_desc,
58 struct acpi_walk_state *walk_state); 58 struct acpi_walk_state *walk_state);
59 59
60static acpi_status
61acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
62 struct acpi_namespace_node *node,
63 struct acpi_walk_state *walk_state);
64
60/******************************************************************************* 65/*******************************************************************************
61 * 66 *
62 * FUNCTION: acpi_ex_store 67 * FUNCTION: acpi_ex_store
@@ -375,7 +380,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
375 * When storing into an object the data is converted to the 380 * When storing into an object the data is converted to the
376 * target object type then stored in the object. This means 381 * target object type then stored in the object. This means
377 * that the target object type (for an initialized target) will 382 * that the target object type (for an initialized target) will
378 * not be changed by a store operation. 383 * not be changed by a store operation. A copy_object can change
384 * the target type, however.
385 *
386 * The implicit_conversion flag is set to NO/FALSE only when
387 * storing to an arg_x -- as per the rules of the ACPI spec.
379 * 388 *
380 * Assumes parameters are already validated. 389 * Assumes parameters are already validated.
381 * 390 *
@@ -399,7 +408,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
399 target_type = acpi_ns_get_type(node); 408 target_type = acpi_ns_get_type(node);
400 target_desc = acpi_ns_get_attached_object(node); 409 target_desc = acpi_ns_get_attached_object(node);
401 410
402 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n", 411 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p (%s) to node %p (%s)\n",
403 source_desc, 412 source_desc,
404 acpi_ut_get_object_type_name(source_desc), node, 413 acpi_ut_get_object_type_name(source_desc), node,
405 acpi_ut_get_type_name(target_type))); 414 acpi_ut_get_type_name(target_type)));
@@ -413,45 +422,30 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
413 return_ACPI_STATUS(status); 422 return_ACPI_STATUS(status);
414 } 423 }
415 424
416 /* If no implicit conversion, drop into the default case below */
417
418 if ((!implicit_conversion) ||
419 ((walk_state->opcode == AML_COPY_OP) &&
420 (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
421 (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
422 (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
423 /*
424 * Force execution of default (no implicit conversion). Note:
425 * copy_object does not perform an implicit conversion, as per the ACPI
426 * spec -- except in case of region/bank/index fields -- because these
427 * objects must retain their original type permanently.
428 */
429 target_type = ACPI_TYPE_ANY;
430 }
431
432 /* Do the actual store operation */ 425 /* Do the actual store operation */
433 426
434 switch (target_type) { 427 switch (target_type) {
435 case ACPI_TYPE_BUFFER_FIELD:
436 case ACPI_TYPE_LOCAL_REGION_FIELD:
437 case ACPI_TYPE_LOCAL_BANK_FIELD:
438 case ACPI_TYPE_LOCAL_INDEX_FIELD:
439
440 /* For fields, copy the source data to the target field. */
441
442 status = acpi_ex_write_data_to_field(source_desc, target_desc,
443 &walk_state->result_obj);
444 break;
445
446 case ACPI_TYPE_INTEGER: 428 case ACPI_TYPE_INTEGER:
447 case ACPI_TYPE_STRING: 429 case ACPI_TYPE_STRING:
448 case ACPI_TYPE_BUFFER: 430 case ACPI_TYPE_BUFFER:
449 /* 431 /*
450 * These target types are all of type Integer/String/Buffer, and 432 * The simple data types all support implicit source operand
451 * therefore support implicit conversion before the store. 433 * conversion before the store.
452 *
453 * Copy and/or convert the source object to a new target object
454 */ 434 */
435
436 if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
437 /*
438 * However, copy_object and Stores to arg_x do not perform
439 * an implicit conversion, as per the ACPI specification.
440 * A direct store is performed instead.
441 */
442 status = acpi_ex_store_direct_to_node(source_desc, node,
443 walk_state);
444 break;
445 }
446
447 /* Store with implicit source operand conversion support */
448
455 status = 449 status =
456 acpi_ex_store_object_to_object(source_desc, target_desc, 450 acpi_ex_store_object_to_object(source_desc, target_desc,
457 &new_desc, walk_state); 451 &new_desc, walk_state);
@@ -465,13 +459,12 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
465 * the Name's type to that of the value being stored in it. 459 * the Name's type to that of the value being stored in it.
466 * source_desc reference count is incremented by attach_object. 460 * source_desc reference count is incremented by attach_object.
467 * 461 *
468 * Note: This may change the type of the node if an explicit store 462 * Note: This may change the type of the node if an explicit
469 * has been performed such that the node/object type has been 463 * store has been performed such that the node/object type
470 * changed. 464 * has been changed.
471 */ 465 */
472 status = 466 status = acpi_ns_attach_object(node, new_desc,
473 acpi_ns_attach_object(node, new_desc, 467 new_desc->common.type);
474 new_desc->common.type);
475 468
476 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 469 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
477 "Store %s into %s via Convert/Attach\n", 470 "Store %s into %s via Convert/Attach\n",
@@ -482,38 +475,83 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
482 } 475 }
483 break; 476 break;
484 477
485 default: 478 case ACPI_TYPE_BUFFER_FIELD:
486 479 case ACPI_TYPE_LOCAL_REGION_FIELD:
487 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 480 case ACPI_TYPE_LOCAL_BANK_FIELD:
488 "Storing [%s] (%p) directly into node [%s] (%p)" 481 case ACPI_TYPE_LOCAL_INDEX_FIELD:
489 " with no implicit conversion\n", 482 /*
490 acpi_ut_get_object_type_name(source_desc), 483 * For all fields, always write the source data to the target
491 source_desc, 484 * field. Any required implicit source operand conversion is
492 acpi_ut_get_object_type_name(target_desc), 485 * performed in the function below as necessary. Note, field
493 node)); 486 * objects must retain their original type permanently.
487 */
488 status = acpi_ex_write_data_to_field(source_desc, target_desc,
489 &walk_state->result_obj);
490 break;
494 491
492 default:
495 /* 493 /*
496 * No conversions for all other types. Directly store a copy of 494 * No conversions for all other types. Directly store a copy of
497 * the source object. NOTE: This is a departure from the ACPI 495 * the source object. This is the ACPI spec-defined behavior for
498 * spec, which states "If conversion is impossible, abort the 496 * the copy_object operator.
499 * running control method".
500 * 497 *
501 * This code implements "If conversion is impossible, treat the 498 * NOTE: For the Store operator, this is a departure from the
502 * Store operation as a CopyObject". 499 * ACPI spec, which states "If conversion is impossible, abort
500 * the running control method". Instead, this code implements
501 * "If conversion is impossible, treat the Store operation as
502 * a CopyObject".
503 */ 503 */
504 status = 504 status = acpi_ex_store_direct_to_node(source_desc, node,
505 acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, 505 walk_state);
506 walk_state);
507 if (ACPI_FAILURE(status)) {
508 return_ACPI_STATUS(status);
509 }
510
511 status =
512 acpi_ns_attach_object(node, new_desc,
513 new_desc->common.type);
514 acpi_ut_remove_reference(new_desc);
515 break; 506 break;
516 } 507 }
517 508
518 return_ACPI_STATUS(status); 509 return_ACPI_STATUS(status);
519} 510}
511
512/*******************************************************************************
513 *
514 * FUNCTION: acpi_ex_store_direct_to_node
515 *
516 * PARAMETERS: source_desc - Value to be stored
517 * node - Named object to receive the value
518 * walk_state - Current walk state
519 *
520 * RETURN: Status
521 *
522 * DESCRIPTION: "Store" an object directly to a node. This involves a copy
523 * and an attach.
524 *
525 ******************************************************************************/
526
527static acpi_status
528acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
529 struct acpi_namespace_node *node,
530 struct acpi_walk_state *walk_state)
531{
532 acpi_status status;
533 union acpi_operand_object *new_desc;
534
535 ACPI_FUNCTION_TRACE(ex_store_direct_to_node);
536
537 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
538 "Storing [%s] (%p) directly into node [%s] (%p)"
539 " with no implicit conversion\n",
540 acpi_ut_get_object_type_name(source_desc),
541 source_desc, acpi_ut_get_type_name(node->type),
542 node));
543
544 /* Copy the source object to a new object */
545
546 status =
547 acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
548 if (ACPI_FAILURE(status)) {
549 return_ACPI_STATUS(status);
550 }
551
552 /* Attach the new object to the node */
553
554 status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
555 acpi_ut_remove_reference(new_desc);
556 return_ACPI_STATUS(status);
557}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 94672297e1b1..10f0f40587bb 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -79,6 +79,9 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
79 return ret; 79 return ret;
80} 80}
81 81
82#define FIND_CHILD_MIN_SCORE 1
83#define FIND_CHILD_MAX_SCORE 2
84
82static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used, 85static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
83 void *not_used, void **ret_p) 86 void *not_used, void **ret_p)
84{ 87{
@@ -92,14 +95,17 @@ static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
92 return AE_OK; 95 return AE_OK;
93} 96}
94 97
95static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge) 98static int do_find_child_checks(acpi_handle handle, bool is_bridge)
96{ 99{
100 bool sta_present = true;
97 unsigned long long sta; 101 unsigned long long sta;
98 acpi_status status; 102 acpi_status status;
99 103
100 status = acpi_bus_get_status_handle(handle, &sta); 104 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
101 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) 105 if (status == AE_NOT_FOUND)
102 return false; 106 sta_present = false;
107 else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
108 return -ENODEV;
103 109
104 if (is_bridge) { 110 if (is_bridge) {
105 void *test = NULL; 111 void *test = NULL;
@@ -107,16 +113,17 @@ static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
107 /* Check if this object has at least one child device. */ 113 /* Check if this object has at least one child device. */
108 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 114 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
109 acpi_dev_present, NULL, NULL, &test); 115 acpi_dev_present, NULL, NULL, &test);
110 return !!test; 116 if (!test)
117 return -ENODEV;
111 } 118 }
112 return true; 119 return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
113} 120}
114 121
115struct find_child_context { 122struct find_child_context {
116 u64 addr; 123 u64 addr;
117 bool is_bridge; 124 bool is_bridge;
118 acpi_handle ret; 125 acpi_handle ret;
119 bool ret_checked; 126 int ret_score;
120}; 127};
121 128
122static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used, 129static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
@@ -125,6 +132,7 @@ static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
125 struct find_child_context *context = data; 132 struct find_child_context *context = data;
126 unsigned long long addr; 133 unsigned long long addr;
127 acpi_status status; 134 acpi_status status;
135 int score;
128 136
129 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); 137 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
130 if (ACPI_FAILURE(status) || addr != context->addr) 138 if (ACPI_FAILURE(status) || addr != context->addr)
@@ -144,15 +152,20 @@ static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
144 * its handle if so. Second, check the same for the object that we've 152 * its handle if so. Second, check the same for the object that we've
145 * just found. 153 * just found.
146 */ 154 */
147 if (!context->ret_checked) { 155 if (!context->ret_score) {
148 if (acpi_extra_checks_passed(context->ret, context->is_bridge)) 156 score = do_find_child_checks(context->ret, context->is_bridge);
157 if (score == FIND_CHILD_MAX_SCORE)
149 return AE_CTRL_TERMINATE; 158 return AE_CTRL_TERMINATE;
150 else 159 else
151 context->ret_checked = true; 160 context->ret_score = score;
152 } 161 }
153 if (acpi_extra_checks_passed(handle, context->is_bridge)) { 162 score = do_find_child_checks(handle, context->is_bridge);
163 if (score == FIND_CHILD_MAX_SCORE) {
154 context->ret = handle; 164 context->ret = handle;
155 return AE_CTRL_TERMINATE; 165 return AE_CTRL_TERMINATE;
166 } else if (score > context->ret_score) {
167 context->ret = handle;
168 context->ret_score = score;
156 } 169 }
157 return AE_OK; 170 return AE_OK;
158} 171}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 61d090b6ce25..fbdb82e70d10 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -204,8 +204,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
204 return -EINVAL; 204 return -EINVAL;
205 } 205 }
206 206
207 lock_device_hotplug();
208
209 /* 207 /*
210 * Carry out two passes here and ignore errors in the first pass, 208 * Carry out two passes here and ignore errors in the first pass,
211 * because if the devices in question are memory blocks and 209 * because if the devices in question are memory blocks and
@@ -236,9 +234,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
236 ACPI_UINT32_MAX, 234 ACPI_UINT32_MAX,
237 acpi_bus_online_companions, NULL, 235 acpi_bus_online_companions, NULL,
238 NULL, NULL); 236 NULL, NULL);
239
240 unlock_device_hotplug();
241
242 put_device(&device->dev); 237 put_device(&device->dev);
243 return -EBUSY; 238 return -EBUSY;
244 } 239 }
@@ -249,8 +244,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
249 244
250 acpi_bus_trim(device); 245 acpi_bus_trim(device);
251 246
252 unlock_device_hotplug();
253
254 /* Device node has been unregistered. */ 247 /* Device node has been unregistered. */
255 put_device(&device->dev); 248 put_device(&device->dev);
256 device = NULL; 249 device = NULL;
@@ -289,6 +282,7 @@ static void acpi_bus_device_eject(void *context)
289 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 282 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
290 int error; 283 int error;
291 284
285 lock_device_hotplug();
292 mutex_lock(&acpi_scan_lock); 286 mutex_lock(&acpi_scan_lock);
293 287
294 acpi_bus_get_device(handle, &device); 288 acpi_bus_get_device(handle, &device);
@@ -312,6 +306,7 @@ static void acpi_bus_device_eject(void *context)
312 306
313 out: 307 out:
314 mutex_unlock(&acpi_scan_lock); 308 mutex_unlock(&acpi_scan_lock);
309 unlock_device_hotplug();
315 return; 310 return;
316 311
317 err_out: 312 err_out:
@@ -326,8 +321,8 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
326 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 321 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
327 int error; 322 int error;
328 323
329 mutex_lock(&acpi_scan_lock);
330 lock_device_hotplug(); 324 lock_device_hotplug();
325 mutex_lock(&acpi_scan_lock);
331 326
332 if (ost_source != ACPI_NOTIFY_BUS_CHECK) { 327 if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
333 acpi_bus_get_device(handle, &device); 328 acpi_bus_get_device(handle, &device);
@@ -353,9 +348,9 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
353 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE); 348 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
354 349
355 out: 350 out:
356 unlock_device_hotplug();
357 acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL); 351 acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
358 mutex_unlock(&acpi_scan_lock); 352 mutex_unlock(&acpi_scan_lock);
353 unlock_device_hotplug();
359} 354}
360 355
361static void acpi_scan_bus_check(void *context) 356static void acpi_scan_bus_check(void *context)
@@ -446,6 +441,7 @@ void acpi_bus_hot_remove_device(void *context)
446 acpi_handle handle = device->handle; 441 acpi_handle handle = device->handle;
447 int error; 442 int error;
448 443
444 lock_device_hotplug();
449 mutex_lock(&acpi_scan_lock); 445 mutex_lock(&acpi_scan_lock);
450 446
451 error = acpi_scan_hot_remove(device); 447 error = acpi_scan_hot_remove(device);
@@ -455,6 +451,7 @@ void acpi_bus_hot_remove_device(void *context)
455 NULL); 451 NULL);
456 452
457 mutex_unlock(&acpi_scan_lock); 453 mutex_unlock(&acpi_scan_lock);
454 unlock_device_hotplug();
458 kfree(context); 455 kfree(context);
459} 456}
460EXPORT_SYMBOL(acpi_bus_hot_remove_device); 457EXPORT_SYMBOL(acpi_bus_hot_remove_device);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5c75e3147a60..43c24aa756f6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -280,13 +280,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
280 switch (state) { 280 switch (state) {
281 281
282 case CPUFREQ_PRECHANGE: 282 case CPUFREQ_PRECHANGE:
283 if (WARN(policy->transition_ongoing ==
284 cpumask_weight(policy->cpus),
285 "In middle of another frequency transition\n"))
286 return;
287
288 policy->transition_ongoing++;
289
290 /* detect if the driver reported a value as "old frequency" 283 /* detect if the driver reported a value as "old frequency"
291 * which is not equal to what the cpufreq core thinks is 284 * which is not equal to what the cpufreq core thinks is
292 * "old frequency". 285 * "old frequency".
@@ -306,12 +299,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
306 break; 299 break;
307 300
308 case CPUFREQ_POSTCHANGE: 301 case CPUFREQ_POSTCHANGE:
309 if (WARN(!policy->transition_ongoing,
310 "No frequency transition in progress\n"))
311 return;
312
313 policy->transition_ongoing--;
314
315 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 302 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
316 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, 303 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
317 (unsigned long)freqs->cpu); 304 (unsigned long)freqs->cpu);
@@ -437,7 +424,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
437static ssize_t store_##file_name \ 424static ssize_t store_##file_name \
438(struct cpufreq_policy *policy, const char *buf, size_t count) \ 425(struct cpufreq_policy *policy, const char *buf, size_t count) \
439{ \ 426{ \
440 unsigned int ret; \ 427 int ret; \
441 struct cpufreq_policy new_policy; \ 428 struct cpufreq_policy new_policy; \
442 \ 429 \
443 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 430 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -490,7 +477,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
490static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 477static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
491 const char *buf, size_t count) 478 const char *buf, size_t count)
492{ 479{
493 unsigned int ret; 480 int ret;
494 char str_governor[16]; 481 char str_governor[16];
495 struct cpufreq_policy new_policy; 482 struct cpufreq_policy new_policy;
496 483
@@ -694,8 +681,13 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
694 struct freq_attr *fattr = to_attr(attr); 681 struct freq_attr *fattr = to_attr(attr);
695 ssize_t ret = -EINVAL; 682 ssize_t ret = -EINVAL;
696 683
684 get_online_cpus();
685
686 if (!cpu_online(policy->cpu))
687 goto unlock;
688
697 if (!down_read_trylock(&cpufreq_rwsem)) 689 if (!down_read_trylock(&cpufreq_rwsem))
698 goto exit; 690 goto unlock;
699 691
700 if (lock_policy_rwsem_write(policy->cpu) < 0) 692 if (lock_policy_rwsem_write(policy->cpu) < 0)
701 goto up_read; 693 goto up_read;
@@ -709,7 +701,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
709 701
710up_read: 702up_read:
711 up_read(&cpufreq_rwsem); 703 up_read(&cpufreq_rwsem);
712exit: 704unlock:
705 put_online_cpus();
706
713 return ret; 707 return ret;
714} 708}
715 709
@@ -912,11 +906,11 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
912 struct cpufreq_policy *policy; 906 struct cpufreq_policy *policy;
913 unsigned long flags; 907 unsigned long flags;
914 908
915 write_lock_irqsave(&cpufreq_driver_lock, flags); 909 read_lock_irqsave(&cpufreq_driver_lock, flags);
916 910
917 policy = per_cpu(cpufreq_cpu_data_fallback, cpu); 911 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
918 912
919 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 913 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
920 914
921 return policy; 915 return policy;
922} 916}
@@ -953,6 +947,21 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
953 kfree(policy); 947 kfree(policy);
954} 948}
955 949
950static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
951{
952 if (cpu == policy->cpu)
953 return;
954
955 policy->last_cpu = policy->cpu;
956 policy->cpu = cpu;
957
958#ifdef CONFIG_CPU_FREQ_TABLE
959 cpufreq_frequency_table_update_policy_cpu(policy);
960#endif
961 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
962 CPUFREQ_UPDATE_POLICY_CPU, policy);
963}
964
956static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, 965static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
957 bool frozen) 966 bool frozen)
958{ 967{
@@ -1006,7 +1015,18 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1006 if (!policy) 1015 if (!policy)
1007 goto nomem_out; 1016 goto nomem_out;
1008 1017
1009 policy->cpu = cpu; 1018
1019 /*
1020 * In the resume path, since we restore a saved policy, the assignment
1021 * to policy->cpu is like an update of the existing policy, rather than
1022 * the creation of a brand new one. So we need to perform this update
1023 * by invoking update_policy_cpu().
1024 */
1025 if (frozen && cpu != policy->cpu)
1026 update_policy_cpu(policy, cpu);
1027 else
1028 policy->cpu = cpu;
1029
1010 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 1030 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1011 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1031 cpumask_copy(policy->cpus, cpumask_of(cpu));
1012 1032
@@ -1098,18 +1118,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1098 return __cpufreq_add_dev(dev, sif, false); 1118 return __cpufreq_add_dev(dev, sif, false);
1099} 1119}
1100 1120
1101static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1102{
1103 policy->last_cpu = policy->cpu;
1104 policy->cpu = cpu;
1105
1106#ifdef CONFIG_CPU_FREQ_TABLE
1107 cpufreq_frequency_table_update_policy_cpu(policy);
1108#endif
1109 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1110 CPUFREQ_UPDATE_POLICY_CPU, policy);
1111}
1112
1113static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, 1121static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1114 unsigned int old_cpu, bool frozen) 1122 unsigned int old_cpu, bool frozen)
1115{ 1123{
@@ -1141,22 +1149,14 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1141 return cpu_dev->id; 1149 return cpu_dev->id;
1142} 1150}
1143 1151
1144/** 1152static int __cpufreq_remove_dev_prepare(struct device *dev,
1145 * __cpufreq_remove_dev - remove a CPU device 1153 struct subsys_interface *sif,
1146 * 1154 bool frozen)
1147 * Removes the cpufreq interface for a CPU device.
1148 * Caller should already have policy_rwsem in write mode for this CPU.
1149 * This routine frees the rwsem before returning.
1150 */
1151static int __cpufreq_remove_dev(struct device *dev,
1152 struct subsys_interface *sif, bool frozen)
1153{ 1155{
1154 unsigned int cpu = dev->id, cpus; 1156 unsigned int cpu = dev->id, cpus;
1155 int new_cpu, ret; 1157 int new_cpu, ret;
1156 unsigned long flags; 1158 unsigned long flags;
1157 struct cpufreq_policy *policy; 1159 struct cpufreq_policy *policy;
1158 struct kobject *kobj;
1159 struct completion *cmp;
1160 1160
1161 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1161 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1162 1162
@@ -1196,8 +1196,9 @@ static int __cpufreq_remove_dev(struct device *dev,
1196 cpumask_clear_cpu(cpu, policy->cpus); 1196 cpumask_clear_cpu(cpu, policy->cpus);
1197 unlock_policy_rwsem_write(cpu); 1197 unlock_policy_rwsem_write(cpu);
1198 1198
1199 if (cpu != policy->cpu && !frozen) { 1199 if (cpu != policy->cpu) {
1200 sysfs_remove_link(&dev->kobj, "cpufreq"); 1200 if (!frozen)
1201 sysfs_remove_link(&dev->kobj, "cpufreq");
1201 } else if (cpus > 1) { 1202 } else if (cpus > 1) {
1202 1203
1203 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1204 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
@@ -1213,6 +1214,33 @@ static int __cpufreq_remove_dev(struct device *dev,
1213 } 1214 }
1214 } 1215 }
1215 1216
1217 return 0;
1218}
1219
1220static int __cpufreq_remove_dev_finish(struct device *dev,
1221 struct subsys_interface *sif,
1222 bool frozen)
1223{
1224 unsigned int cpu = dev->id, cpus;
1225 int ret;
1226 unsigned long flags;
1227 struct cpufreq_policy *policy;
1228 struct kobject *kobj;
1229 struct completion *cmp;
1230
1231 read_lock_irqsave(&cpufreq_driver_lock, flags);
1232 policy = per_cpu(cpufreq_cpu_data, cpu);
1233 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1234
1235 if (!policy) {
1236 pr_debug("%s: No cpu_data found\n", __func__);
1237 return -EINVAL;
1238 }
1239
1240 lock_policy_rwsem_read(cpu);
1241 cpus = cpumask_weight(policy->cpus);
1242 unlock_policy_rwsem_read(cpu);
1243
1216 /* If cpu is last user of policy, free policy */ 1244 /* If cpu is last user of policy, free policy */
1217 if (cpus == 1) { 1245 if (cpus == 1) {
1218 if (cpufreq_driver->target) { 1246 if (cpufreq_driver->target) {
@@ -1272,6 +1300,27 @@ static int __cpufreq_remove_dev(struct device *dev,
1272 return 0; 1300 return 0;
1273} 1301}
1274 1302
1303/**
1304 * __cpufreq_remove_dev - remove a CPU device
1305 *
1306 * Removes the cpufreq interface for a CPU device.
1307 * Caller should already have policy_rwsem in write mode for this CPU.
1308 * This routine frees the rwsem before returning.
1309 */
1310static inline int __cpufreq_remove_dev(struct device *dev,
1311 struct subsys_interface *sif,
1312 bool frozen)
1313{
1314 int ret;
1315
1316 ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
1317
1318 if (!ret)
1319 ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
1320
1321 return ret;
1322}
1323
1275static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1324static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1276{ 1325{
1277 unsigned int cpu = dev->id; 1326 unsigned int cpu = dev->id;
@@ -1610,8 +1659,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1610 1659
1611 if (cpufreq_disabled()) 1660 if (cpufreq_disabled())
1612 return -ENODEV; 1661 return -ENODEV;
1613 if (policy->transition_ongoing)
1614 return -EBUSY;
1615 1662
1616 /* Make sure that target_freq is within supported range */ 1663 /* Make sure that target_freq is within supported range */
1617 if (target_freq > policy->max) 1664 if (target_freq > policy->max)
@@ -1692,8 +1739,9 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1692 policy->cpu, event); 1739 policy->cpu, event);
1693 1740
1694 mutex_lock(&cpufreq_governor_lock); 1741 mutex_lock(&cpufreq_governor_lock);
1695 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) || 1742 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1696 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) { 1743 || (!policy->governor_enabled
1744 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1697 mutex_unlock(&cpufreq_governor_lock); 1745 mutex_unlock(&cpufreq_governor_lock);
1698 return -EBUSY; 1746 return -EBUSY;
1699 } 1747 }
@@ -1994,7 +2042,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1994 break; 2042 break;
1995 2043
1996 case CPU_DOWN_PREPARE: 2044 case CPU_DOWN_PREPARE:
1997 __cpufreq_remove_dev(dev, NULL, frozen); 2045 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2046 break;
2047
2048 case CPU_POST_DEAD:
2049 __cpufreq_remove_dev_finish(dev, NULL, frozen);
1998 break; 2050 break;
1999 2051
2000 case CPU_DOWN_FAILED: 2052 case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 04452f026ed0..4cf0d2805cb2 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -74,7 +74,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
74 for (i = 0; i < stat->state_num; i++) { 74 for (i = 0; i < stat->state_num; i++) {
75 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], 75 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
76 (unsigned long long) 76 (unsigned long long)
77 cputime64_to_clock_t(stat->time_in_state[i])); 77 jiffies_64_to_clock_t(stat->time_in_state[i]));
78 } 78 }
79 return len; 79 return len;
80} 80}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6efd96c196b2..9733f29ed148 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -522,6 +522,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
522 ICPU(0x2a, default_policy), 522 ICPU(0x2a, default_policy),
523 ICPU(0x2d, default_policy), 523 ICPU(0x2d, default_policy),
524 ICPU(0x3a, default_policy), 524 ICPU(0x3a, default_policy),
525 ICPU(0x3c, default_policy),
526 ICPU(0x3e, default_policy),
527 ICPU(0x3f, default_policy),
528 ICPU(0x45, default_policy),
529 ICPU(0x46, default_policy),
525 {} 530 {}
526}; 531};
527MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 532MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3ac499d5a207..6e11701f0fca 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -331,7 +331,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
331 spin_lock(&cpuidle_driver_lock); 331 spin_lock(&cpuidle_driver_lock);
332 332
333 drv = cpuidle_get_driver(); 333 drv = cpuidle_get_driver();
334 drv->refcnt++; 334 if (drv)
335 drv->refcnt++;
335 336
336 spin_unlock(&cpuidle_driver_lock); 337 spin_unlock(&cpuidle_driver_lock);
337 return drv; 338 return drv;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index f6488adf3af1..0b7d23b4ad95 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -487,7 +487,6 @@ static void acpiphp_bus_add(acpi_handle handle)
487{ 487{
488 struct acpi_device *adev = NULL; 488 struct acpi_device *adev = NULL;
489 489
490 acpiphp_bus_trim(handle);
491 acpi_bus_scan(handle); 490 acpi_bus_scan(handle);
492 acpi_bus_get_device(handle, &adev); 491 acpi_bus_get_device(handle, &adev);
493 if (adev) 492 if (adev)
@@ -529,6 +528,16 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
529 } 528 }
530} 529}
531 530
531static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
532{
533 struct acpiphp_func *func;
534
535 list_for_each_entry(func, &slot->funcs, sibling)
536 acpiphp_bus_add(func_to_handle(func));
537
538 return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
539}
540
532/** 541/**
533 * enable_slot - enable, configure a slot 542 * enable_slot - enable, configure a slot
534 * @slot: slot to be enabled 543 * @slot: slot to be enabled
@@ -543,12 +552,9 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
543 struct acpiphp_func *func; 552 struct acpiphp_func *func;
544 int max, pass; 553 int max, pass;
545 LIST_HEAD(add_list); 554 LIST_HEAD(add_list);
555 int nr_found;
546 556
547 list_for_each_entry(func, &slot->funcs, sibling) 557 nr_found = acpiphp_rescan_slot(slot);
548 acpiphp_bus_add(func_to_handle(func));
549
550 pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
551
552 max = acpiphp_max_busnr(bus); 558 max = acpiphp_max_busnr(bus);
553 for (pass = 0; pass < 2; pass++) { 559 for (pass = 0; pass < 2; pass++) {
554 list_for_each_entry(dev, &bus->devices, bus_list) { 560 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -567,8 +573,11 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
567 } 573 }
568 } 574 }
569 } 575 }
570
571 __pci_bus_assign_resources(bus, &add_list, NULL); 576 __pci_bus_assign_resources(bus, &add_list, NULL);
577 /* Nothing more to do here if there are no new devices on this bus. */
578 if (!nr_found && (slot->flags & SLOT_ENABLED))
579 return;
580
572 acpiphp_sanitize_bus(bus); 581 acpiphp_sanitize_bus(bus);
573 acpiphp_set_hpp_values(bus); 582 acpiphp_set_hpp_values(bus);
574 acpiphp_set_acpi_region(slot); 583 acpiphp_set_acpi_region(slot);
@@ -837,11 +846,22 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
837 case ACPI_NOTIFY_DEVICE_CHECK: 846 case ACPI_NOTIFY_DEVICE_CHECK:
838 /* device check */ 847 /* device check */
839 dbg("%s: Device check notify on %s\n", __func__, objname); 848 dbg("%s: Device check notify on %s\n", __func__, objname);
840 if (bridge) 849 if (bridge) {
841 acpiphp_check_bridge(bridge); 850 acpiphp_check_bridge(bridge);
842 else 851 } else {
843 acpiphp_check_bridge(func->parent); 852 struct acpiphp_slot *slot = func->slot;
853 int ret;
844 854
855 /*
856 * Check if anything has changed in the slot and rescan
857 * from the parent if that's the case.
858 */
859 mutex_lock(&slot->crit_sect);
860 ret = acpiphp_rescan_slot(slot);
861 mutex_unlock(&slot->crit_sect);
862 if (ret)
863 acpiphp_check_bridge(func->parent);
864 }
845 break; 865 break;
846 866
847 case ACPI_NOTIFY_EJECT_REQUEST: 867 case ACPI_NOTIFY_EJECT_REQUEST:
@@ -867,6 +887,8 @@ static void hotplug_event_work(struct work_struct *work)
867 hotplug_event(hp_work->handle, hp_work->type, context); 887 hotplug_event(hp_work->handle, hp_work->type, context);
868 888
869 acpi_scan_lock_release(); 889 acpi_scan_lock_release();
890 acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type,
891 ACPI_OST_SC_SUCCESS, NULL);
870 kfree(hp_work); /* allocated in handle_hotplug_event() */ 892 kfree(hp_work); /* allocated in handle_hotplug_event() */
871 put_bridge(context->func.parent); 893 put_bridge(context->func.parent);
872} 894}
@@ -882,11 +904,15 @@ static void hotplug_event_work(struct work_struct *work)
882static void handle_hotplug_event(acpi_handle handle, u32 type, void *data) 904static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
883{ 905{
884 struct acpiphp_context *context; 906 struct acpiphp_context *context;
907 u32 ost_code = ACPI_OST_SC_SUCCESS;
885 908
886 switch (type) { 909 switch (type) {
887 case ACPI_NOTIFY_BUS_CHECK: 910 case ACPI_NOTIFY_BUS_CHECK:
888 case ACPI_NOTIFY_DEVICE_CHECK: 911 case ACPI_NOTIFY_DEVICE_CHECK:
912 break;
889 case ACPI_NOTIFY_EJECT_REQUEST: 913 case ACPI_NOTIFY_EJECT_REQUEST:
914 ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
915 acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
890 break; 916 break;
891 917
892 case ACPI_NOTIFY_DEVICE_WAKE: 918 case ACPI_NOTIFY_DEVICE_WAKE:
@@ -895,20 +921,21 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
895 case ACPI_NOTIFY_FREQUENCY_MISMATCH: 921 case ACPI_NOTIFY_FREQUENCY_MISMATCH:
896 acpi_handle_err(handle, "Device cannot be configured due " 922 acpi_handle_err(handle, "Device cannot be configured due "
897 "to a frequency mismatch\n"); 923 "to a frequency mismatch\n");
898 return; 924 goto out;
899 925
900 case ACPI_NOTIFY_BUS_MODE_MISMATCH: 926 case ACPI_NOTIFY_BUS_MODE_MISMATCH:
901 acpi_handle_err(handle, "Device cannot be configured due " 927 acpi_handle_err(handle, "Device cannot be configured due "
902 "to a bus mode mismatch\n"); 928 "to a bus mode mismatch\n");
903 return; 929 goto out;
904 930
905 case ACPI_NOTIFY_POWER_FAULT: 931 case ACPI_NOTIFY_POWER_FAULT:
906 acpi_handle_err(handle, "Device has suffered a power fault\n"); 932 acpi_handle_err(handle, "Device has suffered a power fault\n");
907 return; 933 goto out;
908 934
909 default: 935 default:
910 acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type); 936 acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
911 return; 937 ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
938 goto out;
912 } 939 }
913 940
914 mutex_lock(&acpiphp_context_lock); 941 mutex_lock(&acpiphp_context_lock);
@@ -917,8 +944,14 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
917 get_bridge(context->func.parent); 944 get_bridge(context->func.parent);
918 acpiphp_put_context(context); 945 acpiphp_put_context(context);
919 alloc_acpi_hp_work(handle, type, context, hotplug_event_work); 946 alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
947 mutex_unlock(&acpiphp_context_lock);
948 return;
920 } 949 }
921 mutex_unlock(&acpiphp_context_lock); 950 mutex_unlock(&acpiphp_context_lock);
951 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
952
953 out:
954 acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
922} 955}
923 956
924/* 957/*
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d568f3975eeb..fcabc42d66ab 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,7 +85,6 @@ struct cpufreq_policy {
85 struct list_head policy_list; 85 struct list_head policy_list;
86 struct kobject kobj; 86 struct kobject kobj;
87 struct completion kobj_unregister; 87 struct completion kobj_unregister;
88 int transition_ongoing; /* Tracks transition status */
89}; 88};
90 89
91/* Only for ACPI */ 90/* Only for ACPI */
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3085e62a80a5..c9c759d5a15c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -644,22 +644,23 @@ int hibernate(void)
644 if (error) 644 if (error)
645 goto Exit; 645 goto Exit;
646 646
647 /* Allocate memory management structures */
648 error = create_basic_memory_bitmaps();
649 if (error)
650 goto Exit;
651
652 printk(KERN_INFO "PM: Syncing filesystems ... "); 647 printk(KERN_INFO "PM: Syncing filesystems ... ");
653 sys_sync(); 648 sys_sync();
654 printk("done.\n"); 649 printk("done.\n");
655 650
656 error = freeze_processes(); 651 error = freeze_processes();
657 if (error) 652 if (error)
658 goto Free_bitmaps; 653 goto Exit;
654
655 lock_device_hotplug();
656 /* Allocate memory management structures */
657 error = create_basic_memory_bitmaps();
658 if (error)
659 goto Thaw;
659 660
660 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); 661 error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
661 if (error || freezer_test_done) 662 if (error || freezer_test_done)
662 goto Thaw; 663 goto Free_bitmaps;
663 664
664 if (in_suspend) { 665 if (in_suspend) {
665 unsigned int flags = 0; 666 unsigned int flags = 0;
@@ -682,14 +683,14 @@ int hibernate(void)
682 pr_debug("PM: Image restored successfully.\n"); 683 pr_debug("PM: Image restored successfully.\n");
683 } 684 }
684 685
686 Free_bitmaps:
687 free_basic_memory_bitmaps();
685 Thaw: 688 Thaw:
689 unlock_device_hotplug();
686 thaw_processes(); 690 thaw_processes();
687 691
688 /* Don't bother checking whether freezer_test_done is true */ 692 /* Don't bother checking whether freezer_test_done is true */
689 freezer_test_done = false; 693 freezer_test_done = false;
690
691 Free_bitmaps:
692 free_basic_memory_bitmaps();
693 Exit: 694 Exit:
694 pm_notifier_call_chain(PM_POST_HIBERNATION); 695 pm_notifier_call_chain(PM_POST_HIBERNATION);
695 pm_restore_console(); 696 pm_restore_console();
@@ -806,21 +807,20 @@ static int software_resume(void)
806 pm_prepare_console(); 807 pm_prepare_console();
807 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 808 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
808 if (error) 809 if (error)
809 goto close_finish; 810 goto Close_Finish;
810
811 error = create_basic_memory_bitmaps();
812 if (error)
813 goto close_finish;
814 811
815 pr_debug("PM: Preparing processes for restore.\n"); 812 pr_debug("PM: Preparing processes for restore.\n");
816 error = freeze_processes(); 813 error = freeze_processes();
817 if (error) { 814 if (error)
818 swsusp_close(FMODE_READ); 815 goto Close_Finish;
819 goto Done;
820 }
821 816
822 pr_debug("PM: Loading hibernation image.\n"); 817 pr_debug("PM: Loading hibernation image.\n");
823 818
819 lock_device_hotplug();
820 error = create_basic_memory_bitmaps();
821 if (error)
822 goto Thaw;
823
824 error = swsusp_read(&flags); 824 error = swsusp_read(&flags);
825 swsusp_close(FMODE_READ); 825 swsusp_close(FMODE_READ);
826 if (!error) 826 if (!error)
@@ -828,9 +828,10 @@ static int software_resume(void)
828 828
829 printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n"); 829 printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
830 swsusp_free(); 830 swsusp_free();
831 thaw_processes();
832 Done:
833 free_basic_memory_bitmaps(); 831 free_basic_memory_bitmaps();
832 Thaw:
833 unlock_device_hotplug();
834 thaw_processes();
834 Finish: 835 Finish:
835 pm_notifier_call_chain(PM_POST_RESTORE); 836 pm_notifier_call_chain(PM_POST_RESTORE);
836 pm_restore_console(); 837 pm_restore_console();
@@ -840,7 +841,7 @@ static int software_resume(void)
840 mutex_unlock(&pm_mutex); 841 mutex_unlock(&pm_mutex);
841 pr_debug("PM: Hibernation image not present or could not be loaded.\n"); 842 pr_debug("PM: Hibernation image not present or could not be loaded.\n");
842 return error; 843 return error;
843close_finish: 844 Close_Finish:
844 swsusp_close(FMODE_READ); 845 swsusp_close(FMODE_READ);
845 goto Finish; 846 goto Finish;
846} 847}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4ed81e74f86f..72e8f4fd616d 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -60,11 +60,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
60 error = -ENOSYS; 60 error = -ENOSYS;
61 goto Unlock; 61 goto Unlock;
62 } 62 }
63 if(create_basic_memory_bitmaps()) {
64 atomic_inc(&snapshot_device_available);
65 error = -ENOMEM;
66 goto Unlock;
67 }
68 nonseekable_open(inode, filp); 63 nonseekable_open(inode, filp);
69 data = &snapshot_state; 64 data = &snapshot_state;
70 filp->private_data = data; 65 filp->private_data = data;
@@ -90,10 +85,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
90 if (error) 85 if (error)
91 pm_notifier_call_chain(PM_POST_RESTORE); 86 pm_notifier_call_chain(PM_POST_RESTORE);
92 } 87 }
93 if (error) { 88 if (error)
94 free_basic_memory_bitmaps();
95 atomic_inc(&snapshot_device_available); 89 atomic_inc(&snapshot_device_available);
96 } 90
97 data->frozen = 0; 91 data->frozen = 0;
98 data->ready = 0; 92 data->ready = 0;
99 data->platform_support = 0; 93 data->platform_support = 0;
@@ -111,11 +105,11 @@ static int snapshot_release(struct inode *inode, struct file *filp)
111 lock_system_sleep(); 105 lock_system_sleep();
112 106
113 swsusp_free(); 107 swsusp_free();
114 free_basic_memory_bitmaps();
115 data = filp->private_data; 108 data = filp->private_data;
116 free_all_swap_pages(data->swap); 109 free_all_swap_pages(data->swap);
117 if (data->frozen) { 110 if (data->frozen) {
118 pm_restore_gfp_mask(); 111 pm_restore_gfp_mask();
112 free_basic_memory_bitmaps();
119 thaw_processes(); 113 thaw_processes();
120 } 114 }
121 pm_notifier_call_chain(data->mode == O_RDONLY ? 115 pm_notifier_call_chain(data->mode == O_RDONLY ?
@@ -207,6 +201,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
207 if (!mutex_trylock(&pm_mutex)) 201 if (!mutex_trylock(&pm_mutex))
208 return -EBUSY; 202 return -EBUSY;
209 203
204 lock_device_hotplug();
210 data = filp->private_data; 205 data = filp->private_data;
211 206
212 switch (cmd) { 207 switch (cmd) {
@@ -220,14 +215,22 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
220 printk("done.\n"); 215 printk("done.\n");
221 216
222 error = freeze_processes(); 217 error = freeze_processes();
223 if (!error) 218 if (error)
219 break;
220
221 error = create_basic_memory_bitmaps();
222 if (error)
223 thaw_processes();
224 else
224 data->frozen = 1; 225 data->frozen = 1;
226
225 break; 227 break;
226 228
227 case SNAPSHOT_UNFREEZE: 229 case SNAPSHOT_UNFREEZE:
228 if (!data->frozen || data->ready) 230 if (!data->frozen || data->ready)
229 break; 231 break;
230 pm_restore_gfp_mask(); 232 pm_restore_gfp_mask();
233 free_basic_memory_bitmaps();
231 thaw_processes(); 234 thaw_processes();
232 data->frozen = 0; 235 data->frozen = 0;
233 break; 236 break;
@@ -371,6 +374,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
371 374
372 } 375 }
373 376
377 unlock_device_hotplug();
374 mutex_unlock(&pm_mutex); 378 mutex_unlock(&pm_mutex);
375 379
376 return error; 380 return error;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0eb1a1df649d..ed85fe3870e2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -52,14 +52,10 @@ DEFINE_MUTEX(mem_hotplug_mutex);
52void lock_memory_hotplug(void) 52void lock_memory_hotplug(void)
53{ 53{
54 mutex_lock(&mem_hotplug_mutex); 54 mutex_lock(&mem_hotplug_mutex);
55
56 /* for exclusive hibernation if CONFIG_HIBERNATION=y */
57 lock_system_sleep();
58} 55}
59 56
60void unlock_memory_hotplug(void) 57void unlock_memory_hotplug(void)
61{ 58{
62 unlock_system_sleep();
63 mutex_unlock(&mem_hotplug_mutex); 59 mutex_unlock(&mem_hotplug_mutex);
64} 60}
65 61