author	Linus Torvalds <torvalds@linux-foundation.org>	2014-03-08 12:17:27 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-03-08 12:17:27 -0500
commit	abfba60c7a81b12236a07da4cbaf75a0e30040cd (patch)
tree	a68e5c437b82de235e40ec6eb40885392fa08d64
parent	b01d4e68933ec23e43b1046fa35d593cefcf37d1 (diff)
parent	19bc45a59caf0ed2c2576da4d89c0ef8a8be1f63 (diff)
Merge tag 'pm+acpi-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management fixes from Rafael Wysocki:

 - ACPI tables in some BIOSes list device resources with size equal to
   0, which doesn't make sense, so we should ignore them, but instead
   we try to use them and mangle things completely.  Fix from Zhang Rui.

 - Several models of Samsung laptops accumulate EC events while they
   are in sleep states, which leads to EC buffer overflows that prevent
   new events from being signaled after system resume or reboot.  This
   has been affecting many users for quite a while and may be addressed
   by clearing the EC buffer during system resume and system startup on
   those machines.  From Kieran Clancy.

 - If the ACPI sleep control and status registers are not present
   (which happens if the Hardware Reduced ACPI mode bit is set in the
   ACPI tables, but may also result from BIOS bugs), we should not try
   to use ACPI to power off the system, and ACPI S5 should not be
   listed as supported.  Fix from Aubrey Li.

 - There's a race condition in cpufreq_get() that leads to a kernel
   crash if that function is called at the wrong time.  Fix from Aaron
   Plattner.

 - cpufreq policy objects have to be initialized entirely before they
   are first accessed by their users, which isn't the case currently,
   and that potentially leads to various kinds of breakage that are
   difficult to debug.  Fix from Viresh Kumar.

 - Locking is missing in __cpufreq_add_dev(), which leads to a race
   condition that may trigger a kernel crash.  Fix from Viresh Kumar.

* tag 'pm+acpi-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / EC: Clear stale EC events on Samsung systems
  cpufreq: Initialize governor for a new policy under policy->rwsem
  cpufreq: Initialize policy before making it available for others to use
  cpufreq: use cpufreq_cpu_get() to avoid cpufreq_get() race conditions
  ACPI / sleep: pm_power_off needs more sanity checks to be installed
  ACPI / resources: ignore invalid ACPI device resources
-rw-r--r--	drivers/acpi/ec.c	64
-rw-r--r--	drivers/acpi/resource.c	10
-rw-r--r--	drivers/acpi/sleep.c	7
-rw-r--r--	drivers/cpufreq/cpufreq.c	51
4 files changed, 103 insertions(+), 29 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc108..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
+					 * when trying to clear the EC */
 
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+	int i, status;
+	u8 value = 0;
+
+	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+		status = acpi_ec_query_unlocked(ec, &value);
+		if (status || !value)
+			break;
+	}
+
+	if (unlikely(i == ACPI_EC_CLEAR_MAX))
+		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+	else
+		pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
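The new acpi_ec_clear() is a bounded-drain loop: keep querying until the controller reports no pending event, but cap the iteration count so a wedged EC cannot stall boot or resume forever. Below is a minimal standalone sketch of the same pattern with a simulated event queue standing in for the EC; all names here (query_event, CLEAR_MAX, pending) are hypothetical, not kernel API.

#include <stdio.h>

#define CLEAR_MAX 100			/* upper bound, mirrors ACPI_EC_CLEAR_MAX */

static int pending = 7;			/* simulated backlog of stale events */

/* Stand-in for acpi_ec_query_unlocked(): returns 0 on success,
 * and sets *value to 0 once the queue is empty. */
static int query_event(unsigned char *value)
{
	*value = pending > 0 ? (unsigned char)pending-- : 0;
	return 0;
}

int main(void)
{
	unsigned char value = 0;
	int i;

	for (i = 0; i < CLEAR_MAX; i++) {
		if (query_event(&value) || !value)
			break;		/* error or queue drained */
	}

	if (i == CLEAR_MAX)
		printf("Warning: hit the %d-event ceiling, device may be wedged\n", i);
	else
		printf("%d stale events cleared\n", i);
	return 0;
}

At loop exit, i is exactly the number of events drained, which is why the kernel code can print it directly in both the warning and the info message.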
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
 	mutex_lock(&ec->mutex);
 	/* Allow transactions to be carried out again */
 	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+	if (EC_FLAGS_CLEAR_ON_RESUME)
+		acpi_ec_clear(ec);
+
 	mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
 	/* EC is fully operational, allow queries */
 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+	/* Clear stale _Q events if hardware might require that */
+	if (EC_FLAGS_CLEAR_ON_RESUME) {
+		mutex_lock(&ec->mutex);
+		acpi_ec_clear(ec);
+		mutex_unlock(&ec->mutex);
+	}
 	return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC
+ * during sleep.  These ECs stop reporting GPEs until they are manually polled,
+ * if too many events are accumulated (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
+ * however, it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing EC poll on resume.\n");
+	EC_FLAGS_CLEAR_ON_RESUME = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+	{
+	ec_clear_on_resume, "Samsung hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
 };
 
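The new DMI table entry matches on the vendor string alone, so every Samsung machine gets the quirk; per the code comment above, that is deliberate because the list of affected models is open-ended and the clear is harmless elsewhere. The general shape of this kind of quirk table is a sentinel-terminated array of (match criteria, callback) pairs scanned at init time. A rough standalone illustration under that assumption follows; the names (struct quirk, enable_clear_on_resume) are hypothetical, not the kernel's dmi_system_id machinery.

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;		/* substring to match, like DMI_MATCH */
	const char *ident;
	void (*apply)(void);
};

static void enable_clear_on_resume(void)
{
	printf("clear-on-resume flag set\n");
}

static const struct quirk quirks[] = {
	{ "SAMSUNG ELECTRONICS CO., LTD.", "Samsung hardware",
	  enable_clear_on_resume },
	{ NULL, NULL, NULL },		/* sentinel, like the trailing {} */
};

int main(void)
{
	/* In the kernel this string comes from the firmware's DMI data */
	const char *sys_vendor = "SAMSUNG ELECTRONICS CO., LTD.";
	const struct quirk *q;

	for (q = quirks; q->vendor; q++) {
		if (strstr(sys_vendor, q->vendor)) {
			printf("matched quirk: %s\n", q->ident);
			q->apply();
		}
	}
	return 0;
}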
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_MEMORY24:
 		memory24 = &ares->data.memory24;
+		if (!memory24->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory24->minimum,
 					 memory24->address_length,
 					 memory24->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_MEMORY32:
 		memory32 = &ares->data.memory32;
+		if (!memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory32->minimum,
 					 memory32->address_length,
 					 memory32->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
 		fixed_memory32 = &ares->data.fixed_memory32;
+		if (!fixed_memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, fixed_memory32->address,
 					 fixed_memory32->address_length,
 					 fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_IO:
 		io = &ares->data.io;
+		if (!io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, io->minimum,
 					io->address_length,
 					io->io_decode);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_IO:
 		fixed_io = &ares->data.fixed_io;
+		if (!fixed_io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, fixed_io->address,
 					fixed_io->address_length,
 					ACPI_DECODE_10);
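Both hunks apply the same guard: validate the firmware-supplied length before converting the descriptor into a struct resource, and report the descriptor as invalid (return false) rather than fabricating a zero-sized region that confuses resource management later. A compact sketch of that guard in isolation, using hypothetical types rather than the ACPI structs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mem_desc {
	uint64_t minimum;		/* start address from the firmware table */
	uint64_t address_length;	/* size; 0 indicates a bogus BIOS entry */
};

/* Returns false for invalid descriptors so callers can skip them */
static bool desc_to_range(const struct mem_desc *d,
			  uint64_t *start, uint64_t *end)
{
	if (!d->address_length)
		return false;		/* the fix: reject size 0 up front */
	*start = d->minimum;
	*end = d->minimum + d->address_length - 1;
	return true;
}

int main(void)
{
	struct mem_desc bad = { 0xfed40000, 0 };
	struct mem_desc good = { 0xfed40000, 0x5000 };
	uint64_t s, e;

	printf("bad:  %s\n", desc_to_range(&bad, &s, &e) ? "ok" : "rejected");
	if (desc_to_range(&good, &s, &e))
		printf("good: [%#llx-%#llx]\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}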
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b718806657cd..b0f6c4a2a119 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -807,7 +807,12 @@ int __init acpi_sleep_init(void)
 	acpi_sleep_hibernate_setup();
 
 	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-	if (ACPI_SUCCESS(status)) {
+	/*
+	 * Check both ACPI S5 object and ACPI sleep registers to
+	 * install pm_power_off_prepare/pm_power_off hook
+	 */
+	if (ACPI_SUCCESS(status) && acpi_gbl_FADT.sleep_control.address
+	    && acpi_gbl_FADT.sleep_status.address) {
 		sleep_states[ACPI_STATE_S5] = 1;
 		pm_power_off_prepare = acpi_power_off_prepare;
 		pm_power_off = acpi_power_off;
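The fix widens the condition for installing the power-off hooks: a valid _S5 object alone is no longer enough, because on Hardware-reduced ACPI systems (or with buggy BIOSes) the FADT sleep control/status registers the handler would write to are absent. The pattern is to gate installation of a callback on every prerequisite the callback needs. A toy sketch of that idea with hypothetical names (power_off_hook standing in for pm_power_off):

#include <stdbool.h>
#include <stdio.h>

static void (*power_off_hook)(void);	/* analogous to pm_power_off */

static void firmware_power_off(void)
{
	printf("powering off via firmware sleep registers\n");
}

/* Install the hook only when everything the handler touches exists */
static void maybe_install_power_off(bool have_s5_object,
				    unsigned long sleep_control_addr,
				    unsigned long sleep_status_addr)
{
	if (have_s5_object && sleep_control_addr && sleep_status_addr)
		power_off_hook = firmware_power_off;
}

int main(void)
{
	/* Hardware-reduced case: _S5 present but registers absent */
	maybe_install_power_off(true, 0, 0);
	printf("hook installed: %s\n", power_off_hook ? "yes" : "no");

	/* Full ACPI case: register addresses are illustrative only */
	maybe_install_power_off(true, 0xb2, 0xb3);
	printf("hook installed: %s\n", power_off_hook ? "yes" : "no");
	return 0;
}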
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cb003a6b72c8..cf485d928903 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		goto err_set_policy_cpu;
 	}
 
+	/* related cpus should atleast have policy->cpus */
+	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+	/*
+	 * affected cpus must always be the one, which are online. We aren't
+	 * managing offline cpus here.
+	 */
+	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+	if (!frozen) {
+		policy->user_policy.min = policy->min;
+		policy->user_policy.max = policy->max;
+	}
+
+	down_write(&policy->rwsem);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		}
 	}
 
-	/* related cpus should atleast have policy->cpus */
-	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-	/*
-	 * affected cpus must always be the one, which are online. We aren't
-	 * managing offline cpus here.
-	 */
-	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-	if (!frozen) {
-		policy->user_policy.min = policy->min;
-		policy->user_policy.max = policy->max;
-	}
-
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_START, policy);
 
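Taken together, these two hunks move the cpumask and user_policy setup to before the point where the policy is stored in the per-CPU pointer array, and take policy->rwsem across the whole init-and-publish sequence. The underlying discipline: never make an object reachable by other threads until its plain fields are initialized, and hold its lock while publishing so late-arriving readers block instead of seeing a half-built object. A stripped-down sketch of that discipline using pthreads; the structure and names are hypothetical, not the cpufreq code.

#include <pthread.h>
#include <stdio.h>

struct policy {
	pthread_rwlock_t rwsem;		/* plays the role of policy->rwsem */
	unsigned int min, max;
};

static struct policy *published;	/* like the per-cpu cpufreq_cpu_data slot */

static void add_policy(struct policy *p)
{
	/* Finish plain-field init while no other thread can reach p */
	p->min = 800000;
	p->max = 3400000;

	/*
	 * Take the object's lock BEFORE publishing it: a reader that
	 * finds the pointer blocks on the lock until the remaining
	 * initialization below has completed.
	 */
	pthread_rwlock_wrlock(&p->rwsem);
	published = p;			/* the publish step */
	/* ... init that must not be observed half-done goes here ... */
	pthread_rwlock_unlock(&p->rwsem);
}

int main(void)
{
	struct policy p;

	pthread_rwlock_init(&p.rwsem, NULL);
	add_policy(&p);
	printf("published policy: min=%u max=%u\n",
	       published->min, published->max);
	return 0;
}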
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
+	up_write(&policy->rwsem);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	up_read(&cpufreq_rwsem);
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	unsigned int ret_freq = 0;
 
-	if (cpufreq_disabled() || !cpufreq_driver)
-		return -ENOENT;
-
-	BUG_ON(!policy);
-
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
-
-	down_read(&policy->rwsem);
-
-	ret_freq = __cpufreq_get(cpu);
+	if (policy) {
+		down_read(&policy->rwsem);
+		ret_freq = __cpufreq_get(cpu);
+		up_read(&policy->rwsem);
 
-	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);
+		cpufreq_cpu_put(policy);
+	}
 
 	return ret_freq;
 }
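The rewrite replaces a raw per-CPU pointer read, which could race with policy teardown and trip the BUG_ON or touch a freed object, with cpufreq_cpu_get()/cpufreq_cpu_put(), which take and drop a reference so the policy cannot disappear while in use. A minimal sketch of that get/use/put discipline with an atomic refcount; names are hypothetical, and the real kernel helpers additionally take a lock and check driver state, which this toy elides.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct policy {
	atomic_int refcount;
	unsigned int cur_khz;
};

static struct policy *_Atomic slot;	/* like per_cpu(cpufreq_cpu_data, cpu) */

static struct policy *policy_get(void)
{
	struct policy *p = atomic_load(&slot);

	/* NB: the real code holds a lock here so the object cannot be
	 * freed between the load and the refcount increment. */
	if (p)
		atomic_fetch_add(&p->refcount, 1);	/* pin it */
	return p;
}

static void policy_put(struct policy *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);		/* last reference frees the object */
}

/* The pattern from the hunk: get, use under the pin, put; 0 if absent */
static unsigned int safe_get_freq(void)
{
	struct policy *p = policy_get();
	unsigned int freq = 0;

	if (p) {
		freq = p->cur_khz;
		policy_put(p);
	}
	return freq;
}

int main(void)
{
	struct policy *p = malloc(sizeof(*p));

	atomic_init(&p->refcount, 1);
	p->cur_khz = 2400000;
	atomic_store(&slot, p);
	printf("freq = %u kHz\n", safe_get_freq());

	atomic_store(&slot, (struct policy *)NULL);
	policy_put(p);			/* drop the creator's reference */
	printf("freq = %u kHz (after teardown)\n", safe_get_freq());
	return 0;
}

Note that returning 0 when no policy exists also fixes the old error path, which returned -ENOENT from a function with an unsigned return type.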