-rw-r--r--  Documentation/acpi/cppc_sysfs.txt  |  69
-rw-r--r--  drivers/acpi/ac.c                  |  48
-rw-r--r--  drivers/acpi/battery.c             |  81
-rw-r--r--  drivers/acpi/cppc_acpi.c           | 159
-rw-r--r--  drivers/acpi/reboot.c              |   4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c     |  80
-rw-r--r--  drivers/mailbox/pcc.c              |  81
-rw-r--r--  include/acpi/cppc_acpi.h           |  14
-rw-r--r--  include/linux/acpi.h               |   1
9 files changed, 400 insertions, 137 deletions
diff --git a/Documentation/acpi/cppc_sysfs.txt b/Documentation/acpi/cppc_sysfs.txt
new file mode 100644
index 000000000000..f20fb445135d
--- /dev/null
+++ b/Documentation/acpi/cppc_sysfs.txt
@@ -0,0 +1,69 @@

	Collaborative Processor Performance Control (CPPC)

CPPC, defined in the ACPI spec, describes a mechanism for the OS to manage the
performance of a logical processor on a contiguous and abstract performance
scale. CPPC exposes a set of registers to describe the abstract performance
scale, to request performance levels and to measure per-cpu delivered
performance.

For more details on CPPC please refer to the ACPI specification at:

http://uefi.org/specifications

Some of the CPPC registers are exposed via sysfs under:

/sys/devices/system/cpu/cpuX/acpi_cppc/

for each cpu X

--------------------------------------------------------------------------------

$ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
/sys/devices/system/cpu/cpu0/acpi_cppc/:
total 0
-r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
-r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
-r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time

--------------------------------------------------------------------------------

* highest_perf : Highest performance of this processor (abstract scale).
* nominal_perf : Highest sustained performance of this processor (abstract scale).
* lowest_nonlinear_perf : Lowest performance of this processor with nonlinear
  power savings (abstract scale).
* lowest_perf : Lowest performance of this processor (abstract scale).

* lowest_freq : CPU frequency corresponding to lowest_perf (in MHz).
* nominal_freq : CPU frequency corresponding to nominal_perf (in MHz).
  The above frequencies should only be used to report processor performance in
  frequency instead of abstract scale. These values should not be used for any
  functional decisions.

* feedback_ctrs : Includes both reference and delivered performance counters.
  The reference counter ticks up proportionally to the processor's reference
  performance. The delivered counter ticks up proportionally to the processor's
  delivered performance.
* wraparound_time : Minimum time for the feedback counters to wrap around (seconds).
* reference_perf : Performance level at which the reference performance counter
  accumulates (abstract scale).

--------------------------------------------------------------------------------

	Computing Average Delivered Performance

The following describes the steps to compute the average performance delivered
by taking two snapshots of the feedback counters at times T1 and T2.

T1: Read feedback_ctrs as fbc_t1
    Wait or run some workload
T2: Read feedback_ctrs as fbc_t2

delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]

delivered_perf = (reference_perf x delivered_counter_delta) / reference_counter_delta
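
A minimal userspace sketch of the two-snapshot procedure above is shown below.
It assumes feedback_ctrs prints the counters as "ref:<value> del:<value>" and
uses a hypothetical reference_perf of 100; verify the output format and read
reference_perf from its own sysfs file on a real system.

#include <stdio.h>
#include <unistd.h>

static int read_ctrs(const char *path, unsigned long long *ref,
		     unsigned long long *del)
{
	FILE *f = fopen(path, "r");
	int ok;

	if (!f)
		return -1;
	/* Assumed output format: "ref:<value> del:<value>" */
	ok = (fscanf(f, "ref:%llu del:%llu", ref, del) == 2);
	fclose(f);
	return ok ? 0 : -1;
}

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs";
	unsigned long long ref1, del1, ref2, del2;
	unsigned long long reference_perf = 100;	/* hypothetical; read it from sysfs */

	if (read_ctrs(path, &ref1, &del1))
		return 1;
	sleep(1);					/* wait or run some workload */
	if (read_ctrs(path, &ref2, &del2))
		return 1;

	/* delivered_perf = (reference_perf x delta(del)) / delta(ref) */
	printf("average delivered_perf = %llu\n",
	       reference_perf * (del2 - del1) / (ref2 - ref1));
	return 0;
}
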
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2d8de2f8c1ed..b823a86c166d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -87,6 +87,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
87 87
88 88
89static int ac_sleep_before_get_state_ms; 89static int ac_sleep_before_get_state_ms;
90static int ac_check_pmic = 1;
90 91
91static struct acpi_driver acpi_ac_driver = { 92static struct acpi_driver acpi_ac_driver = {
92 .name = "ac", 93 .name = "ac",
@@ -310,21 +311,43 @@ static int acpi_ac_battery_notify(struct notifier_block *nb,
310 return NOTIFY_OK; 311 return NOTIFY_OK;
311} 312}
312 313
313static int thinkpad_e530_quirk(const struct dmi_system_id *d) 314static int __init thinkpad_e530_quirk(const struct dmi_system_id *d)
314{ 315{
315 ac_sleep_before_get_state_ms = 1000; 316 ac_sleep_before_get_state_ms = 1000;
316 return 0; 317 return 0;
317} 318}
318 319
319static const struct dmi_system_id ac_dmi_table[] = { 320static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
321{
322 ac_check_pmic = 0;
323 return 0;
324}
325
326static const struct dmi_system_id ac_dmi_table[] __initconst = {
320 { 327 {
328 /* Thinkpad e530 */
321 .callback = thinkpad_e530_quirk, 329 .callback = thinkpad_e530_quirk,
322 .ident = "thinkpad e530",
323 .matches = { 330 .matches = {
324 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 331 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
325 DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), 332 DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
326 }, 333 },
327 }, 334 },
335 {
336 /* ECS EF20EA */
337 .callback = ac_do_not_check_pmic_quirk,
338 .matches = {
339 DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
340 },
341 },
342 {
343 /* Lenovo Ideapad Miix 320 */
344 .callback = ac_do_not_check_pmic_quirk,
345 .matches = {
346 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
347 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
348 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
349 },
350 },
328 {}, 351 {},
329}; 352};
330 353
@@ -384,7 +407,6 @@ end:
384 kfree(ac); 407 kfree(ac);
385 } 408 }
386 409
387 dmi_check_system(ac_dmi_table);
388 return result; 410 return result;
389} 411}
390 412
@@ -442,13 +464,17 @@ static int __init acpi_ac_init(void)
442 if (acpi_disabled) 464 if (acpi_disabled)
443 return -ENODEV; 465 return -ENODEV;
444 466
445 for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++) 467 dmi_check_system(ac_dmi_table);
446 if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1", 468
447 acpi_ac_blacklist[i].hrv)) { 469 if (ac_check_pmic) {
448 pr_info(PREFIX "AC: found native %s PMIC, not loading\n", 470 for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
449 acpi_ac_blacklist[i].hid); 471 if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
450 return -ENODEV; 472 acpi_ac_blacklist[i].hrv)) {
451 } 473 pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
474 acpi_ac_blacklist[i].hid);
475 return -ENODEV;
476 }
477 }
452 478
453#ifdef CONFIG_ACPI_PROCFS_POWER 479#ifdef CONFIG_ACPI_PROCFS_POWER
454 acpi_ac_dir = acpi_lock_ac_dir(); 480 acpi_ac_dir = acpi_lock_ac_dir();
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index bdb24d636d9a..572845fafb00 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -74,6 +74,8 @@ static async_cookie_t async_cookie;
74static bool battery_driver_registered; 74static bool battery_driver_registered;
75static int battery_bix_broken_package; 75static int battery_bix_broken_package;
76static int battery_notification_delay_ms; 76static int battery_notification_delay_ms;
77static int battery_ac_is_broken;
78static int battery_check_pmic = 1;
77static unsigned int cache_time = 1000; 79static unsigned int cache_time = 1000;
78module_param(cache_time, uint, 0644); 80module_param(cache_time, uint, 0644);
79MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 81MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,6 +217,20 @@ static bool acpi_battery_is_degraded(struct acpi_battery *battery)
215 battery->full_charge_capacity < battery->design_capacity; 217 battery->full_charge_capacity < battery->design_capacity;
216} 218}
217 219
220static int acpi_battery_handle_discharging(struct acpi_battery *battery)
221{
222 /*
223 * Some devices wrongly report discharging if the battery's charge level
224 * was above the device's start charging threshold atm the AC adapter
225 * was plugged in and the device thus did not start a new charge cycle.
226 */
227 if ((battery_ac_is_broken || power_supply_is_system_supplied()) &&
228 battery->rate_now == 0)
229 return POWER_SUPPLY_STATUS_NOT_CHARGING;
230
231 return POWER_SUPPLY_STATUS_DISCHARGING;
232}
233
218static int acpi_battery_get_property(struct power_supply *psy, 234static int acpi_battery_get_property(struct power_supply *psy,
219 enum power_supply_property psp, 235 enum power_supply_property psp,
220 union power_supply_propval *val) 236 union power_supply_propval *val)
@@ -230,7 +246,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
230 switch (psp) { 246 switch (psp) {
231 case POWER_SUPPLY_PROP_STATUS: 247 case POWER_SUPPLY_PROP_STATUS:
232 if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) 248 if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
233 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 249 val->intval = acpi_battery_handle_discharging(battery);
234 else if (battery->state & ACPI_BATTERY_STATE_CHARGING) 250 else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
235 val->intval = POWER_SUPPLY_STATUS_CHARGING; 251 val->intval = POWER_SUPPLY_STATUS_CHARGING;
236 else if (acpi_battery_is_charged(battery)) 252 else if (acpi_battery_is_charged(battery))
@@ -1332,23 +1348,64 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
1332 return 0; 1348 return 0;
1333} 1349}
1334 1350
1351static int __init
1352battery_ac_is_broken_quirk(const struct dmi_system_id *d)
1353{
1354 battery_ac_is_broken = 1;
1355 return 0;
1356}
1357
1358static int __init
1359battery_do_not_check_pmic_quirk(const struct dmi_system_id *d)
1360{
1361 battery_check_pmic = 0;
1362 return 0;
1363}
1364
1335static const struct dmi_system_id bat_dmi_table[] __initconst = { 1365static const struct dmi_system_id bat_dmi_table[] __initconst = {
1336 { 1366 {
1367 /* NEC LZ750/LS */
1337 .callback = battery_bix_broken_package_quirk, 1368 .callback = battery_bix_broken_package_quirk,
1338 .ident = "NEC LZ750/LS",
1339 .matches = { 1369 .matches = {
1340 DMI_MATCH(DMI_SYS_VENDOR, "NEC"), 1370 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
1341 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"), 1371 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
1342 }, 1372 },
1343 }, 1373 },
1344 { 1374 {
1375 /* Acer Aspire V5-573G */
1345 .callback = battery_notification_delay_quirk, 1376 .callback = battery_notification_delay_quirk,
1346 .ident = "Acer Aspire V5-573G",
1347 .matches = { 1377 .matches = {
1348 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 1378 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1349 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"), 1379 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
1350 }, 1380 },
1351 }, 1381 },
1382 {
1383 /* Point of View mobii wintab p800w */
1384 .callback = battery_ac_is_broken_quirk,
1385 .matches = {
1386 DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
1387 DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
1388 DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
1389 /* Above matches are too generic, add bios-date match */
1390 DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
1391 },
1392 },
1393 {
1394 /* ECS EF20EA */
1395 .callback = battery_do_not_check_pmic_quirk,
1396 .matches = {
1397 DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
1398 },
1399 },
1400 {
1401 /* Lenovo Ideapad Miix 320 */
1402 .callback = battery_do_not_check_pmic_quirk,
1403 .matches = {
1404 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1405 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
1406 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
1407 },
1408 },
1352 {}, 1409 {},
1353}; 1410};
1354 1411
@@ -1488,16 +1545,18 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
1488 unsigned int i; 1545 unsigned int i;
1489 int result; 1546 int result;
1490 1547
1491 for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
1492 if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
1493 pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
1494 ": found native %s PMIC, not loading\n",
1495 acpi_battery_blacklist[i]);
1496 return;
1497 }
1498
1499 dmi_check_system(bat_dmi_table); 1548 dmi_check_system(bat_dmi_table);
1500 1549
1550 if (battery_check_pmic) {
1551 for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
1552 if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
1553 pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
1554 ": found native %s PMIC, not loading\n",
1555 acpi_battery_blacklist[i]);
1556 return;
1557 }
1558 }
1559
1501#ifdef CONFIG_ACPI_PROCFS_POWER 1560#ifdef CONFIG_ACPI_PROCFS_POWER
1502 acpi_battery_dir = acpi_lock_battery_dir(); 1561 acpi_battery_dir = acpi_lock_battery_dir();
1503 if (!acpi_battery_dir) 1562 if (!acpi_battery_dir)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 735c74a4cbdb..d9ce4b162e2c 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,7 @@
39 39
40#include <linux/cpufreq.h> 40#include <linux/cpufreq.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/iopoll.h>
42#include <linux/ktime.h> 43#include <linux/ktime.h>
43#include <linux/rwsem.h> 44#include <linux/rwsem.h>
44#include <linux/wait.h> 45#include <linux/wait.h>
@@ -49,7 +50,7 @@ struct cppc_pcc_data {
49 struct mbox_chan *pcc_channel; 50 struct mbox_chan *pcc_channel;
50 void __iomem *pcc_comm_addr; 51 void __iomem *pcc_comm_addr;
51 bool pcc_channel_acquired; 52 bool pcc_channel_acquired;
52 ktime_t deadline; 53 unsigned int deadline_us;
53 unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; 54 unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
54 55
55 bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ 56 bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
@@ -156,6 +157,9 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf); 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); 158show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
158show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); 159show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
160show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
161show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
162
159show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); 163show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
160show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); 164show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
161 165
@@ -183,6 +187,8 @@ static struct attribute *cppc_attrs[] = {
183 &lowest_perf.attr, 187 &lowest_perf.attr,
184 &lowest_nonlinear_perf.attr, 188 &lowest_nonlinear_perf.attr,
185 &nominal_perf.attr, 189 &nominal_perf.attr,
190 &nominal_freq.attr,
191 &lowest_freq.attr,
186 NULL 192 NULL
187}; 193};
188 194
@@ -193,42 +199,31 @@ static struct kobj_type cppc_ktype = {
193 199
194static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) 200static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
195{ 201{
196 int ret = -EIO, status = 0; 202 int ret, status;
197 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 203 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
198 struct acpi_pcct_shared_memory __iomem *generic_comm_base = 204 struct acpi_pcct_shared_memory __iomem *generic_comm_base =
199 pcc_ss_data->pcc_comm_addr; 205 pcc_ss_data->pcc_comm_addr;
200 ktime_t next_deadline = ktime_add(ktime_get(),
201 pcc_ss_data->deadline);
202 206
203 if (!pcc_ss_data->platform_owns_pcc) 207 if (!pcc_ss_data->platform_owns_pcc)
204 return 0; 208 return 0;
205 209
206 /* Retry in case the remote processor was too slow to catch up. */ 210 /*
207 while (!ktime_after(ktime_get(), next_deadline)) { 211 * Poll PCC status register every 3us(delay_us) for maximum of
208 /* 212 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
209 * Per spec, prior to boot the PCC space wil be initialized by 213 */
210 * platform and should have set the command completion bit when 214 ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
211 * PCC can be used by OSPM 215 status & PCC_CMD_COMPLETE_MASK, 3,
212 */ 216 pcc_ss_data->deadline_us);
213 status = readw_relaxed(&generic_comm_base->status);
214 if (status & PCC_CMD_COMPLETE_MASK) {
215 ret = 0;
216 if (chk_err_bit && (status & PCC_ERROR_MASK))
217 ret = -EIO;
218 break;
219 }
220 /*
221 * Reducing the bus traffic in case this loop takes longer than
222 * a few retries.
223 */
224 udelay(3);
225 }
226 217
227 if (likely(!ret)) 218 if (likely(!ret)) {
228 pcc_ss_data->platform_owns_pcc = false; 219 pcc_ss_data->platform_owns_pcc = false;
229 else 220 if (chk_err_bit && (status & PCC_ERROR_MASK))
230 pr_err("PCC check channel failed for ss: %d. Status=%x\n", 221 ret = -EIO;
231 pcc_ss_id, status); 222 }
223
224 if (unlikely(ret))
225 pr_err("PCC check channel failed for ss: %d. ret=%d\n",
226 pcc_ss_id, ret);
232 227
233 return ret; 228 return ret;
234} 229}
@@ -580,7 +575,7 @@ static int register_pcc_channel(int pcc_ss_idx)
580 * So add an arbitrary amount of wait on top of Nominal. 575 * So add an arbitrary amount of wait on top of Nominal.
581 */ 576 */
582 usecs_lat = NUM_RETRIES * cppc_ss->latency; 577 usecs_lat = NUM_RETRIES * cppc_ss->latency;
583 pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); 578 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
584 pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time; 579 pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
585 pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate; 580 pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
586 pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency; 581 pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
@@ -613,7 +608,6 @@ bool __weak cpc_ffh_supported(void)
613 return false; 608 return false;
614} 609}
615 610
616
617/** 611/**
618 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace 612 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
619 * 613 *
@@ -641,6 +635,34 @@ int pcc_data_alloc(int pcc_ss_id)
641 635
642 return 0; 636 return 0;
643} 637}
638
639/* Check if CPPC revision + num_ent combination is supported */
640static bool is_cppc_supported(int revision, int num_ent)
641{
642 int expected_num_ent;
643
644 switch (revision) {
645 case CPPC_V2_REV:
646 expected_num_ent = CPPC_V2_NUM_ENT;
647 break;
648 case CPPC_V3_REV:
649 expected_num_ent = CPPC_V3_NUM_ENT;
650 break;
651 default:
652 pr_debug("Firmware exports unsupported CPPC revision: %d\n",
653 revision);
654 return false;
655 }
656
657 if (expected_num_ent != num_ent) {
658 pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
659 num_ent, expected_num_ent, revision);
660 return false;
661 }
662
663 return true;
664}
665
644/* 666/*
645 * An example CPC table looks like the following. 667 * An example CPC table looks like the following.
646 * 668 *
@@ -731,14 +753,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
731 cpc_obj->type); 753 cpc_obj->type);
732 goto out_free; 754 goto out_free;
733 } 755 }
734
735 /* Only support CPPCv2. Bail otherwise. */
736 if (num_ent != CPPC_NUM_ENT) {
737 pr_debug("Firmware exports %d entries. Expected: %d\n",
738 num_ent, CPPC_NUM_ENT);
739 goto out_free;
740 }
741
742 cpc_ptr->num_entries = num_ent; 756 cpc_ptr->num_entries = num_ent;
743 757
744 /* Second entry should be revision. */ 758 /* Second entry should be revision. */
@@ -750,12 +764,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
750 cpc_obj->type); 764 cpc_obj->type);
751 goto out_free; 765 goto out_free;
752 } 766 }
767 cpc_ptr->version = cpc_rev;
753 768
754 if (cpc_rev != CPPC_REV) { 769 if (!is_cppc_supported(cpc_rev, num_ent))
755 pr_debug("Firmware exports revision:%d. Expected:%d\n",
756 cpc_rev, CPPC_REV);
757 goto out_free; 770 goto out_free;
758 }
759 771
760 /* Iterate through remaining entries in _CPC */ 772 /* Iterate through remaining entries in _CPC */
761 for (i = 2; i < num_ent; i++) { 773 for (i = 2; i < num_ent; i++) {
@@ -808,6 +820,18 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
808 } 820 }
809 } 821 }
810 per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id; 822 per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
823
824 /*
825 * Initialize the remaining cpc_regs as unsupported.
826 * Example: In case FW exposes CPPC v2, the below loop will initialize
827 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
828 */
829 for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
830 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
831 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
832 }
833
834
811 /* Store CPU Logical ID */ 835 /* Store CPU Logical ID */
812 cpc_ptr->cpu_id = pr->id; 836 cpc_ptr->cpu_id = pr->id;
813 837
@@ -1037,26 +1061,34 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1037{ 1061{
1038 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); 1062 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1039 struct cpc_register_resource *highest_reg, *lowest_reg, 1063 struct cpc_register_resource *highest_reg, *lowest_reg,
1040 *lowest_non_linear_reg, *nominal_reg; 1064 *lowest_non_linear_reg, *nominal_reg,
1041 u64 high, low, nom, min_nonlinear; 1065 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1066 u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
1042 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); 1067 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1043 struct cppc_pcc_data *pcc_ss_data; 1068 struct cppc_pcc_data *pcc_ss_data = NULL;
1044 int ret = 0, regs_in_pcc = 0; 1069 int ret = 0, regs_in_pcc = 0;
1045 1070
1046 if (!cpc_desc || pcc_ss_id < 0) { 1071 if (!cpc_desc) {
1047 pr_debug("No CPC descriptor for CPU:%d\n", cpunum); 1072 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1048 return -ENODEV; 1073 return -ENODEV;
1049 } 1074 }
1050 1075
1051 pcc_ss_data = pcc_data[pcc_ss_id];
1052 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF]; 1076 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1053 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF]; 1077 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1054 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF]; 1078 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1055 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; 1079 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1080 low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1081 nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1056 1082
1057 /* Are any of the regs PCC ?*/ 1083 /* Are any of the regs PCC ?*/
1058 if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || 1084 if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1059 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) { 1085 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1086 CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1087 if (pcc_ss_id < 0) {
1088 pr_debug("Invalid pcc_ss_id\n");
1089 return -ENODEV;
1090 }
1091 pcc_ss_data = pcc_data[pcc_ss_id];
1060 regs_in_pcc = 1; 1092 regs_in_pcc = 1;
1061 down_write(&pcc_ss_data->pcc_lock); 1093 down_write(&pcc_ss_data->pcc_lock);
1062 /* Ring doorbell once to update PCC subspace */ 1094 /* Ring doorbell once to update PCC subspace */
@@ -1081,6 +1113,17 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1081 if (!high || !low || !nom || !min_nonlinear) 1113 if (!high || !low || !nom || !min_nonlinear)
1082 ret = -EFAULT; 1114 ret = -EFAULT;
1083 1115
1116 /* Read optional lowest and nominal frequencies if present */
1117 if (CPC_SUPPORTED(low_freq_reg))
1118 cpc_read(cpunum, low_freq_reg, &low_f);
1119
1120 if (CPC_SUPPORTED(nom_freq_reg))
1121 cpc_read(cpunum, nom_freq_reg, &nom_f);
1122
1123 perf_caps->lowest_freq = low_f;
1124 perf_caps->nominal_freq = nom_f;
1125
1126
1084out_err: 1127out_err:
1085 if (regs_in_pcc) 1128 if (regs_in_pcc)
1086 up_write(&pcc_ss_data->pcc_lock); 1129 up_write(&pcc_ss_data->pcc_lock);
@@ -1101,16 +1144,15 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1101 struct cpc_register_resource *delivered_reg, *reference_reg, 1144 struct cpc_register_resource *delivered_reg, *reference_reg,
1102 *ref_perf_reg, *ctr_wrap_reg; 1145 *ref_perf_reg, *ctr_wrap_reg;
1103 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); 1146 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1104 struct cppc_pcc_data *pcc_ss_data; 1147 struct cppc_pcc_data *pcc_ss_data = NULL;
1105 u64 delivered, reference, ref_perf, ctr_wrap_time; 1148 u64 delivered, reference, ref_perf, ctr_wrap_time;
1106 int ret = 0, regs_in_pcc = 0; 1149 int ret = 0, regs_in_pcc = 0;
1107 1150
1108 if (!cpc_desc || pcc_ss_id < 0) { 1151 if (!cpc_desc) {
1109 pr_debug("No CPC descriptor for CPU:%d\n", cpunum); 1152 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1110 return -ENODEV; 1153 return -ENODEV;
1111 } 1154 }
1112 1155
1113 pcc_ss_data = pcc_data[pcc_ss_id];
1114 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; 1156 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1115 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; 1157 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1116 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; 1158 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1126,6 +1168,11 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1126 /* Are any of the regs PCC ?*/ 1168 /* Are any of the regs PCC ?*/
1127 if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || 1169 if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1128 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { 1170 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1171 if (pcc_ss_id < 0) {
1172 pr_debug("Invalid pcc_ss_id\n");
1173 return -ENODEV;
1174 }
1175 pcc_ss_data = pcc_data[pcc_ss_id];
1129 down_write(&pcc_ss_data->pcc_lock); 1176 down_write(&pcc_ss_data->pcc_lock);
1130 regs_in_pcc = 1; 1177 regs_in_pcc = 1;
1131 /* Ring doorbell once to update PCC subspace */ 1178 /* Ring doorbell once to update PCC subspace */
@@ -1176,15 +1223,14 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1176 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); 1223 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1177 struct cpc_register_resource *desired_reg; 1224 struct cpc_register_resource *desired_reg;
1178 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); 1225 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1179 struct cppc_pcc_data *pcc_ss_data; 1226 struct cppc_pcc_data *pcc_ss_data = NULL;
1180 int ret = 0; 1227 int ret = 0;
1181 1228
1182 if (!cpc_desc || pcc_ss_id < 0) { 1229 if (!cpc_desc) {
1183 pr_debug("No CPC descriptor for CPU:%d\n", cpu); 1230 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1184 return -ENODEV; 1231 return -ENODEV;
1185 } 1232 }
1186 1233
1187 pcc_ss_data = pcc_data[pcc_ss_id];
1188 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; 1234 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1189 1235
1190 /* 1236 /*
@@ -1195,6 +1241,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1195 * achieve that goal here 1241 * achieve that goal here
1196 */ 1242 */
1197 if (CPC_IN_PCC(desired_reg)) { 1243 if (CPC_IN_PCC(desired_reg)) {
1244 if (pcc_ss_id < 0) {
1245 pr_debug("Invalid pcc_ss_id\n");
1246 return -ENODEV;
1247 }
1248 pcc_ss_data = pcc_data[pcc_ss_id];
1198 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */ 1249 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1199 if (pcc_ss_data->platform_owns_pcc) { 1250 if (pcc_ss_data->platform_owns_pcc) {
1200 ret = check_pcc_chan(pcc_ss_id, false); 1251 ret = check_pcc_chan(pcc_ss_id, false);
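
For readers unfamiliar with the linux/iopoll.h helpers, the check_pcc_chan()
rework above swaps the open-coded ktime retry loop for
readw_relaxed_poll_timeout(), which polls a register until a condition holds or
a deadline passes. The standalone sketch below approximates those semantics in
plain userspace C; it is not the kernel macro, and the dummy status source and
the 3 us / 1000 us numbers are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/*
 * Rough approximation of an iopoll-style helper: call read_fn() every
 * delay_us until cond(*val) is true or timeout_us elapses.  Returns 0 on
 * success, -1 on timeout; *val holds the last value read either way.
 */
static int poll_timeout(unsigned int (*read_fn)(void), bool (*cond)(unsigned int),
			unsigned int *val, long delay_us, long timeout_us)
{
	struct timespec start, now;
	long elapsed_us;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		*val = read_fn();
		if (cond(*val))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_us = (now.tv_sec - start.tv_sec) * 1000000L +
			     (now.tv_nsec - start.tv_nsec) / 1000;
		if (elapsed_us > timeout_us)
			return -1;
		usleep(delay_us);
	}
}

static unsigned int reads;

static unsigned int read_status(void)
{
	/* Dummy status source: pretend the complete bit appears on the 5th read. */
	return ++reads >= 5 ? 0x1 : 0x0;
}

static bool cmd_complete(unsigned int status)
{
	return status & 0x1;	/* corresponds to PCC_CMD_COMPLETE_MASK */
}

int main(void)
{
	unsigned int status;
	int ret = poll_timeout(read_status, cmd_complete, &status, 3, 1000);

	printf("ret=%d status=0x%x\n", ret, status);
	return 0;
}
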
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index 71769fd687b2..6fa9c2a4cfe9 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -8,8 +8,8 @@ void acpi_reboot(void)
8{ 8{
9 struct acpi_generic_address *rr; 9 struct acpi_generic_address *rr;
10 struct pci_bus *bus0; 10 struct pci_bus *bus0;
11 u8 reset_value;
12 unsigned int devfn; 11 unsigned int devfn;
12 u8 reset_value;
13 13
14 if (acpi_disabled) 14 if (acpi_disabled)
15 return; 15 return;
@@ -40,7 +40,7 @@ void acpi_reboot(void)
40 /* Form PCI device/function pair. */ 40 /* Form PCI device/function pair. */
41 devfn = PCI_DEVFN((rr->address >> 32) & 0xffff, 41 devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
42 (rr->address >> 16) & 0xffff); 42 (rr->address >> 16) & 0xffff);
43 printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG."); 43 printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.\n");
44 /* Write the value that resets us. */ 44 /* Write the value that resets us. */
45 pci_bus_write_config_byte(bus0, devfn, 45 pci_bus_write_config_byte(bus0, devfn,
46 (rr->address & 0xffff), reset_value); 46 (rr->address & 0xffff), reset_value);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index b15115a48775..3464580ac3ca 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,9 +42,6 @@
42 */ 42 */
43static struct cppc_cpudata **all_cpu_data; 43static struct cppc_cpudata **all_cpu_data;
44 44
45/* Capture the max KHz from DMI */
46static u64 cppc_dmi_max_khz;
47
48/* Callback function used to retrieve the max frequency from DMI */ 45/* Callback function used to retrieve the max frequency from DMI */
49static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) 46static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
50{ 47{
@@ -75,6 +72,64 @@ static u64 cppc_get_dmi_max_khz(void)
75 return (1000 * mhz); 72 return (1000 * mhz);
76} 73}
77 74
75/*
76 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
77 * use them to convert perf to freq and vice versa
78 *
79 * If the perf/freq point lies between Nominal and Lowest, we can treat
80 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
81 * and extrapolate the rest
82 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
83 */
84static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
85 unsigned int perf)
86{
87 static u64 max_khz;
88 struct cppc_perf_caps *caps = &cpu->perf_caps;
89 u64 mul, div;
90
91 if (caps->lowest_freq && caps->nominal_freq) {
92 if (perf >= caps->nominal_perf) {
93 mul = caps->nominal_freq;
94 div = caps->nominal_perf;
95 } else {
96 mul = caps->nominal_freq - caps->lowest_freq;
97 div = caps->nominal_perf - caps->lowest_perf;
98 }
99 } else {
100 if (!max_khz)
101 max_khz = cppc_get_dmi_max_khz();
102 mul = max_khz;
103 div = cpu->perf_caps.highest_perf;
104 }
105 return (u64)perf * mul / div;
106}
107
108static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
109 unsigned int freq)
110{
111 static u64 max_khz;
112 struct cppc_perf_caps *caps = &cpu->perf_caps;
113 u64 mul, div;
114
115 if (caps->lowest_freq && caps->nominal_freq) {
116 if (freq >= caps->nominal_freq) {
117 mul = caps->nominal_perf;
118 div = caps->nominal_freq;
119 } else {
120 mul = caps->lowest_perf;
121 div = caps->lowest_freq;
122 }
123 } else {
124 if (!max_khz)
125 max_khz = cppc_get_dmi_max_khz();
126 mul = cpu->perf_caps.highest_perf;
127 div = max_khz;
128 }
129
130 return (u64)freq * mul / div;
131}
132
78static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, 133static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
79 unsigned int target_freq, 134 unsigned int target_freq,
80 unsigned int relation) 135 unsigned int relation)
@@ -86,7 +141,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
86 141
87 cpu = all_cpu_data[policy->cpu]; 142 cpu = all_cpu_data[policy->cpu];
88 143
89 desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz; 144 desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
90 /* Return if it is exactly the same perf */ 145 /* Return if it is exactly the same perf */
91 if (desired_perf == cpu->perf_ctrls.desired_perf) 146 if (desired_perf == cpu->perf_ctrls.desired_perf)
92 return ret; 147 return ret;
@@ -186,24 +241,24 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
186 return ret; 241 return ret;
187 } 242 }
188 243
189 cppc_dmi_max_khz = cppc_get_dmi_max_khz(); 244 /* Convert the lowest and nominal freq from MHz to KHz */
245 cpu->perf_caps.lowest_freq *= 1000;
246 cpu->perf_caps.nominal_freq *= 1000;
190 247
191 /* 248 /*
192 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see 249 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
193 * Section 8.4.7.1.1.5 of ACPI 6.1 spec) 250 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
194 */ 251 */
195 policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz / 252 policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
196 cpu->perf_caps.highest_perf; 253 policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
197 policy->max = cppc_dmi_max_khz;
198 254
199 /* 255 /*
200 * Set cpuinfo.min_freq to Lowest to make the full range of performance 256 * Set cpuinfo.min_freq to Lowest to make the full range of performance
201 * available if userspace wants to use any perf between lowest & lowest 257 * available if userspace wants to use any perf between lowest & lowest
202 * nonlinear perf 258 * nonlinear perf
203 */ 259 */
204 policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / 260 policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
205 cpu->perf_caps.highest_perf; 261 policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
206 policy->cpuinfo.max_freq = cppc_dmi_max_khz;
207 262
208 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); 263 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
209 policy->shared_type = cpu->shared_type; 264 policy->shared_type = cpu->shared_type;
@@ -229,7 +284,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
229 cpu->cur_policy = policy; 284 cpu->cur_policy = policy;
230 285
231 /* Set policy->cur to max now. The governors will adjust later. */ 286 /* Set policy->cur to max now. The governors will adjust later. */
232 policy->cur = cppc_dmi_max_khz; 287 policy->cur = cppc_cpufreq_perf_to_khz(cpu,
288 cpu->perf_caps.highest_perf);
233 cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; 289 cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
234 290
235 ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); 291 ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
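
To make the new perf/frequency conversion concrete, here is a standalone sketch
that mirrors the two cases in cppc_cpufreq_perf_to_khz() above: the perf:freq
ratio at nominal for requests at or above nominal_perf, and the
lowest-to-nominal slope below it. The capability numbers are purely
hypothetical, and the frequencies are in kHz, i.e. after the MHz-to-kHz
conversion done in cppc_cpufreq_cpu_init().

#include <stdio.h>

struct caps {
	unsigned int lowest_perf;
	unsigned int nominal_perf;
	unsigned int highest_perf;
	unsigned int lowest_freq;	/* kHz */
	unsigned int nominal_freq;	/* kHz */
};

static unsigned int perf_to_khz(const struct caps *c, unsigned int perf)
{
	unsigned long long mul, div;

	if (perf >= c->nominal_perf) {
		/* At or above nominal: scale by the perf:freq ratio at nominal. */
		mul = c->nominal_freq;
		div = c->nominal_perf;
	} else {
		/* Below nominal: use the lowest-to-nominal slope. */
		mul = c->nominal_freq - c->lowest_freq;
		div = c->nominal_perf - c->lowest_perf;
	}
	return (unsigned long long)perf * mul / div;
}

int main(void)
{
	/* Hypothetical capabilities, for illustration only. */
	struct caps c = {
		.lowest_perf = 10, .nominal_perf = 40, .highest_perf = 48,
		.lowest_freq = 500000, .nominal_freq = 2000000,
	};

	printf("nominal -> %u kHz\n", perf_to_khz(&c, c.nominal_perf));	/* 2000000 */
	printf("highest -> %u kHz\n", perf_to_khz(&c, c.highest_perf));	/* 2400000 */
	printf("perf 25 -> %u kHz\n", perf_to_khz(&c, 25));		/* 1250000 */
	return 0;
}
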
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 3ef7f036ceea..fc3c237daef2 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -373,33 +373,24 @@ static const struct mbox_chan_ops pcc_chan_ops = {
373}; 373};
374 374
375/** 375/**
376 * parse_pcc_subspace - Parse the PCC table and verify PCC subspace 376 * parse_pcc_subspaces -- Count PCC subspaces defined
377 * entries. There should be one entry per PCC client.
378 * @header: Pointer to the ACPI subtable header under the PCCT. 377 * @header: Pointer to the ACPI subtable header under the PCCT.
379 * @end: End of subtable entry. 378 * @end: End of subtable entry.
380 * 379 *
381 * Return: 0 for Success, else errno. 380 * Return: If we find a PCC subspace entry of a valid type, return 0.
381 * Otherwise, return -EINVAL.
382 * 382 *
383 * This gets called for each entry in the PCC table. 383 * This gets called for each entry in the PCC table.
384 */ 384 */
385static int parse_pcc_subspace(struct acpi_subtable_header *header, 385static int parse_pcc_subspace(struct acpi_subtable_header *header,
386 const unsigned long end) 386 const unsigned long end)
387{ 387{
388 struct acpi_pcct_hw_reduced *pcct_ss; 388 struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;
389
390 if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) {
391 pcct_ss = (struct acpi_pcct_hw_reduced *) header;
392 389
393 if ((pcct_ss->header.type != 390 if (ss->header.type < ACPI_PCCT_TYPE_RESERVED)
394 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) 391 return 0;
395 && (pcct_ss->header.type !=
396 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2)) {
397 pr_err("Incorrect PCC Subspace type detected\n");
398 return -EINVAL;
399 }
400 }
401 392
402 return 0; 393 return -EINVAL;
403} 394}
404 395
405/** 396/**
@@ -449,8 +440,8 @@ static int __init acpi_pcc_probe(void)
449 struct acpi_table_header *pcct_tbl; 440 struct acpi_table_header *pcct_tbl;
450 struct acpi_subtable_header *pcct_entry; 441 struct acpi_subtable_header *pcct_entry;
451 struct acpi_table_pcct *acpi_pcct_tbl; 442 struct acpi_table_pcct *acpi_pcct_tbl;
443 struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
452 int count, i, rc; 444 int count, i, rc;
453 int sum = 0;
454 acpi_status status = AE_OK; 445 acpi_status status = AE_OK;
455 446
456 /* Search for PCCT */ 447 /* Search for PCCT */
@@ -459,43 +450,41 @@ static int __init acpi_pcc_probe(void)
459 if (ACPI_FAILURE(status) || !pcct_tbl) 450 if (ACPI_FAILURE(status) || !pcct_tbl)
460 return -ENODEV; 451 return -ENODEV;
461 452
462 count = acpi_table_parse_entries(ACPI_SIG_PCCT, 453 /* Set up the subtable handlers */
463 sizeof(struct acpi_table_pcct), 454 for (i = ACPI_PCCT_TYPE_GENERIC_SUBSPACE;
464 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE, 455 i < ACPI_PCCT_TYPE_RESERVED; i++) {
465 parse_pcc_subspace, MAX_PCC_SUBSPACES); 456 proc[i].id = i;
466 sum += (count > 0) ? count : 0; 457 proc[i].count = 0;
467 458 proc[i].handler = parse_pcc_subspace;
468 count = acpi_table_parse_entries(ACPI_SIG_PCCT, 459 }
469 sizeof(struct acpi_table_pcct),
470 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2,
471 parse_pcc_subspace, MAX_PCC_SUBSPACES);
472 sum += (count > 0) ? count : 0;
473 460
474 if (sum == 0 || sum >= MAX_PCC_SUBSPACES) { 461 count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
475 pr_err("Error parsing PCC subspaces from PCCT\n"); 462 sizeof(struct acpi_table_pcct), proc,
463 ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
464 if (count == 0 || count > MAX_PCC_SUBSPACES) {
465 pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
476 return -EINVAL; 466 return -EINVAL;
477 } 467 }
478 468
479 pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * 469 pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * count, GFP_KERNEL);
480 sum, GFP_KERNEL);
481 if (!pcc_mbox_channels) { 470 if (!pcc_mbox_channels) {
482 pr_err("Could not allocate space for PCC mbox channels\n"); 471 pr_err("Could not allocate space for PCC mbox channels\n");
483 return -ENOMEM; 472 return -ENOMEM;
484 } 473 }
485 474
486 pcc_doorbell_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); 475 pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
487 if (!pcc_doorbell_vaddr) { 476 if (!pcc_doorbell_vaddr) {
488 rc = -ENOMEM; 477 rc = -ENOMEM;
489 goto err_free_mbox; 478 goto err_free_mbox;
490 } 479 }
491 480
492 pcc_doorbell_ack_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); 481 pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
493 if (!pcc_doorbell_ack_vaddr) { 482 if (!pcc_doorbell_ack_vaddr) {
494 rc = -ENOMEM; 483 rc = -ENOMEM;
495 goto err_free_db_vaddr; 484 goto err_free_db_vaddr;
496 } 485 }
497 486
498 pcc_doorbell_irq = kcalloc(sum, sizeof(int), GFP_KERNEL); 487 pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
499 if (!pcc_doorbell_irq) { 488 if (!pcc_doorbell_irq) {
500 rc = -ENOMEM; 489 rc = -ENOMEM;
501 goto err_free_db_ack_vaddr; 490 goto err_free_db_ack_vaddr;
@@ -509,18 +498,24 @@ static int __init acpi_pcc_probe(void)
509 if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL) 498 if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
510 pcc_mbox_ctrl.txdone_irq = true; 499 pcc_mbox_ctrl.txdone_irq = true;
511 500
512 for (i = 0; i < sum; i++) { 501 for (i = 0; i < count; i++) {
513 struct acpi_generic_address *db_reg; 502 struct acpi_generic_address *db_reg;
514 struct acpi_pcct_hw_reduced *pcct_ss; 503 struct acpi_pcct_subspace *pcct_ss;
515 pcc_mbox_channels[i].con_priv = pcct_entry; 504 pcc_mbox_channels[i].con_priv = pcct_entry;
516 505
517 pcct_ss = (struct acpi_pcct_hw_reduced *) pcct_entry; 506 if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
507 pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
508 struct acpi_pcct_hw_reduced *pcct_hrss;
509
510 pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;
518 511
519 if (pcc_mbox_ctrl.txdone_irq) { 512 if (pcc_mbox_ctrl.txdone_irq) {
520 rc = pcc_parse_subspace_irq(i, pcct_ss); 513 rc = pcc_parse_subspace_irq(i, pcct_hrss);
521 if (rc < 0) 514 if (rc < 0)
522 goto err; 515 goto err;
516 }
523 } 517 }
518 pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;
524 519
525 /* If doorbell is in system memory cache the virt address */ 520 /* If doorbell is in system memory cache the virt address */
526 db_reg = &pcct_ss->doorbell_register; 521 db_reg = &pcct_ss->doorbell_register;
@@ -531,7 +526,7 @@ static int __init acpi_pcc_probe(void)
531 ((unsigned long) pcct_entry + pcct_entry->length); 526 ((unsigned long) pcct_entry + pcct_entry->length);
532 } 527 }
533 528
534 pcc_mbox_ctrl.num_chans = sum; 529 pcc_mbox_ctrl.num_chans = count;
535 530
536 pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans); 531 pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
537 532
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 2010c0516f27..8e0b8250a139 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -20,14 +20,16 @@
20#include <acpi/pcc.h> 20#include <acpi/pcc.h>
21#include <acpi/processor.h> 21#include <acpi/processor.h>
22 22
23/* Only support CPPCv2 for now. */ 23/* Support CPPCv2 and CPPCv3 */
24#define CPPC_NUM_ENT 21 24#define CPPC_V2_REV 2
25#define CPPC_REV 2 25#define CPPC_V3_REV 3
26#define CPPC_V2_NUM_ENT 21
27#define CPPC_V3_NUM_ENT 23
26 28
27#define PCC_CMD_COMPLETE_MASK (1 << 0) 29#define PCC_CMD_COMPLETE_MASK (1 << 0)
28#define PCC_ERROR_MASK (1 << 2) 30#define PCC_ERROR_MASK (1 << 2)
29 31
30#define MAX_CPC_REG_ENT 19 32#define MAX_CPC_REG_ENT 21
31 33
32/* CPPC specific PCC commands. */ 34/* CPPC specific PCC commands. */
33#define CMD_READ 0 35#define CMD_READ 0
@@ -91,6 +93,8 @@ enum cppc_regs {
91 AUTO_ACT_WINDOW, 93 AUTO_ACT_WINDOW,
92 ENERGY_PERF, 94 ENERGY_PERF,
93 REFERENCE_PERF, 95 REFERENCE_PERF,
96 LOWEST_FREQ,
97 NOMINAL_FREQ,
94}; 98};
95 99
96/* 100/*
@@ -104,6 +108,8 @@ struct cppc_perf_caps {
104 u32 nominal_perf; 108 u32 nominal_perf;
105 u32 lowest_perf; 109 u32 lowest_perf;
106 u32 lowest_nonlinear_perf; 110 u32 lowest_nonlinear_perf;
111 u32 lowest_freq;
112 u32 nominal_freq;
107}; 113};
108 114
109struct cppc_perf_ctrls { 115struct cppc_perf_ctrls {
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 15bfb15c2fa5..cb4d7b6b085c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -578,6 +578,7 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);
578 578
579extern void acpi_early_init(void); 579extern void acpi_early_init(void);
580extern void acpi_subsystem_init(void); 580extern void acpi_subsystem_init(void);
581extern void arch_post_acpi_subsys_init(void);
581 582
582extern int acpi_nvs_register(__u64 start, __u64 size); 583extern int acpi_nvs_register(__u64 start, __u64 size);
583 584