path: root/drivers/firmware/psci/psci.c
author      Linus Torvalds <torvalds@linux-foundation.org>    2019-05-06 22:40:31 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2019-05-06 22:40:31 -0400
commit      8f5e823f9131a430b12f73e9436d7486e20c16f5 (patch)
tree        ec3f03c236182d113dd71b1b7b59be73e54ebab1 /drivers/firmware/psci/psci.c
parent      59df1c2bdecb0d1aaadfb8533df4bea528ee4952 (diff)
parent      e07095c9bbcd296401bec8b6852d258d7c926969 (diff)
Merge tag 'pm-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These fix the (Intel-specific) Performance and Energy Bias Hint (EPB) handling and expose it to user space via sysfs, fix and clean up several cpufreq drivers, add support for two new chips to the qoriq cpufreq driver, fix, simplify and clean up the cpufreq core and the schedutil governor, add support for "CPU" domains to the generic power domains (genpd) framework and provide low-level PSCI firmware support for that feature, fix the exynos cpuidle driver and fix a couple of issues in the devfreq subsystem and clean it up.

 Specifics:

  - Fix the handling of Performance and Energy Bias Hint (EPB) on Intel processors and expose it to user space via sysfs to avoid having to access it through the generic MSR I/F (Rafael Wysocki).

  - Improve the handling of global turbo changes made by the platform firmware in the intel_pstate driver (Rafael Wysocki).

  - Convert some slow-path static_cpu_has() callers to boot_cpu_has() in cpufreq (Borislav Petkov).

  - Fix the frequency calculation loop in the armada-37xx cpufreq driver (Gregory CLEMENT).

  - Fix possible object reference leaks in multiple cpufreq drivers (Wen Yang).

  - Fix kerneldoc comment in the centrino cpufreq driver (dongjian).

  - Clean up the ACPI and maple cpufreq drivers (Viresh Kumar, Mohan Kumar).

  - Add support for lx2160a and ls1028a to the qoriq cpufreq driver (Vabhav Sharma, Yuantian Tang).

  - Fix kobject memory leak in the cpufreq core (Viresh Kumar).

  - Simplify the IOwait boosting in the schedutil cpufreq governor and rework the TSC cpufreq notifier on x86 (Rafael Wysocki).

  - Clean up the cpufreq core and statistics code (Yue Hu, Kyle Lin).

  - Improve the cpufreq documentation, add SPDX license tags to some PM documentation files and unify copyright notices in them (Rafael Wysocki).

  - Add support for "CPU" domains to the generic power domains (genpd) framework and provide low-level PSCI firmware support for that feature (Ulf Hansson).

  - Rearrange the PSCI firmware support code and add support for SYSTEM_RESET2 to it (Ulf Hansson, Sudeep Holla).

  - Improve genpd support for devices in multiple power domains (Ulf Hansson).

  - Unify target residency for the AFTR and coupled AFTR states in the exynos cpuidle driver (Marek Szyprowski).

  - Introduce new helper routine in the operating performance points (OPP) framework (Andrew-sh.Cheng).

  - Add support for passing on-die termination (ODT) and auto power down parameters from the kernel to Trusted Firmware-A (TF-A) to the rk3399_dmc devfreq driver (Enric Balletbo i Serra).

  - Add tracing to devfreq (Lukasz Luba).

  - Make the exynos-bus devfreq driver suspend all devices on system shutdown (Marek Szyprowski).

  - Fix a few minor issues in the devfreq subsystem and clean it up somewhat (Enric Balletbo i Serra, MyungJoo Ham, Rob Herring, Saravana Kannan, Yangtao Li).

  - Improve system wakeup diagnostics (Stephen Boyd).

  - Rework filesystem sync messages emitted during system suspend and hibernation (Harry Pan)"

* tag 'pm-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (72 commits)
  cpufreq: Fix kobject memleak
  cpufreq: armada-37xx: fix frequency calculation for opp
  cpufreq: centrino: Fix centrino_setpolicy() kerneldoc comment
  cpufreq: qoriq: add support for lx2160a
  x86: tsc: Rework time_cpufreq_notifier()
  PM / Domains: Allow to attach a CPU via genpd_dev_pm_attach_by_id|name()
  PM / Domains: Search for the CPU device outside the genpd lock
  PM / Domains: Drop unused in-parameter to some genpd functions
  PM / Domains: Use the base device for driver_deferred_probe_check_state()
  cpufreq: qoriq: Add ls1028a chip support
  PM / Domains: Enable genpd_dev_pm_attach_by_id|name() for single PM domain
  PM / Domains: Allow OF lookup for multi PM domain case from ->attach_dev()
  PM / Domains: Don't kfree() the virtual device in the error path
  cpufreq: Move ->get callback check outside of __cpufreq_get()
  PM / Domains: remove unnecessary unlikely()
  cpufreq: Remove needless bios_limit check in show_bios_limit()
  drivers/cpufreq/acpi-cpufreq.c: This fixes the following checkpatch warning
  firmware/psci: add support for SYSTEM_RESET2
  PM / devfreq: add tracing for scheduling work
  trace: events: add devfreq trace event file
  ...
Diffstat (limited to 'drivers/firmware/psci/psci.c')
-rw-r--r--    drivers/firmware/psci/psci.c    750
1 file changed, 750 insertions, 0 deletions
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
new file mode 100644
index 000000000000..fe090ef43d28
--- /dev/null
+++ b/drivers/firmware/psci/psci.c
@@ -0,0 +1,750 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015 ARM Limited
 */

#define pr_fmt(fmt) "psci: " fmt

#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <uapi/linux/psci.h>

#include <asm/cpuidle.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

/*
 * While a 64-bit OS can make calls with SMC32 calling conventions, for some
 * calls it is necessary to use SMC64 to pass or return 64-bit values.
 * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
 * (native-width) function ID.
 */
#ifdef CONFIG_64BIT
#define PSCI_FN_NATIVE(version, name)        PSCI_##version##_FN64_##name
#else
#define PSCI_FN_NATIVE(version, name)        PSCI_##version##_FN_##name
#endif
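
/*
 * For example, PSCI_FN_NATIVE(0_2, CPU_ON) expands to PSCI_0_2_FN64_CPU_ON
 * when CONFIG_64BIT is set and to PSCI_0_2_FN_CPU_ON otherwise.
 */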

/*
 * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
 * calls to its resident CPU, so we must avoid issuing those. We never migrate
 * a Trusted OS even if it claims to be capable of migration -- doing so will
 * require cooperation with a Trusted OS driver.
 */
static int resident_cpu = -1;

bool psci_tos_resident_on(int cpu)
{
        return cpu == resident_cpu;
}
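
/*
 * Illustrative use only (callers live outside this file): a CPU hotplug
 * back-end would typically refuse to take the resident CPU down, along
 * the lines of
 *
 *        if (psci_tos_resident_on(cpu))
 *                return -EPERM;
 *
 * since a CPU_OFF call issued for that CPU may be denied by the Trusted OS.
 */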

struct psci_operations psci_ops = {
        .conduit = PSCI_CONDUIT_NONE,
        .smccc_version = SMCCC_VERSION_1_0,
};

typedef unsigned long (psci_fn)(unsigned long, unsigned long,
                                unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;

enum psci_function {
        PSCI_FN_CPU_SUSPEND,
        PSCI_FN_CPU_ON,
        PSCI_FN_CPU_OFF,
        PSCI_FN_MIGRATE,
        PSCI_FN_MAX,
};

static u32 psci_function_id[PSCI_FN_MAX];

#define PSCI_0_2_POWER_STATE_MASK               \
                (PSCI_0_2_POWER_STATE_ID_MASK | \
                PSCI_0_2_POWER_STATE_TYPE_MASK | \
                PSCI_0_2_POWER_STATE_AFFL_MASK)

#define PSCI_1_0_EXT_POWER_STATE_MASK           \
                (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \
                PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)

static u32 psci_cpu_suspend_feature;
static bool psci_system_reset2_supported;

static inline bool psci_has_ext_power_state(void)
{
        return psci_cpu_suspend_feature &
                        PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}

static inline bool psci_has_osi_support(void)
{
        return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
}

static inline bool psci_power_state_loses_context(u32 state)
{
        const u32 mask = psci_has_ext_power_state() ?
                                PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
                                PSCI_0_2_POWER_STATE_TYPE_MASK;

        return state & mask;
}

static inline bool psci_power_state_is_valid(u32 state)
{
        const u32 valid_mask = psci_has_ext_power_state() ?
                                PSCI_1_0_EXT_POWER_STATE_MASK :
                                PSCI_0_2_POWER_STATE_MASK;

        return !(state & ~valid_mask);
}
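
/*
 * For reference, the masks above (from uapi/linux/psci.h) describe the
 * power_state parameter as follows: in the original format the StateID
 * occupies bits [15:0], the PowerType bit is bit [16] and the AffinityLevel
 * field is bits [25:24]; in the extended StateID format the StateID grows
 * to bits [27:0] and the PowerType bit moves to bit [30].
 */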

static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
                        unsigned long arg0, unsigned long arg1,
                        unsigned long arg2)
{
        struct arm_smccc_res res;

        arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
        return res.a0;
}

static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
                        unsigned long arg0, unsigned long arg1,
                        unsigned long arg2)
{
        struct arm_smccc_res res;

        arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
        return res.a0;
}
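
/*
 * Both conduits use the same SMCCC register layout: the function ID goes in
 * the first argument register, arg0..arg2 follow in the next three, the
 * remaining argument registers are zeroed, and the firmware's return code
 * comes back in the first result register, i.e. res.a0.
 */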

static int psci_to_linux_errno(int errno)
{
        switch (errno) {
        case PSCI_RET_SUCCESS:
                return 0;
        case PSCI_RET_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case PSCI_RET_INVALID_PARAMS:
        case PSCI_RET_INVALID_ADDRESS:
                return -EINVAL;
        case PSCI_RET_DENIED:
                return -EPERM;
        };

        return -EINVAL;
}

static u32 psci_get_version(void)
{
        return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}

static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
        int err;
        u32 fn;

        fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
        err = invoke_psci_fn(fn, state, entry_point, 0);
        return psci_to_linux_errno(err);
}

static int psci_cpu_off(u32 state)
{
        int err;
        u32 fn;

        fn = psci_function_id[PSCI_FN_CPU_OFF];
        err = invoke_psci_fn(fn, state, 0, 0);
        return psci_to_linux_errno(err);
}

static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
        int err;
        u32 fn;

        fn = psci_function_id[PSCI_FN_CPU_ON];
        err = invoke_psci_fn(fn, cpuid, entry_point, 0);
        return psci_to_linux_errno(err);
}

static int psci_migrate(unsigned long cpuid)
{
        int err;
        u32 fn;

        fn = psci_function_id[PSCI_FN_MIGRATE];
        err = invoke_psci_fn(fn, cpuid, 0, 0);
        return psci_to_linux_errno(err);
}

static int psci_affinity_info(unsigned long target_affinity,
                unsigned long lowest_affinity_level)
{
        return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO),
                              target_affinity, lowest_affinity_level, 0);
}

static int psci_migrate_info_type(void)
{
        return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}

static unsigned long psci_migrate_info_up_cpu(void)
{
        return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
                              0, 0, 0);
}

static void set_conduit(enum psci_conduit conduit)
{
        switch (conduit) {
        case PSCI_CONDUIT_HVC:
                invoke_psci_fn = __invoke_psci_fn_hvc;
                break;
        case PSCI_CONDUIT_SMC:
                invoke_psci_fn = __invoke_psci_fn_smc;
                break;
        default:
                WARN(1, "Unexpected PSCI conduit %d\n", conduit);
        }

        psci_ops.conduit = conduit;
}

static int get_set_conduit_method(struct device_node *np)
{
        const char *method;

        pr_info("probing for conduit method from DT.\n");

        if (of_property_read_string(np, "method", &method)) {
                pr_warn("missing \"method\" property\n");
                return -ENXIO;
        }

        if (!strcmp("hvc", method)) {
                set_conduit(PSCI_CONDUIT_HVC);
        } else if (!strcmp("smc", method)) {
                set_conduit(PSCI_CONDUIT_SMC);
        } else {
                pr_warn("invalid \"method\" property: %s\n", method);
                return -EINVAL;
        }
        return 0;
}

static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
        if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
            psci_system_reset2_supported) {
                /*
                 * reset_type[31] = 0 (architectural)
                 * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
                 * cookie = 0 (ignored by the implementation)
                 */
                invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
        } else {
                invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
        }
}

static void psci_sys_poweroff(void)
{
        invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}

static int __init psci_features(u32 psci_func_id)
{
        return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
                              psci_func_id, 0, 0);
}

#ifdef CONFIG_CPU_IDLE
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);

static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
        int err = of_property_read_u32(np, "arm,psci-suspend-param", state);

        if (err) {
                pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
                return err;
        }

        if (!psci_power_state_is_valid(*state)) {
                pr_warn("Invalid PSCI power state %#x\n", *state);
                return -EINVAL;
        }

        return 0;
}
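
/*
 * For illustration only (the node name and timing values below are made up),
 * a DT idle state consumed by this parser typically looks like:
 *
 *        cpu-sleep {
 *                compatible = "arm,idle-state";
 *                arm,psci-suspend-param = <0x0010000>;
 *                entry-latency-us = <40>;
 *                exit-latency-us = <100>;
 *                min-residency-us = <150>;
 *        };
 */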

static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
{
        int i, ret = 0, count = 0;
        u32 *psci_states;
        struct device_node *state_node;

        /* Count idle states */
        while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
                                              count))) {
                count++;
                of_node_put(state_node);
        }

        if (!count)
                return -ENODEV;

        psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
        if (!psci_states)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
                ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
                of_node_put(state_node);

                if (ret)
                        goto free_mem;

                pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
        }

        /* Idle states parsed correctly, initialize per-cpu pointer */
        per_cpu(psci_power_state, cpu) = psci_states;
        return 0;

free_mem:
        kfree(psci_states);
        return ret;
}

#ifdef CONFIG_ACPI
#include <acpi/processor.h>

static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
{
        int i, count;
        u32 *psci_states;
        struct acpi_lpi_state *lpi;
        struct acpi_processor *pr = per_cpu(processors, cpu);

        if (unlikely(!pr || !pr->flags.has_lpi))
                return -EINVAL;

        count = pr->power.count - 1;
        if (count <= 0)
                return -ENODEV;

        psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
        if (!psci_states)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                u32 state;

                lpi = &pr->power.lpi_states[i + 1];
                /*
                 * Only bits[31:0] represent a PSCI power_state while
                 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
                 */
                state = lpi->address;
                if (!psci_power_state_is_valid(state)) {
                        pr_warn("Invalid PSCI power state %#x\n", state);
                        kfree(psci_states);
                        return -EINVAL;
                }
                psci_states[i] = state;
        }
        /* Idle states parsed correctly, initialize per-cpu pointer */
        per_cpu(psci_power_state, cpu) = psci_states;
        return 0;
}
#else
static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
{
        return -EINVAL;
}
#endif

int psci_cpu_init_idle(unsigned int cpu)
{
        struct device_node *cpu_node;
        int ret;

        /*
         * If the PSCI cpu_suspend function hook has not been initialized
         * idle states must not be enabled, so bail out
         */
        if (!psci_ops.cpu_suspend)
                return -EOPNOTSUPP;

        if (!acpi_disabled)
                return psci_acpi_cpu_init_idle(cpu);

        cpu_node = of_get_cpu_node(cpu, NULL);
        if (!cpu_node)
                return -ENODEV;

        ret = psci_dt_cpu_init_idle(cpu_node, cpu);

        of_node_put(cpu_node);

        return ret;
}

static int psci_suspend_finisher(unsigned long index)
{
        u32 *state = __this_cpu_read(psci_power_state);

        return psci_ops.cpu_suspend(state[index - 1],
                                    __pa_symbol(cpu_resume));
}

int psci_cpu_suspend_enter(unsigned long index)
{
        int ret;
        u32 *state = __this_cpu_read(psci_power_state);
        /*
         * idle state index 0 corresponds to wfi, should never be called
         * from the cpu_suspend operations
         */
        if (WARN_ON_ONCE(!index))
                return -EINVAL;

        if (!psci_power_state_loses_context(state[index - 1]))
                ret = psci_ops.cpu_suspend(state[index - 1], 0);
        else
                ret = cpu_suspend(index, psci_suspend_finisher);

        return ret;
}
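
/*
 * Note on indexing: cpuidle reserves index 0 for plain WFI, which never
 * reaches this function, so idle state index N maps to the DT/ACPI state
 * stored in psci_power_state[N - 1] above.
 */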

/* ARM specific CPU idle operations */
#ifdef CONFIG_ARM
static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
        .suspend = psci_cpu_suspend_enter,
        .init = psci_dt_cpu_init_idle,
};

CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
#endif
#endif

static int psci_system_suspend(unsigned long unused)
{
        return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
                              __pa_symbol(cpu_resume), 0, 0);
}

static int psci_system_suspend_enter(suspend_state_t state)
{
        return cpu_suspend(0, psci_system_suspend);
}

static const struct platform_suspend_ops psci_suspend_ops = {
        .valid = suspend_valid_only_mem,
        .enter = psci_system_suspend_enter,
};

static void __init psci_init_system_reset2(void)
{
        int ret;

        ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));

        if (ret != PSCI_RET_NOT_SUPPORTED)
                psci_system_reset2_supported = true;
}

static void __init psci_init_system_suspend(void)
{
        int ret;

        if (!IS_ENABLED(CONFIG_SUSPEND))
                return;

        ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));

        if (ret != PSCI_RET_NOT_SUPPORTED)
                suspend_set_ops(&psci_suspend_ops);
}

static void __init psci_init_cpu_suspend(void)
{
        int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]);

        if (feature != PSCI_RET_NOT_SUPPORTED)
                psci_cpu_suspend_feature = feature;
}

/*
 * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
 * return DENIED (which would be fatal).
 */
static void __init psci_init_migrate(void)
{
        unsigned long cpuid;
        int type, cpu = -1;

        type = psci_ops.migrate_info_type();

        if (type == PSCI_0_2_TOS_MP) {
                pr_info("Trusted OS migration not required\n");
                return;
        }

        if (type == PSCI_RET_NOT_SUPPORTED) {
                pr_info("MIGRATE_INFO_TYPE not supported.\n");
                return;
        }

        if (type != PSCI_0_2_TOS_UP_MIGRATE &&
            type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
                pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
                return;
        }

        cpuid = psci_migrate_info_up_cpu();
        if (cpuid & ~MPIDR_HWID_BITMASK) {
                pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
                        cpuid);
                return;
        }

        cpu = get_logical_index(cpuid);
        resident_cpu = cpu >= 0 ? cpu : -1;

        pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}

static void __init psci_init_smccc(void)
{
        u32 ver = ARM_SMCCC_VERSION_1_0;
        int feature;

        feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);

        if (feature != PSCI_RET_NOT_SUPPORTED) {
                u32 ret;
                ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
                if (ret == ARM_SMCCC_VERSION_1_1) {
                        psci_ops.smccc_version = SMCCC_VERSION_1_1;
                        ver = ret;
                }
        }

        /*
         * Conveniently, the SMCCC and PSCI versions are encoded the
         * same way. No, this isn't accidental.
         */
        pr_info("SMC Calling Convention v%d.%d\n",
                PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));

}
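
/*
 * Both version words put the major number in bits [31:16] and the minor
 * number in bits [15:0], so a returned value of 0x00010001, for example,
 * is reported above as "SMC Calling Convention v1.1".
 */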

static void __init psci_0_2_set_functions(void)
{
        pr_info("Using standard PSCI v0.2 function IDs\n");
        psci_ops.get_version = psci_get_version;

        psci_function_id[PSCI_FN_CPU_SUSPEND] =
                PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
        psci_ops.cpu_suspend = psci_cpu_suspend;

        psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
        psci_ops.cpu_off = psci_cpu_off;

        psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON);
        psci_ops.cpu_on = psci_cpu_on;

        psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE);
        psci_ops.migrate = psci_migrate;

        psci_ops.affinity_info = psci_affinity_info;

        psci_ops.migrate_info_type = psci_migrate_info_type;

        arm_pm_restart = psci_sys_reset;

        pm_power_off = psci_sys_poweroff;
}

/*
 * Probe function for PSCI firmware versions >= 0.2
 */
static int __init psci_probe(void)
{
        u32 ver = psci_get_version();

        pr_info("PSCIv%d.%d detected in firmware.\n",
                        PSCI_VERSION_MAJOR(ver),
                        PSCI_VERSION_MINOR(ver));

        if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
                pr_err("Conflicting PSCI version detected.\n");
                return -EINVAL;
        }

        psci_0_2_set_functions();

        psci_init_migrate();

        if (PSCI_VERSION_MAJOR(ver) >= 1) {
                psci_init_smccc();
                psci_init_cpu_suspend();
                psci_init_system_suspend();
                psci_init_system_reset2();
        }

        return 0;
}

typedef int (*psci_initcall_t)(const struct device_node *);

/*
 * PSCI init function for PSCI versions >=0.2
 *
 * Probe based on PSCI PSCI_VERSION function
 */
static int __init psci_0_2_init(struct device_node *np)
{
        int err;

        err = get_set_conduit_method(np);
        if (err)
                return err;

        /*
         * Starting with v0.2, the PSCI specification introduced a call
         * (PSCI_VERSION) that allows probing the firmware version, so
         * that PSCI function IDs and version specific initialization
         * can be carried out according to the specific version reported
         * by firmware
         */
        return psci_probe();
}

/*
 * PSCI < v0.2 get PSCI Function IDs via DT.
 */
static int __init psci_0_1_init(struct device_node *np)
{
        u32 id;
        int err;

        err = get_set_conduit_method(np);
        if (err)
                return err;

        pr_info("Using PSCI v0.1 Function IDs from DT\n");

        if (!of_property_read_u32(np, "cpu_suspend", &id)) {
                psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
                psci_ops.cpu_suspend = psci_cpu_suspend;
        }

        if (!of_property_read_u32(np, "cpu_off", &id)) {
                psci_function_id[PSCI_FN_CPU_OFF] = id;
                psci_ops.cpu_off = psci_cpu_off;
        }

        if (!of_property_read_u32(np, "cpu_on", &id)) {
                psci_function_id[PSCI_FN_CPU_ON] = id;
                psci_ops.cpu_on = psci_cpu_on;
        }

        if (!of_property_read_u32(np, "migrate", &id)) {
                psci_function_id[PSCI_FN_MIGRATE] = id;
                psci_ops.migrate = psci_migrate;
        }

        return 0;
}
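
/*
 * For illustration only (the function ID values are made up), a PSCI v0.1
 * firmware node handled by this path looks roughly like:
 *
 *        psci {
 *                compatible = "arm,psci";
 *                method = "smc";
 *                cpu_suspend = <0x95c10000>;
 *                cpu_off = <0x95c10001>;
 *                cpu_on = <0x95c10002>;
 *                migrate = <0x95c10003>;
 *        };
 */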

static int __init psci_1_0_init(struct device_node *np)
{
        int err;

        err = psci_0_2_init(np);
        if (err)
                return err;

        if (psci_has_osi_support())
                pr_info("OSI mode supported.\n");

        return 0;
}

static const struct of_device_id psci_of_match[] __initconst = {
        { .compatible = "arm,psci",     .data = psci_0_1_init},
        { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
        { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
        {},
};

int __init psci_dt_init(void)
{
        struct device_node *np;
        const struct of_device_id *matched_np;
        psci_initcall_t init_fn;
        int ret;

        np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);

        if (!np || !of_device_is_available(np))
                return -ENODEV;

        init_fn = (psci_initcall_t)matched_np->data;
        ret = init_fn(np);

        of_node_put(np);
        return ret;
}

#ifdef CONFIG_ACPI
/*
 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
 * explicitly clarified in SBBR
 */
int __init psci_acpi_init(void)
{
        if (!acpi_psci_present()) {
                pr_info("is not implemented in ACPI.\n");
                return -EOPNOTSUPP;
        }

        pr_info("probing for conduit method from ACPI.\n");

        if (acpi_psci_use_hvc())
                set_conduit(PSCI_CONDUIT_HVC);
        else
                set_conduit(PSCI_CONDUIT_SMC);

        return psci_probe();
}
#endif