author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 18:21:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 18:21:02 -0400
commit     3ed1c478eff8db80e234d5446cb378b503135888 (patch)
tree       e1c8e0f488ca49c49b5a31fe59add4254381dd4b /drivers
parent     151173e8ce9b95bbbbd7eedb9035cfaffbdb7cb2 (diff)
parent     371deb9500831ad1afbf9ea00e373f650deaed2f (diff)
Merge tag 'pm+acpi-3.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI updates from Rafael J Wysocki:

 - ARM big.LITTLE cpufreq driver from Viresh Kumar.

 - exynos5440 cpufreq driver from Amit Daniel Kachhap.

 - cpufreq core cleanup and code consolidation from Viresh Kumar and
   Stratos Karafotis.

 - cpufreq scalability improvement from Nathan Zimmer.

 - AMD "frequency sensitivity feedback" powersave bias for the ondemand
   cpufreq governor from Jacob Shin.

 - cpuidle code consolidation and cleanups from Daniel Lezcano.

 - ARM OMAP cpuidle fixes from Santosh Shilimkar and Daniel Lezcano.

 - ACPICA fixes and other improvements from Bob Moore, Jung-uk Kim,
   Lv Zheng, Yinghai Lu, Tang Chen, Colin Ian King, and Linn Crosetto.

 - ACPI core updates related to hotplug from Toshi Kani, Paul Bolle,
   Yasuaki Ishimatsu, and Rafael J Wysocki.

 - Intel Lynxpoint LPSS (Low-Power Subsystem) support improvements from
   Rafael J Wysocki and Andy Shevchenko.

* tag 'pm+acpi-3.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (192 commits)
  cpufreq: Revert incorrect commit 5800043
  cpufreq: MAINTAINERS: Add co-maintainer
  cpuidle: add maintainer entry
  ACPI / thermal: do not always return THERMAL_TREND_RAISING for active trip points
  ARM: s3c64xx: cpuidle: use init/exit common routine
  cpufreq: pxa2xx: initialize variables
  ACPI: video: correct acpi_video_bus_add error processing
  SH: cpuidle: use init/exit common routine
  ARM: S5pv210: compiling issue, ARM_S5PV210_CPUFREQ needs CONFIG_CPU_FREQ_TABLE=y
  ACPI: Fix wrong parameter passed to memblock_reserve
  cpuidle: fix comment format
  pnp: use %*phC to dump small buffers
  isapnp: remove debug leftovers
  ARM: imx: cpuidle: use init/exit common routine
  ARM: davinci: cpuidle: use init/exit common routine
  ARM: kirkwood: cpuidle: use init/exit common routine
  ARM: calxeda: cpuidle: use init/exit common routine
  ARM: tegra: cpuidle: use init/exit common routine for tegra3
  ARM: tegra: cpuidle: use init/exit common routine for tegra2
  ARM: OMAP4: cpuidle: use init/exit common routine
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 13
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_lpss.c | 292
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 328
-rw-r--r--  drivers/acpi/acpi_pad.c | 2
-rw-r--r--  drivers/acpi/acpi_platform.c | 40
-rw-r--r--  drivers/acpi/acpica/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 5
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 29
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 6
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 29
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 1305
-rw-r--r--  drivers/acpi/acpica/acutils.h | 28
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 10
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 12
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/acpi/acpica/evsci.c | 4
-rw-r--r--  drivers/acpi/acpica/evxface.c | 21
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 12
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 11
-rw-r--r--  drivers/acpi/acpica/exprep.c | 4
-rw-r--r--  drivers/acpi/acpica/exutils.c | 4
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 20
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 443
-rw-r--r--  drivers/acpi/acpica/nseval.c | 26
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 213
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 10
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 381
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 16
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 8
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 6
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 8
-rw-r--r--  drivers/acpi/acpica/rslist.c | 8
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 8
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 4
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 22
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 4
-rw-r--r--  drivers/acpi/acpica/utcache.c | 18
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 96
-rw-r--r--  drivers/acpi/acpica/utexcep.c | 26
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 9
-rw-r--r--  drivers/acpi/acpica/utosi.c | 26
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 399
-rw-r--r--  drivers/acpi/acpica/utxface.c | 17
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/bus.c | 5
-rw-r--r--  drivers/acpi/button.c | 1
-rw-r--r--  drivers/acpi/container.c | 152
-rw-r--r--  drivers/acpi/device_pm.c | 39
-rw-r--r--  drivers/acpi/fan.c | 8
-rw-r--r--  drivers/acpi/internal.h | 21
-rw-r--r--  drivers/acpi/osl.c | 4
-rw-r--r--  drivers/acpi/pci_link.c | 1
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/power.c | 60
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/processor_thermal.c | 24
-rw-r--r--  drivers/acpi/processor_throttling.c | 3
-rw-r--r--  drivers/acpi/scan.c | 531
-rw-r--r--  drivers/acpi/sysfs.c | 66
-rw-r--r--  drivers/acpi/thermal.c | 16
-rw-r--r--  drivers/acpi/video.c | 318
-rw-r--r--  drivers/acpi/video_detect.c | 25
-rw-r--r--  drivers/base/power/domain.c | 6
-rw-r--r--  drivers/base/power/generic_ops.c | 2
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/opp.c | 1
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/clk/x86/Makefile | 2
-rw-r--r--  drivers/clk/x86/clk-lpss.c | 99
-rw-r--r--  drivers/clk/x86/clk-lpss.h | 36
-rw-r--r--  drivers/clk/x86/clk-lpt.c | 40
-rw-r--r--  drivers/cpufreq/Kconfig | 89
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 148
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc | 18
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 17
-rw-r--r--  drivers/cpufreq/Makefile | 41
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c | 148
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 278
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 40
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 107
-rw-r--r--  drivers/cpufreq/at32ap-cpufreq.c | 123
-rw-r--r--  drivers/cpufreq/blackfin-cpufreq.c | 247
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 32
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq.c | 145
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 244
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 291
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 128
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 363
-rw-r--r--  drivers/cpufreq/cris-artpec3-cpufreq.c | 146
-rw-r--r--  drivers/cpufreq/cris-etraxfs-cpufreq.c | 142
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 231
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c | 22
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 11
-rw-r--r--  drivers/cpufreq/elanfreq.c | 10
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c | 481
-rw-r--r--  drivers/cpufreq/gx-suspmod.c | 11
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c | 438
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c | 220
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 21
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/longhaul.c | 18
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 248
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 34
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 13
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 12
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 10
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 19
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 209
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h | 24
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c | 115
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 156
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 492
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c | 254
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c | 247
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 406
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 10
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 189
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c | 408
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c | 269
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 28
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 12
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 5
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c | 292
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c | 92
-rw-r--r--  drivers/cpuidle/Kconfig | 6
-rw-r--r--  drivers/cpuidle/Makefile | 2
-rw-r--r--  drivers/cpuidle/cpuidle-calxeda.c | 57
-rw-r--r--  drivers/cpuidle/cpuidle-kirkwood.c | 29
-rw-r--r--  drivers/cpuidle/cpuidle.c | 153
-rw-r--r--  drivers/cpuidle/driver.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/idle/intel_idle.c | 4
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 3
-rw-r--r--  drivers/pnp/isapnp/core.c | 11
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 1
-rw-r--r--  drivers/pnp/pnpbios/proc.c | 5
150 files changed, 10754 insertions, 2810 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4bf68c8d4797..100bd724f648 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -298,14 +298,6 @@ config ACPI_DEBUG
298 Documentation/kernel-parameters.txt to control the type and 298 Documentation/kernel-parameters.txt to control the type and
299 amount of debug output. 299 amount of debug output.
300 300
301config ACPI_DEBUG_FUNC_TRACE
302 bool "Additionally enable ACPI function tracing"
303 default n
304 depends on ACPI_DEBUG
305 help
306 ACPI Debug Statements slow down ACPI processing. Function trace
307 is about half of the penalty and is rarely useful.
308
309config ACPI_PCI_SLOT 301config ACPI_PCI_SLOT
310 bool "PCI slot detection driver" 302 bool "PCI slot detection driver"
311 depends on SYSFS 303 depends on SYSFS
@@ -334,7 +326,7 @@ config X86_PM_TIMER
334 326
335config ACPI_CONTAINER 327config ACPI_CONTAINER
336 bool "Container and Module Devices" 328 bool "Container and Module Devices"
337 default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO) 329 default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
338 help 330 help
339 This driver supports ACPI Container and Module devices (IDs 331 This driver supports ACPI Container and Module devices (IDs
340 ACPI0004, PNP0A05, and PNP0A06). 332 ACPI0004, PNP0A05, and PNP0A06).
@@ -345,9 +337,8 @@ config ACPI_CONTAINER
345 the module will be called container. 337 the module will be called container.
346 338
347config ACPI_HOTPLUG_MEMORY 339config ACPI_HOTPLUG_MEMORY
348 tristate "Memory Hotplug" 340 bool "Memory Hotplug"
349 depends on MEMORY_HOTPLUG 341 depends on MEMORY_HOTPLUG
350 default n
351 help 342 help
352 This driver supports ACPI memory hotplug. The driver 343 This driver supports ACPI memory hotplug. The driver
353 fields notifications on ACPI memory devices (PNP0C80), 344 fields notifications on ACPI memory devices (PNP0C80),
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 474fcfeba66c..ecb743bf05a5 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -39,6 +39,7 @@ acpi-y += ec.o
39acpi-$(CONFIG_ACPI_DOCK) += dock.o 39acpi-$(CONFIG_ACPI_DOCK) += dock.o
40acpi-y += pci_root.o pci_link.o pci_irq.o 40acpi-y += pci_root.o pci_link.o pci_irq.o
41acpi-y += csrt.o 41acpi-y += csrt.o
42acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o
42acpi-y += acpi_platform.o 43acpi-y += acpi_platform.o
43acpi-y += power.o 44acpi-y += power.o
44acpi-y += event.o 45acpi-y += event.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
new file mode 100644
index 000000000000..b1c95422ce74
--- /dev/null
+++ b/drivers/acpi/acpi_lpss.c
@@ -0,0 +1,292 @@
1/*
2 * ACPI support for Intel Lynxpoint LPSS.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/clk-provider.h>
17#include <linux/err.h>
18#include <linux/io.h>
19#include <linux/platform_device.h>
20#include <linux/platform_data/clk-lpss.h>
21#include <linux/pm_runtime.h>
22
23#include "internal.h"
24
25ACPI_MODULE_NAME("acpi_lpss");
26
27#define LPSS_CLK_SIZE 0x04
28#define LPSS_LTR_SIZE 0x18
29
30/* Offsets relative to LPSS_PRIVATE_OFFSET */
31#define LPSS_GENERAL 0x08
32#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
33#define LPSS_SW_LTR 0x10
34#define LPSS_AUTO_LTR 0x14
35
36struct lpss_device_desc {
37 bool clk_required;
38 const char *clk_parent;
39 bool ltr_required;
40 unsigned int prv_offset;
41};
42
43struct lpss_private_data {
44 void __iomem *mmio_base;
45 resource_size_t mmio_size;
46 struct clk *clk;
47 const struct lpss_device_desc *dev_desc;
48};
49
50static struct lpss_device_desc lpt_dev_desc = {
51 .clk_required = true,
52 .clk_parent = "lpss_clk",
53 .prv_offset = 0x800,
54 .ltr_required = true,
55};
56
57static struct lpss_device_desc lpt_sdio_dev_desc = {
58 .prv_offset = 0x1000,
59 .ltr_required = true,
60};
61
62static const struct acpi_device_id acpi_lpss_device_ids[] = {
63 /* Lynxpoint LPSS devices */
64 { "INT33C0", (unsigned long)&lpt_dev_desc },
65 { "INT33C1", (unsigned long)&lpt_dev_desc },
66 { "INT33C2", (unsigned long)&lpt_dev_desc },
67 { "INT33C3", (unsigned long)&lpt_dev_desc },
68 { "INT33C4", (unsigned long)&lpt_dev_desc },
69 { "INT33C5", (unsigned long)&lpt_dev_desc },
70 { "INT33C6", (unsigned long)&lpt_sdio_dev_desc },
71 { "INT33C7", },
72
73 { }
74};
75
76static int is_memory(struct acpi_resource *res, void *not_used)
77{
78 struct resource r;
79 return !acpi_dev_resource_memory(res, &r);
80}
81
82/* LPSS main clock device. */
83static struct platform_device *lpss_clk_dev;
84
85static inline void lpt_register_clock_device(void)
86{
87 lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
88}
89
90static int register_device_clock(struct acpi_device *adev,
91 struct lpss_private_data *pdata)
92{
93 const struct lpss_device_desc *dev_desc = pdata->dev_desc;
94
95 if (!lpss_clk_dev)
96 lpt_register_clock_device();
97
98 if (!dev_desc->clk_parent || !pdata->mmio_base
99 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
100 return -ENODATA;
101
102 pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
103 dev_desc->clk_parent, 0,
104 pdata->mmio_base + dev_desc->prv_offset,
105 0, 0, NULL);
106 if (IS_ERR(pdata->clk))
107 return PTR_ERR(pdata->clk);
108
109 clk_register_clkdev(pdata->clk, NULL, dev_name(&adev->dev));
110 return 0;
111}
112
113static int acpi_lpss_create_device(struct acpi_device *adev,
114 const struct acpi_device_id *id)
115{
116 struct lpss_device_desc *dev_desc;
117 struct lpss_private_data *pdata;
118 struct resource_list_entry *rentry;
119 struct list_head resource_list;
120 int ret;
121
122 dev_desc = (struct lpss_device_desc *)id->driver_data;
123 if (!dev_desc)
124 return acpi_create_platform_device(adev, id);
125
126 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
127 if (!pdata)
128 return -ENOMEM;
129
130 INIT_LIST_HEAD(&resource_list);
131 ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
132 if (ret < 0)
133 goto err_out;
134
135 list_for_each_entry(rentry, &resource_list, node)
136 if (resource_type(&rentry->res) == IORESOURCE_MEM) {
137 pdata->mmio_size = resource_size(&rentry->res);
138 pdata->mmio_base = ioremap(rentry->res.start,
139 pdata->mmio_size);
140 pdata->dev_desc = dev_desc;
141 break;
142 }
143
144 acpi_dev_free_resource_list(&resource_list);
145
146 if (dev_desc->clk_required) {
147 ret = register_device_clock(adev, pdata);
148 if (ret) {
149 /*
150 * Skip the device, but don't terminate the namespace
151 * scan.
152 */
153 kfree(pdata);
154 return 0;
155 }
156 }
157
158 adev->driver_data = pdata;
159 ret = acpi_create_platform_device(adev, id);
160 if (ret > 0)
161 return ret;
162
163 adev->driver_data = NULL;
164
165 err_out:
166 kfree(pdata);
167 return ret;
168}
169
170static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
171{
172 struct acpi_device *adev;
173 struct lpss_private_data *pdata;
174 unsigned long flags;
175 int ret;
176
177 ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
178 if (WARN_ON(ret))
179 return ret;
180
181 spin_lock_irqsave(&dev->power.lock, flags);
182 if (pm_runtime_suspended(dev)) {
183 ret = -EAGAIN;
184 goto out;
185 }
186 pdata = acpi_driver_data(adev);
187 if (WARN_ON(!pdata || !pdata->mmio_base)) {
188 ret = -ENODEV;
189 goto out;
190 }
191 *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
192
193 out:
194 spin_unlock_irqrestore(&dev->power.lock, flags);
195 return ret;
196}
197
198static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
199 char *buf)
200{
201 u32 ltr_value = 0;
202 unsigned int reg;
203 int ret;
204
205 reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
206 ret = lpss_reg_read(dev, reg, &ltr_value);
207 if (ret)
208 return ret;
209
210 return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
211}
212
213static ssize_t lpss_ltr_mode_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
216 u32 ltr_mode = 0;
217 char *outstr;
218 int ret;
219
220 ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
221 if (ret)
222 return ret;
223
224 outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
225 return sprintf(buf, "%s\n", outstr);
226}
227
228static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
229static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
230static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
231
232static struct attribute *lpss_attrs[] = {
233 &dev_attr_auto_ltr.attr,
234 &dev_attr_sw_ltr.attr,
235 &dev_attr_ltr_mode.attr,
236 NULL,
237};
238
239static struct attribute_group lpss_attr_group = {
240 .attrs = lpss_attrs,
241 .name = "lpss_ltr",
242};
243
244static int acpi_lpss_platform_notify(struct notifier_block *nb,
245 unsigned long action, void *data)
246{
247 struct platform_device *pdev = to_platform_device(data);
248 struct lpss_private_data *pdata;
249 struct acpi_device *adev;
250 const struct acpi_device_id *id;
251 int ret = 0;
252
253 id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
254 if (!id || !id->driver_data)
255 return 0;
256
257 if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
258 return 0;
259
260 pdata = acpi_driver_data(adev);
261 if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
262 return 0;
263
264 if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
265 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
266 return 0;
267 }
268
269 if (action == BUS_NOTIFY_ADD_DEVICE)
270 ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group);
271 else if (action == BUS_NOTIFY_DEL_DEVICE)
272 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
273
274 return ret;
275}
276
277static struct notifier_block acpi_lpss_nb = {
278 .notifier_call = acpi_lpss_platform_notify,
279};
280
281static struct acpi_scan_handler lpss_handler = {
282 .ids = acpi_lpss_device_ids,
283 .attach = acpi_lpss_create_device,
284};
285
286void __init acpi_lpss_init(void)
287{
288 if (!lpt_clk_init()) {
289 bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
290 acpi_scan_add_handler(&lpss_handler);
291 }
292}
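
For context, register_device_clock() above registers a per-device gate clock for each LPSS device and keys the clkdev lookup on dev_name(&adev->dev), which is also the name given to the platform device created for the ACPI node. A minimal consumer-side sketch, not part of this diff (the probe function name is hypothetical and error handling is trimmed to the essentials):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_lpss_consumer_probe(struct platform_device *pdev)
{
	/* clk_register_clkdev() keyed the lookup on the device name, so a
	 * NULL con_id is enough to find the per-device gate clock. */
	struct clk *clk = clk_get(&pdev->dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* ungate the LPSS functional clock */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... normal device setup would follow here ... */
	return 0;
}
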
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index da1f82b445e0..5e6301e94920 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com> 2 * Copyright (C) 2004, 2013 Intel Corporation
3 * Author: Naveen B S <naveen.b.s@intel.com>
4 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
3 * 5 *
4 * All rights reserved. 6 * All rights reserved.
5 * 7 *
@@ -25,14 +27,10 @@
25 * ranges. 27 * ranges.
26 */ 28 */
27 29
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/types.h>
32#include <linux/memory_hotplug.h>
33#include <linux/slab.h>
34#include <linux/acpi.h> 30#include <linux/acpi.h>
35#include <acpi/acpi_drivers.h> 31#include <linux/memory_hotplug.h>
32
33#include "internal.h"
36 34
37#define ACPI_MEMORY_DEVICE_CLASS "memory" 35#define ACPI_MEMORY_DEVICE_CLASS "memory"
38#define ACPI_MEMORY_DEVICE_HID "PNP0C80" 36#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
@@ -44,32 +42,28 @@
44#define PREFIX "ACPI:memory_hp:" 42#define PREFIX "ACPI:memory_hp:"
45 43
46ACPI_MODULE_NAME("acpi_memhotplug"); 44ACPI_MODULE_NAME("acpi_memhotplug");
47MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
48MODULE_DESCRIPTION("Hotplug Mem Driver");
49MODULE_LICENSE("GPL");
50 45
51/* Memory Device States */ 46/* Memory Device States */
52#define MEMORY_INVALID_STATE 0 47#define MEMORY_INVALID_STATE 0
53#define MEMORY_POWER_ON_STATE 1 48#define MEMORY_POWER_ON_STATE 1
54#define MEMORY_POWER_OFF_STATE 2 49#define MEMORY_POWER_OFF_STATE 2
55 50
56static int acpi_memory_device_add(struct acpi_device *device); 51static int acpi_memory_device_add(struct acpi_device *device,
57static int acpi_memory_device_remove(struct acpi_device *device); 52 const struct acpi_device_id *not_used);
53static void acpi_memory_device_remove(struct acpi_device *device);
58 54
59static const struct acpi_device_id memory_device_ids[] = { 55static const struct acpi_device_id memory_device_ids[] = {
60 {ACPI_MEMORY_DEVICE_HID, 0}, 56 {ACPI_MEMORY_DEVICE_HID, 0},
61 {"", 0}, 57 {"", 0},
62}; 58};
63MODULE_DEVICE_TABLE(acpi, memory_device_ids);
64 59
65static struct acpi_driver acpi_memory_device_driver = { 60static struct acpi_scan_handler memory_device_handler = {
66 .name = "acpi_memhotplug",
67 .class = ACPI_MEMORY_DEVICE_CLASS,
68 .ids = memory_device_ids, 61 .ids = memory_device_ids,
69 .ops = { 62 .attach = acpi_memory_device_add,
70 .add = acpi_memory_device_add, 63 .detach = acpi_memory_device_remove,
71 .remove = acpi_memory_device_remove, 64 .hotplug = {
72 }, 65 .enabled = true,
66 },
73}; 67};
74 68
75struct acpi_memory_info { 69struct acpi_memory_info {
@@ -79,7 +73,6 @@ struct acpi_memory_info {
79 unsigned short caching; /* memory cache attribute */ 73 unsigned short caching; /* memory cache attribute */
80 unsigned short write_protect; /* memory read/write attribute */ 74 unsigned short write_protect; /* memory read/write attribute */
81 unsigned int enabled:1; 75 unsigned int enabled:1;
82 unsigned int failed:1;
83}; 76};
84 77
85struct acpi_memory_device { 78struct acpi_memory_device {
@@ -153,48 +146,6 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
153 return 0; 146 return 0;
154} 147}
155 148
156static int acpi_memory_get_device(acpi_handle handle,
157 struct acpi_memory_device **mem_device)
158{
159 struct acpi_device *device = NULL;
160 int result = 0;
161
162 acpi_scan_lock_acquire();
163
164 acpi_bus_get_device(handle, &device);
165 if (device)
166 goto end;
167
168 /*
169 * Now add the notified device. This creates the acpi_device
170 * and invokes .add function
171 */
172 result = acpi_bus_scan(handle);
173 if (result) {
174 acpi_handle_warn(handle, "ACPI namespace scan failed\n");
175 result = -EINVAL;
176 goto out;
177 }
178 result = acpi_bus_get_device(handle, &device);
179 if (result) {
180 acpi_handle_warn(handle, "Missing device object\n");
181 result = -EINVAL;
182 goto out;
183 }
184
185 end:
186 *mem_device = acpi_driver_data(device);
187 if (!(*mem_device)) {
188 dev_err(&device->dev, "driver data not found\n");
189 result = -ENODEV;
190 goto out;
191 }
192
193 out:
194 acpi_scan_lock_release();
195 return result;
196}
197
198static int acpi_memory_check_device(struct acpi_memory_device *mem_device) 149static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
199{ 150{
200 unsigned long long current_status; 151 unsigned long long current_status;
@@ -249,13 +200,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
249 * returns -EEXIST. If add_memory() returns the other error, it 200 * returns -EEXIST. If add_memory() returns the other error, it
250 * means that this memory block is not used by the kernel. 201 * means that this memory block is not used by the kernel.
251 */ 202 */
252 if (result && result != -EEXIST) { 203 if (result && result != -EEXIST)
253 info->failed = 1;
254 continue; 204 continue;
255 }
256 205
257 if (!result) 206 info->enabled = 1;
258 info->enabled = 1; 207
259 /* 208 /*
260 * Add num_enable even if add_memory() returns -EEXIST, so the 209 * Add num_enable even if add_memory() returns -EEXIST, so the
261 * device is bound to this driver. 210 * device is bound to this driver.
@@ -286,16 +235,8 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
286 nid = acpi_get_node(mem_device->device->handle); 235 nid = acpi_get_node(mem_device->device->handle);
287 236
288 list_for_each_entry_safe(info, n, &mem_device->res_list, list) { 237 list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
289 if (info->failed)
290 /* The kernel does not use this memory block */
291 continue;
292
293 if (!info->enabled) 238 if (!info->enabled)
294 /* 239 continue;
295 * The kernel uses this memory block, but it may be not
296 * managed by us.
297 */
298 return -EBUSY;
299 240
300 if (nid < 0) 241 if (nid < 0)
301 nid = memory_add_physaddr_to_nid(info->start_addr); 242 nid = memory_add_physaddr_to_nid(info->start_addr);
@@ -310,95 +251,21 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
310 return result; 251 return result;
311} 252}
312 253
313static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
314{
315 struct acpi_memory_device *mem_device;
316 struct acpi_device *device;
317 struct acpi_eject_event *ej_event = NULL;
318 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
319 acpi_status status;
320
321 switch (event) {
322 case ACPI_NOTIFY_BUS_CHECK:
323 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
324 "\nReceived BUS CHECK notification for device\n"));
325 /* Fall Through */
326 case ACPI_NOTIFY_DEVICE_CHECK:
327 if (event == ACPI_NOTIFY_DEVICE_CHECK)
328 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
329 "\nReceived DEVICE CHECK notification for device\n"));
330 if (acpi_memory_get_device(handle, &mem_device)) {
331 acpi_handle_err(handle, "Cannot find driver data\n");
332 break;
333 }
334
335 ost_code = ACPI_OST_SC_SUCCESS;
336 break;
337
338 case ACPI_NOTIFY_EJECT_REQUEST:
339 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
340 "\nReceived EJECT REQUEST notification for device\n"));
341
342 status = AE_ERROR;
343 acpi_scan_lock_acquire();
344
345 if (acpi_bus_get_device(handle, &device)) {
346 acpi_handle_err(handle, "Device doesn't exist\n");
347 goto unlock;
348 }
349 mem_device = acpi_driver_data(device);
350 if (!mem_device) {
351 acpi_handle_err(handle, "Driver Data is NULL\n");
352 goto unlock;
353 }
354
355 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
356 if (!ej_event) {
357 pr_err(PREFIX "No memory, dropping EJECT\n");
358 goto unlock;
359 }
360
361 get_device(&device->dev);
362 ej_event->device = device;
363 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
364 /* The eject is carried out asynchronously. */
365 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
366 ej_event);
367 if (ACPI_FAILURE(status)) {
368 put_device(&device->dev);
369 kfree(ej_event);
370 }
371
372 unlock:
373 acpi_scan_lock_release();
374 if (ACPI_SUCCESS(status))
375 return;
376 default:
377 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
378 "Unsupported event [0x%x]\n", event));
379
380 /* non-hotplug event; possibly handled by other handler */
381 return;
382 }
383
384 /* Inform firmware that the hotplug operation has completed */
385 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
386}
387
388static void acpi_memory_device_free(struct acpi_memory_device *mem_device) 254static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
389{ 255{
390 if (!mem_device) 256 if (!mem_device)
391 return; 257 return;
392 258
393 acpi_memory_free_device_resources(mem_device); 259 acpi_memory_free_device_resources(mem_device);
260 mem_device->device->driver_data = NULL;
394 kfree(mem_device); 261 kfree(mem_device);
395} 262}
396 263
397static int acpi_memory_device_add(struct acpi_device *device) 264static int acpi_memory_device_add(struct acpi_device *device,
265 const struct acpi_device_id *not_used)
398{ 266{
267 struct acpi_memory_device *mem_device;
399 int result; 268 int result;
400 struct acpi_memory_device *mem_device = NULL;
401
402 269
403 if (!device) 270 if (!device)
404 return -EINVAL; 271 return -EINVAL;
@@ -423,147 +290,36 @@ static int acpi_memory_device_add(struct acpi_device *device)
423 /* Set the device state */ 290 /* Set the device state */
424 mem_device->state = MEMORY_POWER_ON_STATE; 291 mem_device->state = MEMORY_POWER_ON_STATE;
425 292
426 pr_debug("%s\n", acpi_device_name(device)); 293 result = acpi_memory_check_device(mem_device);
294 if (result) {
295 acpi_memory_device_free(mem_device);
296 return 0;
297 }
427 298
428 if (!acpi_memory_check_device(mem_device)) { 299 result = acpi_memory_enable_device(mem_device);
429 /* call add_memory func */ 300 if (result) {
430 result = acpi_memory_enable_device(mem_device); 301 dev_err(&device->dev, "acpi_memory_enable_device() error\n");
431 if (result) { 302 acpi_memory_device_free(mem_device);
432 dev_err(&device->dev, 303 return -ENODEV;
433 "Error in acpi_memory_enable_device\n");
434 acpi_memory_device_free(mem_device);
435 }
436 } 304 }
437 return result; 305
306 dev_dbg(&device->dev, "Memory device configured by ACPI\n");
307 return 1;
438} 308}
439 309
440static int acpi_memory_device_remove(struct acpi_device *device) 310static void acpi_memory_device_remove(struct acpi_device *device)
441{ 311{
442 struct acpi_memory_device *mem_device = NULL; 312 struct acpi_memory_device *mem_device;
443 int result;
444 313
445 if (!device || !acpi_driver_data(device)) 314 if (!device || !acpi_driver_data(device))
446 return -EINVAL; 315 return;
447 316
448 mem_device = acpi_driver_data(device); 317 mem_device = acpi_driver_data(device);
449 318 acpi_memory_remove_memory(mem_device);
450 result = acpi_memory_remove_memory(mem_device);
451 if (result)
452 return result;
453
454 acpi_memory_device_free(mem_device); 319 acpi_memory_device_free(mem_device);
455
456 return 0;
457}
458
459/*
460 * Helper function to check for memory device
461 */
462static acpi_status is_memory_device(acpi_handle handle)
463{
464 char *hardware_id;
465 acpi_status status;
466 struct acpi_device_info *info;
467
468 status = acpi_get_object_info(handle, &info);
469 if (ACPI_FAILURE(status))
470 return status;
471
472 if (!(info->valid & ACPI_VALID_HID)) {
473 kfree(info);
474 return AE_ERROR;
475 }
476
477 hardware_id = info->hardware_id.string;
478 if ((hardware_id == NULL) ||
479 (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
480 status = AE_ERROR;
481
482 kfree(info);
483 return status;
484}
485
486static acpi_status
487acpi_memory_register_notify_handler(acpi_handle handle,
488 u32 level, void *ctxt, void **retv)
489{
490 acpi_status status;
491
492
493 status = is_memory_device(handle);
494 if (ACPI_FAILURE(status))
495 return AE_OK; /* continue */
496
497 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
498 acpi_memory_device_notify, NULL);
499 /* continue */
500 return AE_OK;
501}
502
503static acpi_status
504acpi_memory_deregister_notify_handler(acpi_handle handle,
505 u32 level, void *ctxt, void **retv)
506{
507 acpi_status status;
508
509
510 status = is_memory_device(handle);
511 if (ACPI_FAILURE(status))
512 return AE_OK; /* continue */
513
514 status = acpi_remove_notify_handler(handle,
515 ACPI_SYSTEM_NOTIFY,
516 acpi_memory_device_notify);
517
518 return AE_OK; /* continue */
519}
520
521static int __init acpi_memory_device_init(void)
522{
523 int result;
524 acpi_status status;
525
526
527 result = acpi_bus_register_driver(&acpi_memory_device_driver);
528
529 if (result < 0)
530 return -ENODEV;
531
532 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
533 ACPI_UINT32_MAX,
534 acpi_memory_register_notify_handler, NULL,
535 NULL, NULL);
536
537 if (ACPI_FAILURE(status)) {
538 ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
539 acpi_bus_unregister_driver(&acpi_memory_device_driver);
540 return -ENODEV;
541 }
542
543 return 0;
544} 320}
545 321
546static void __exit acpi_memory_device_exit(void) 322void __init acpi_memory_hotplug_init(void)
547{ 323{
548 acpi_status status; 324 acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
549
550
551 /*
552 * Adding this to un-install notification handlers for all the device
553 * handles.
554 */
555 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
556 ACPI_UINT32_MAX,
557 acpi_memory_deregister_notify_handler, NULL,
558 NULL, NULL);
559
560 if (ACPI_FAILURE(status))
561 ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
562
563 acpi_bus_unregister_driver(&acpi_memory_device_driver);
564
565 return;
566} 325}
567
568module_init(acpi_memory_device_init);
569module_exit(acpi_memory_device_exit);
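
The conversion above replaces the old acpi_driver plus namespace-walk notify handlers with the struct acpi_scan_handler interface (an ID table, attach()/detach() callbacks, and an optional hotplug profile), the same pattern acpi_lpss.c uses. A minimal sketch of that pattern with a hypothetical "XYZ0001" hardware ID and no-op callbacks, purely for illustration:

#include <linux/acpi.h>

#include "internal.h"

static const struct acpi_device_id example_device_ids[] = {
	{"XYZ0001", 0},		/* hypothetical hardware ID */
	{"", 0},
};

/* attach() returns 1 when the handler claims the device, 0 to skip it
 * without aborting the namespace scan, or a negative error code. */
static int example_device_attach(struct acpi_device *adev,
				 const struct acpi_device_id *not_used)
{
	return 1;
}

static void example_device_detach(struct acpi_device *adev)
{
	/* Undo whatever attach() set up. */
}

static struct acpi_scan_handler example_handler = {
	.ids = example_device_ids,
	.attach = example_device_attach,
	.detach = example_device_detach,
};

void __init example_handler_init(void)
{
	acpi_scan_add_handler(&example_handler);
}
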
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 31de1043eea0..27bb6a91de5f 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -236,7 +236,7 @@ static int create_power_saving_task(void)
236 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, 236 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
237 (void *)(unsigned long)ps_tsk_num, 237 (void *)(unsigned long)ps_tsk_num,
238 "acpi_pad/%d", ps_tsk_num); 238 "acpi_pad/%d", ps_tsk_num);
239 rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0; 239 rc = PTR_RET(ps_tsks[ps_tsk_num]);
240 if (!rc) 240 if (!rc)
241 ps_tsk_num++; 241 ps_tsk_num++;
242 else 242 else
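
The one-line acpi_pad change above swaps the open-coded IS_ERR()/PTR_ERR() test for PTR_RET(). Assuming the conventional <linux/err.h> definition of that helper, the two forms are equivalent; roughly:

#include <linux/err.h>

/* rc = PTR_RET(ptr) behaves like: */
static inline int ptr_ret_expanded(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* propagate the encoded errno */
	return 0;			/* valid pointer: report success */
}
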
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 26fce4b8a632..fafec5ddf17f 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -22,9 +22,6 @@
22 22
23ACPI_MODULE_NAME("platform"); 23ACPI_MODULE_NAME("platform");
24 24
25/* Flags for acpi_create_platform_device */
26#define ACPI_PLATFORM_CLK BIT(0)
27
28/* 25/*
29 * The following ACPI IDs are known to be suitable for representing as 26 * The following ACPI IDs are known to be suitable for representing as
30 * platform devices. 27 * platform devices.
@@ -33,33 +30,9 @@ static const struct acpi_device_id acpi_platform_device_ids[] = {
33 30
34 { "PNP0D40" }, 31 { "PNP0D40" },
35 32
36 /* Haswell LPSS devices */
37 { "INT33C0", ACPI_PLATFORM_CLK },
38 { "INT33C1", ACPI_PLATFORM_CLK },
39 { "INT33C2", ACPI_PLATFORM_CLK },
40 { "INT33C3", ACPI_PLATFORM_CLK },
41 { "INT33C4", ACPI_PLATFORM_CLK },
42 { "INT33C5", ACPI_PLATFORM_CLK },
43 { "INT33C6", ACPI_PLATFORM_CLK },
44 { "INT33C7", ACPI_PLATFORM_CLK },
45
46 { } 33 { }
47}; 34};
48 35
49static int acpi_create_platform_clks(struct acpi_device *adev)
50{
51 static struct platform_device *pdev;
52
53 /* Create Lynxpoint LPSS clocks */
54 if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
55 pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
56 if (IS_ERR(pdev))
57 return PTR_ERR(pdev);
58 }
59
60 return 0;
61}
62
63/** 36/**
64 * acpi_create_platform_device - Create platform device for ACPI device node 37 * acpi_create_platform_device - Create platform device for ACPI device node
65 * @adev: ACPI device node to create a platform device for. 38 * @adev: ACPI device node to create a platform device for.
@@ -71,10 +44,9 @@ static int acpi_create_platform_clks(struct acpi_device *adev)
71 * 44 *
72 * Name of the platform device will be the same as @adev's. 45 * Name of the platform device will be the same as @adev's.
73 */ 46 */
74static int acpi_create_platform_device(struct acpi_device *adev, 47int acpi_create_platform_device(struct acpi_device *adev,
75 const struct acpi_device_id *id) 48 const struct acpi_device_id *id)
76{ 49{
77 unsigned long flags = id->driver_data;
78 struct platform_device *pdev = NULL; 50 struct platform_device *pdev = NULL;
79 struct acpi_device *acpi_parent; 51 struct acpi_device *acpi_parent;
80 struct platform_device_info pdevinfo; 52 struct platform_device_info pdevinfo;
@@ -83,14 +55,6 @@ static int acpi_create_platform_device(struct acpi_device *adev,
83 struct resource *resources; 55 struct resource *resources;
84 int count; 56 int count;
85 57
86 if (flags & ACPI_PLATFORM_CLK) {
87 int ret = acpi_create_platform_clks(adev);
88 if (ret) {
89 dev_err(&adev->dev, "failed to create clocks\n");
90 return ret;
91 }
92 }
93
94 /* If the ACPI node already has a physical device attached, skip it. */ 58 /* If the ACPI node already has a physical device attached, skip it. */
95 if (adev->physical_node_count) 59 if (adev->physical_node_count)
96 return 0; 60 return 0;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1b9bf5085a2..7ddf29eca9f5 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -83,6 +83,7 @@ acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
83acpi-y += \ 83acpi-y += \
84 nsaccess.o \ 84 nsaccess.o \
85 nsalloc.o \ 85 nsalloc.o \
86 nsconvert.o \
86 nsdump.o \ 87 nsdump.o \
87 nseval.o \ 88 nseval.o \
88 nsinit.o \ 89 nsinit.o \
@@ -160,6 +161,7 @@ acpi-y += \
160 utobject.o \ 161 utobject.o \
161 utosi.o \ 162 utosi.o \
162 utownerid.o \ 163 utownerid.o \
164 utpredef.o \
163 utresrc.o \ 165 utresrc.o \
164 utstate.o \ 166 utstate.o \
165 utstring.o \ 167 utstring.o \
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ecb49927b817..07160928ca25 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -224,6 +224,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
224 */ 224 */
225ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */ 225ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
226ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ 226ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
227ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
227 228
228/* Mutex for _OSI support */ 229/* Mutex for _OSI support */
229 230
@@ -413,10 +414,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;
413 414
414#ifdef ACPI_DISASSEMBLER 415#ifdef ACPI_DISASSEMBLER
415 416
416u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE); 417ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
417 418
418ACPI_EXTERN u8 acpi_gbl_db_opt_disasm; 419ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
419ACPI_EXTERN u8 acpi_gbl_db_opt_verbose; 420ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
421ACPI_EXTERN u8 acpi_gbl_num_external_methods;
422ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
420ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list; 423ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
421ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list; 424ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
422#endif 425#endif
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 805f419086ab..d5bfbd331bfd 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -294,6 +294,8 @@ acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
294#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */ 294#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */
295#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF 295#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF
296 296
297#pragma pack(1)
298
297/* 299/*
298 * Information structure for ACPI predefined names. 300 * Information structure for ACPI predefined names.
299 * Each entry in the table contains the following items: 301 * Each entry in the table contains the following items:
@@ -304,7 +306,7 @@ acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
304 */ 306 */
305struct acpi_name_info { 307struct acpi_name_info {
306 char name[ACPI_NAME_SIZE]; 308 char name[ACPI_NAME_SIZE];
307 u8 param_count; 309 u16 argument_list;
308 u8 expected_btypes; 310 u8 expected_btypes;
309}; 311};
310 312
@@ -327,7 +329,7 @@ struct acpi_package_info {
327 u8 count1; 329 u8 count1;
328 u8 object_type2; 330 u8 object_type2;
329 u8 count2; 331 u8 count2;
330 u8 reserved; 332 u16 reserved;
331}; 333};
332 334
333/* Used for ACPI_PTYPE2_FIXED */ 335/* Used for ACPI_PTYPE2_FIXED */
@@ -336,6 +338,7 @@ struct acpi_package_info2 {
336 u8 type; 338 u8 type;
337 u8 count; 339 u8 count;
338 u8 object_type[4]; 340 u8 object_type[4];
341 u8 reserved;
339}; 342};
340 343
341/* Used for ACPI_PTYPE1_OPTION */ 344/* Used for ACPI_PTYPE1_OPTION */
@@ -345,7 +348,7 @@ struct acpi_package_info3 {
345 u8 count; 348 u8 count;
346 u8 object_type[2]; 349 u8 object_type[2];
347 u8 tail_object_type; 350 u8 tail_object_type;
348 u8 reserved; 351 u16 reserved;
349}; 352};
350 353
351union acpi_predefined_info { 354union acpi_predefined_info {
@@ -355,6 +358,10 @@ union acpi_predefined_info {
355 struct acpi_package_info3 ret_info3; 358 struct acpi_package_info3 ret_info3;
356}; 359};
357 360
361/* Reset to default packing */
362
363#pragma pack()
364
358/* Data block used during object validation */ 365/* Data block used during object validation */
359 366
360struct acpi_predefined_data { 367struct acpi_predefined_data {
@@ -363,6 +370,7 @@ struct acpi_predefined_data {
363 union acpi_operand_object *parent_package; 370 union acpi_operand_object *parent_package;
364 struct acpi_namespace_node *node; 371 struct acpi_namespace_node *node;
365 u32 flags; 372 u32 flags;
373 u32 return_btype;
366 u8 node_flags; 374 u8 node_flags;
367}; 375};
368 376
@@ -371,6 +379,20 @@ struct acpi_predefined_data {
371#define ACPI_OBJECT_REPAIRED 1 379#define ACPI_OBJECT_REPAIRED 1
372#define ACPI_OBJECT_WRAPPED 2 380#define ACPI_OBJECT_WRAPPED 2
373 381
382/* Return object auto-repair info */
383
384typedef acpi_status(*acpi_object_converter) (union acpi_operand_object
385 *original_object,
386 union acpi_operand_object
387 **converted_object);
388
389struct acpi_simple_repair_info {
390 char name[ACPI_NAME_SIZE];
391 u32 unexpected_btypes;
392 u32 package_index;
393 acpi_object_converter object_converter;
394};
395
374/* 396/*
375 * Bitmapped return value types 397 * Bitmapped return value types
376 * Note: the actual data types must be contiguous, a loop in nspredef.c 398 * Note: the actual data types must be contiguous, a loop in nspredef.c
@@ -1037,6 +1059,7 @@ struct acpi_external_list {
1037 u16 length; 1059 u16 length;
1038 u8 type; 1060 u8 type;
1039 u8 flags; 1061 u8 flags;
1062 u8 resolved;
1040}; 1063};
1041 1064
1042/* Values for Flags field above */ 1065/* Values for Flags field above */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ed7943b9044f..53666bd9193d 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -322,10 +322,12 @@
322 * where a pointer to an object of type union acpi_operand_object can also 322 * where a pointer to an object of type union acpi_operand_object can also
323 * appear. This macro is used to distinguish them. 323 * appear. This macro is used to distinguish them.
324 * 324 *
325 * The "Descriptor" field is the first field in both structures. 325 * The "DescriptorType" field is the second field in both structures.
326 */ 326 */
327#define ACPI_GET_DESCRIPTOR_PTR(d) (((union acpi_descriptor *)(void *)(d))->common.common_pointer)
328#define ACPI_SET_DESCRIPTOR_PTR(d, p) (((union acpi_descriptor *)(void *)(d))->common.common_pointer = (p))
327#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type) 329#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
328#define ACPI_SET_DESCRIPTOR_TYPE(d, t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t) 330#define ACPI_SET_DESCRIPTOR_TYPE(d, t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = (t))
329 331
330/* 332/*
331 * Macros for the master AML opcode table 333 * Macros for the master AML opcode table
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 02cd5482ff8b..d2e491876bc0 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -167,6 +167,29 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent);
167int acpi_ns_compare_names(char *name1, char *name2); 167int acpi_ns_compare_names(char *name1, char *name2);
168 168
169/* 169/*
170 * nsconvert - Dynamic object conversion routines
171 */
172acpi_status
173acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
174 union acpi_operand_object **return_object);
175
176acpi_status
177acpi_ns_convert_to_string(union acpi_operand_object *original_object,
178 union acpi_operand_object **return_object);
179
180acpi_status
181acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
182 union acpi_operand_object **return_object);
183
184acpi_status
185acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
186 union acpi_operand_object **return_object);
187
188acpi_status
189acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
190 union acpi_operand_object **return_object);
191
192/*
170 * nsdump - Namespace dump/print utilities 193 * nsdump - Namespace dump/print utilities
171 */ 194 */
172#ifdef ACPI_FUTURE_USAGE 195#ifdef ACPI_FUTURE_USAGE
@@ -208,10 +231,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
208 acpi_status return_status, 231 acpi_status return_status,
209 union acpi_operand_object **return_object); 232 union acpi_operand_object **return_object);
210 233
211const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
212 acpi_namespace_node
213 *node);
214
215void 234void
216acpi_ns_check_parameter_count(char *pathname, 235acpi_ns_check_parameter_count(char *pathname,
217 struct acpi_namespace_node *node, 236 struct acpi_namespace_node *node,
@@ -289,7 +308,7 @@ acpi_ns_get_attached_data(struct acpi_namespace_node *node,
289 * predefined methods/objects 308 * predefined methods/objects
290 */ 309 */
291acpi_status 310acpi_status
292acpi_ns_repair_object(struct acpi_predefined_data *data, 311acpi_ns_simple_repair(struct acpi_predefined_data *data,
293 u32 expected_btypes, 312 u32 expected_btypes,
294 u32 package_index, 313 u32 package_index,
295 union acpi_operand_object **return_object_ptr); 314 union acpi_operand_object **return_object_ptr);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 752cc40cdc1e..b22b70944fd6 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -56,7 +56,7 @@
56 * object type 56 * object type
57 * count 57 * count
58 * 58 *
59 * ACPI_PTYPE1_VAR: Variable-length length: 59 * ACPI_PTYPE1_VAR: Variable-length length. Zero-length package is allowed:
60 * object type (Int/Buf/Ref) 60 * object type (Int/Buf/Ref)
61 * 61 *
62 * ACPI_PTYPE1_OPTION: Package has some required and some optional elements 62 * ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -66,14 +66,16 @@
66 * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each 66 * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
67 * of the different types describe the contents of each of the sub-packages. 67 * of the different types describe the contents of each of the sub-packages.
68 * 68 *
69 * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types: 69 * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
70 * parent package is allowed:
70 * object type 71 * object type
71 * count 72 * count
72 * object type 73 * object type
73 * count 74 * count
74 * (Used for _ALR,_MLS,_PSS,_TRT,_TSS) 75 * (Used for _ALR,_MLS,_PSS,_TRT,_TSS)
75 * 76 *
76 * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element: 77 * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element.
78 * Zero-length parent package is allowed:
77 * object type 79 * object type
78 * (Used for _CSD,_PSD,_TSD) 80 * (Used for _CSD,_PSD,_TSD)
79 * 81 *
@@ -84,17 +86,19 @@
84 * count 86 * count
85 * (Used for _CST) 87 * (Used for _CST)
86 * 88 *
87 * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length 89 * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length. Zero-length
90 * parent package is allowed.
88 * (Used for _PRT) 91 * (Used for _PRT)
89 * 92 *
90 * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length 93 * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length.
94 * Zero-length parent package is allowed:
91 * (Used for _HPX) 95 * (Used for _HPX)
92 * 96 *
93 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length 97 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
94 * (Used for _ART, _FPS) 98 * (Used for _ART, _FPS)
95 * 99 *
96 * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements 100 * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
97 * followed by an optional element 101 * followed by an optional element. Zero-length parent package is allowed.
98 * object type 102 * object type
99 * count 103 * count
100 * object type 104 * object type
@@ -116,8 +120,47 @@ enum acpi_return_package_types {
116 ACPI_PTYPE2_FIX_VAR = 10 120 ACPI_PTYPE2_FIX_VAR = 10
117}; 121};
118 122
123/* Support macros for users of the predefined info table */
124
125#define METHOD_PREDEF_ARGS_MAX 4
126#define METHOD_ARG_BIT_WIDTH 3
127#define METHOD_ARG_MASK 0x0007
128#define ARG_COUNT_IS_MINIMUM 0x8000
129#define METHOD_MAX_ARG_TYPE ACPI_TYPE_PACKAGE
130
131#define METHOD_GET_COUNT(arg_list) (arg_list & METHOD_ARG_MASK)
132#define METHOD_GET_NEXT_ARG(arg_list) (arg_list >> METHOD_ARG_BIT_WIDTH)
133
134/* Macros used to build the predefined info table */
135
136#define METHOD_0ARGS 0
137#define METHOD_1ARGS(a1) (1 | (a1 << 3))
138#define METHOD_2ARGS(a1,a2) (2 | (a1 << 3) | (a2 << 6))
139#define METHOD_3ARGS(a1,a2,a3) (3 | (a1 << 3) | (a2 << 6) | (a3 << 9))
140#define METHOD_4ARGS(a1,a2,a3,a4) (4 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12))
141
142#define METHOD_RETURNS(type) (type)
143#define METHOD_NO_RETURN_VALUE 0
144
145#define PACKAGE_INFO(a,b,c,d,e,f) {{{(a),(b),(c),(d)}, ((((u16)(f)) << 8) | (e)), 0}}
146
147/* Support macros for the resource descriptor info table */
148
149#define WIDTH_1 0x0001
150#define WIDTH_2 0x0002
151#define WIDTH_3 0x0004
152#define WIDTH_8 0x0008
153#define WIDTH_16 0x0010
154#define WIDTH_32 0x0020
155#define WIDTH_64 0x0040
156#define VARIABLE_DATA 0x0080
157#define NUM_RESOURCE_WIDTHS 8
158
159#define WIDTH_ADDRESS WIDTH_16 | WIDTH_32 | WIDTH_64
160
119#ifdef ACPI_CREATE_PREDEFINED_TABLE 161#ifdef ACPI_CREATE_PREDEFINED_TABLE
120/* 162/******************************************************************************
163 *
121 * Predefined method/object information table. 164 * Predefined method/object information table.
122 * 165 *
123 * These are the names that can actually be evaluated via acpi_evaluate_object. 166 * These are the names that can actually be evaluated via acpi_evaluate_object.
@@ -125,23 +168,24 @@ enum acpi_return_package_types {
125 * 168 *
126 * 1) Predefined/Reserved names that are never evaluated via 169 * 1) Predefined/Reserved names that are never evaluated via
127 * acpi_evaluate_object: 170 * acpi_evaluate_object:
128 * _Lxx and _Exx GPE methods 171 * _Lxx and _Exx GPE methods
129 * _Qxx EC methods 172 * _Qxx EC methods
130 * _T_x compiler temporary variables 173 * _T_x compiler temporary variables
174 * _Wxx wake events
131 * 175 *
132 * 2) Predefined names that never actually exist within the AML code: 176 * 2) Predefined names that never actually exist within the AML code:
133 * Predefined resource descriptor field names 177 * Predefined resource descriptor field names
134 * 178 *
135 * 3) Predefined names that are implemented within ACPICA: 179 * 3) Predefined names that are implemented within ACPICA:
136 * _OSI 180 * _OSI
137 *
138 * 4) Some predefined names that are not documented within the ACPI spec.
139 * _WDG, _WED
140 * 181 *
141 * The main entries in the table each contain the following items: 182 * The main entries in the table each contain the following items:
142 * 183 *
143 * name - The ACPI reserved name 184 * name - The ACPI reserved name
144 * param_count - Number of arguments to the method 185 * argument_list - Contains (in 16 bits), the number of required
186 * arguments to the method (3 bits), and a 3-bit type
187 * field for each argument (up to 4 arguments). The
188 * METHOD_?ARGS macros generate the correct packed data.
145 * expected_btypes - Allowed type(s) for the return value. 189 * expected_btypes - Allowed type(s) for the return value.
146 * 0 means that no return value is expected. 190 * 0 means that no return value is expected.
147 * 191 *
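
The argument_list field described above packs the required argument count (low 3 bits) and up to four 3-bit argument types into a single 16-bit value. A standalone sketch of how the packing and the METHOD_GET_COUNT()/METHOD_GET_NEXT_ARG() accessors round-trip; the macros are re-declared locally so the example compiles on its own, and the ACPI_TYPE_* values follow ACPICA's actypes.h:

#include <stdio.h>

/* Local copies of the acpredef.h helpers, for illustration only. */
#define METHOD_ARG_MASK			0x0007
#define METHOD_ARG_BIT_WIDTH		3
#define METHOD_2ARGS(a1, a2)		(2 | ((a1) << 3) | ((a2) << 6))
#define METHOD_GET_COUNT(arg_list)	((arg_list) & METHOD_ARG_MASK)
#define METHOD_GET_NEXT_ARG(arg_list)	((arg_list) >> METHOD_ARG_BIT_WIDTH)

#define ACPI_TYPE_INTEGER		0x01	/* per ACPICA actypes.h */
#define ACPI_TYPE_STRING		0x02

int main(void)
{
	unsigned short arg_list = METHOD_2ARGS(ACPI_TYPE_INTEGER,
					       ACPI_TYPE_STRING);
	unsigned int count = METHOD_GET_COUNT(arg_list);	/* 2 */
	unsigned int i;

	for (i = 0; i < count; i++) {
		arg_list = METHOD_GET_NEXT_ARG(arg_list);
		printf("arg %u: type %u\n", i, arg_list & METHOD_ARG_MASK);
	}
	return 0;
}
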
@@ -151,256 +195,511 @@ enum acpi_return_package_types {
151 * overall size of the stored data. 195 * overall size of the stored data.
152 * 196 *
153 * Note: The additional braces are intended to promote portability. 197 * Note: The additional braces are intended to promote portability.
154 */ 198 *
155static const union acpi_predefined_info predefined_names[] = { 199 * Note2: Table is used by the kernel-resident subsystem, the iASL compiler,
156 {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, 200 * and the acpi_help utility.
157 {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, 201 *
158 {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, 202 * TBD: _PRT - currently ignore reversed entries. Attempt to fix in nsrepair.
159 {{"_AC3", 0, ACPI_RTYPE_INTEGER}}, 203 * Possibly fixing package elements like _BIF, etc.
160 {{"_AC4", 0, ACPI_RTYPE_INTEGER}}, 204 *
161 {{"_AC5", 0, ACPI_RTYPE_INTEGER}}, 205 *****************************************************************************/
162 {{"_AC6", 0, ACPI_RTYPE_INTEGER}}, 206
163 {{"_AC7", 0, ACPI_RTYPE_INTEGER}}, 207const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
164 {{"_AC8", 0, ACPI_RTYPE_INTEGER}}, 208 {{"_AC0", METHOD_0ARGS,
165 {{"_AC9", 0, ACPI_RTYPE_INTEGER}}, 209 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
166 {{"_ADR", 0, ACPI_RTYPE_INTEGER}}, 210
167 {{"_AEI", 0, ACPI_RTYPE_BUFFER}}, 211 {{"_AC1", METHOD_0ARGS,
168 {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 212 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
169 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 213
170 214 {{"_AC2", METHOD_0ARGS,
171 {{"_AL1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 215 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
172 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 216
173 217 {{"_AC3", METHOD_0ARGS,
174 {{"_AL2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 218 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
175 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 219
176 220 {{"_AC4", METHOD_0ARGS,
177 {{"_AL3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 221 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
178 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 222
179 223 {{"_AC5", METHOD_0ARGS,
180 {{"_AL4", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 224 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
181 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 225
182 226 {{"_AC6", METHOD_0ARGS,
183 {{"_AL5", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 227 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
184 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 228
185 229 {{"_AC7", METHOD_0ARGS,
186 {{"_AL6", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 230 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
187 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 231
188 232 {{"_AC8", METHOD_0ARGS,
189 {{"_AL7", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 233 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
190 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 234
191 235 {{"_AC9", METHOD_0ARGS,
192 {{"_AL8", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 236 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
193 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 237
194 238 {{"_ADR", METHOD_0ARGS,
195 {{"_AL9", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 239 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
196 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 240
197 241 {{"_AEI", METHOD_0ARGS,
198 {{"_ALC", 0, ACPI_RTYPE_INTEGER}}, 242 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
199 {{"_ALI", 0, ACPI_RTYPE_INTEGER}}, 243
200 {{"_ALP", 0, ACPI_RTYPE_INTEGER}}, 244 {{"_AL0", METHOD_0ARGS,
201 {{"_ALR", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 (Ints) */ 245 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
202 {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, 246 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
203 247
204 {{"_ALT", 0, ACPI_RTYPE_INTEGER}}, 248 {{"_AL1", METHOD_0ARGS,
205 {{"_ART", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */ 249 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
206 {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 250 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
207 11, 0}}, 251
208 252 {{"_AL2", METHOD_0ARGS,
209 {{"_BBN", 0, ACPI_RTYPE_INTEGER}}, 253 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
210 {{"_BCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */ 254 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
211 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 255
212 256 {{"_AL3", METHOD_0ARGS,
213 {{"_BCM", 1, 0}}, 257 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
214 {{"_BCT", 1, ACPI_RTYPE_INTEGER}}, 258 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
215 {{"_BDN", 0, ACPI_RTYPE_INTEGER}}, 259
216 {{"_BFS", 1, 0}}, 260 {{"_AL4", METHOD_0ARGS,
217 {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */ 261 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
218 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}}, 262 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
219 263
220 {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */ 264 {{"_AL5", METHOD_0ARGS,
221 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4, 265 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
222 0}}, 266 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
223 267
224 {{"_BLT", 3, 0}}, 268 {{"_AL6", METHOD_0ARGS,
225 {{"_BMA", 1, ACPI_RTYPE_INTEGER}}, 269 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
226 {{"_BMC", 1, 0}}, 270 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
227 {{"_BMD", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (5 Int) */ 271
228 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, 272 {{"_AL7", METHOD_0ARGS,
229 273 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
230 {{"_BMS", 1, ACPI_RTYPE_INTEGER}}, 274 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
231 {{"_BQC", 0, ACPI_RTYPE_INTEGER}}, 275
232 {{"_BST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ 276 {{"_AL8", METHOD_0ARGS,
233 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}}, 277 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
234 278 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
235 {{"_BTM", 1, ACPI_RTYPE_INTEGER}}, 279
236 {{"_BTP", 1, 0}}, 280 {{"_AL9", METHOD_0ARGS,
237 {{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */ 281 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
238 {{"_CDM", 0, ACPI_RTYPE_INTEGER}}, 282 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
239 {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */ 283
240 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0, 284 {{"_ALC", METHOD_0ARGS,
241 0}}, 285 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
242 286
243 {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */ 287 {{"_ALI", METHOD_0ARGS,
244 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, 288 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
245 289
246 {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */ 290 {{"_ALP", METHOD_0ARGS,
247 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0, 291 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
248 0}}, 292
249 293 {{"_ALR", METHOD_0ARGS,
250 {{"_CRS", 0, ACPI_RTYPE_BUFFER}}, 294 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 2 (Ints) */
251 {{"_CRT", 0, ACPI_RTYPE_INTEGER}}, 295 PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
252 {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */ 296
253 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 297 {{"_ALT", METHOD_0ARGS,
254 298 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
255 {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */ 299
256 {{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3, 300 {{"_ART", METHOD_0ARGS,
257 0}}, 301 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
258 302 PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2,
259 {{"_CWS", 1, ACPI_RTYPE_INTEGER}}, 303 ACPI_RTYPE_INTEGER, 11, 0),
260 {{"_DCK", 1, ACPI_RTYPE_INTEGER}}, 304
261 {{"_DCS", 0, ACPI_RTYPE_INTEGER}}, 305 {{"_BBN", METHOD_0ARGS,
262 {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}}, 306 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
263 {{"_DDN", 0, ACPI_RTYPE_STRING}}, 307
264 {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 308 {{"_BCL", METHOD_0ARGS,
265 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 309 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
266 310 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
267 {{"_DGS", 0, ACPI_RTYPE_INTEGER}}, 311
268 {{"_DIS", 0, 0}}, 312 {{"_BCM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
269 313 METHOD_NO_RETURN_VALUE}},
270 {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */ 314
271 {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1, 315 {{"_BCT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
272 ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}}, 316 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
273 317
274 {{"_DMA", 0, ACPI_RTYPE_BUFFER}}, 318 {{"_BDN", METHOD_0ARGS,
275 {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */ 319 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
276 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 320
277 321 {{"_BFS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
278 {{"_DOS", 1, 0}}, 322 METHOD_NO_RETURN_VALUE}},
279 {{"_DSM", 4, ACPI_RTYPE_ALL}}, /* Must return a type, but it can be of any type */ 323
280 {{"_DSS", 1, 0}}, 324 {{"_BIF", METHOD_0ARGS,
281 {{"_DSW", 3, 0}}, 325 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (9 Int),(4 Str) */
282 {{"_DTI", 1, 0}}, 326 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
283 {{"_EC_", 0, ACPI_RTYPE_INTEGER}}, 327 ACPI_RTYPE_STRING, 4, 0),
284 {{"_EDL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs)*/ 328
285 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 329 {{"_BIX", METHOD_0ARGS,
286 330 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int),(4 Str) */
287 {{"_EJ0", 1, 0}}, 331 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
288 {{"_EJ1", 1, 0}}, 332 ACPI_RTYPE_STRING, 4, 0),
289 {{"_EJ2", 1, 0}}, 333
290 {{"_EJ3", 1, 0}}, 334 {{"_BLT",
291 {{"_EJ4", 1, 0}}, 335 METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
292 {{"_EJD", 0, ACPI_RTYPE_STRING}}, 336 METHOD_NO_RETURN_VALUE}},
293 {{"_EVT", 1, 0}}, 337
294 {{"_FDE", 0, ACPI_RTYPE_BUFFER}}, 338 {{"_BMA", METHOD_1ARGS(ACPI_TYPE_INTEGER),
295 {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */ 339 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
296 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}}, 340
297 341 {{"_BMC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
298 {{"_FDM", 1, 0}}, 342 METHOD_NO_RETURN_VALUE}},
299 {{"_FIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ 343
300 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0}, 0, 0}}, 344 {{"_BMD", METHOD_0ARGS,
301 345 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (5 Int) */
302 {{"_FIX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */ 346 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
303 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 347
304 348 {{"_BMS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
305 {{"_FPS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (5 Int) */ 349 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
306 {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0}, 0, 0}}, 350
307 351 {{"_BQC", METHOD_0ARGS,
308 {{"_FSL", 1, 0}}, 352 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
309 {{"_FST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */ 353
310 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, 354 {{"_BST", METHOD_0ARGS,
311 355 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
312 {{"_GAI", 0, ACPI_RTYPE_INTEGER}}, 356 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
313 {{"_GCP", 0, ACPI_RTYPE_INTEGER}}, 357
314 {{"_GHL", 0, ACPI_RTYPE_INTEGER}}, 358 {{"_BTM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
315 {{"_GLK", 0, ACPI_RTYPE_INTEGER}}, 359 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
316 {{"_GPD", 0, ACPI_RTYPE_INTEGER}}, 360
317 {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */ 361 {{"_BTP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
318 {{"_GRT", 0, ACPI_RTYPE_BUFFER}}, 362 METHOD_NO_RETURN_VALUE}},
319 {{"_GSB", 0, ACPI_RTYPE_INTEGER}}, 363
320 {{"_GTF", 0, ACPI_RTYPE_BUFFER}}, 364 {{"_CBA", METHOD_0ARGS,
321 {{"_GTM", 0, ACPI_RTYPE_BUFFER}}, 365 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See PCI firmware spec 3.0 */
322 {{"_GTS", 1, 0}}, 366
323 {{"_GWS", 1, ACPI_RTYPE_INTEGER}}, 367 {{"_CDM", METHOD_0ARGS,
324 {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}}, 368 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
325 {{"_HOT", 0, ACPI_RTYPE_INTEGER}}, 369
326 {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ 370 {{"_CID", METHOD_0ARGS,
327 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}}, 371 METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints/Strs) */
372 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,
373 0, 0, 0),
374
375 {{"_CLS", METHOD_0ARGS,
376 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */
377 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
378
379 {{"_CPC", METHOD_0ARGS,
380 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints/Bufs) */
381 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0,
382 0, 0, 0),
383
384 {{"_CRS", METHOD_0ARGS,
385 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
386
387 {{"_CRT", METHOD_0ARGS,
388 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
389
390 {{"_CSD", METHOD_0ARGS,
391 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(n), n-1 Int) */
392 PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
393
394 {{"_CST", METHOD_0ARGS,
395 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
396 PACKAGE_INFO(ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1,
397 ACPI_RTYPE_INTEGER, 3, 0),
398
399 {{"_CWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
400 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
401
402 {{"_DCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
403 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
404
405 {{"_DCS", METHOD_0ARGS,
406 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
407
408 {{"_DDC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
409 METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER)}},
410
411 {{"_DDN", METHOD_0ARGS,
412 METHOD_RETURNS(ACPI_RTYPE_STRING)}},
413
414 {{"_DEP", METHOD_0ARGS,
415 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
416 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
417
418 {{"_DGS", METHOD_0ARGS,
419 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
420
421 {{"_DIS", METHOD_0ARGS,
422 METHOD_NO_RETURN_VALUE}},
423
424 {{"_DLM", METHOD_0ARGS,
425 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
426 PACKAGE_INFO(ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
427 ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER, 0, 0),
428
429 {{"_DMA", METHOD_0ARGS,
430 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
431
432 {{"_DOD", METHOD_0ARGS,
433 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
434 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
435
436 {{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
437 METHOD_NO_RETURN_VALUE}},
438
439 {{"_DSM",
440 METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
441 ACPI_TYPE_PACKAGE),
442 METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
443
444 {{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
445 METHOD_NO_RETURN_VALUE}},
446
447 {{"_DSW",
448 METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
449 METHOD_NO_RETURN_VALUE}},
450
451 {{"_DTI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
452 METHOD_NO_RETURN_VALUE}},
453
454 {{"_EC_", METHOD_0ARGS,
455 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
456
457 {{"_EDL", METHOD_0ARGS,
458 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
459 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
460
461 {{"_EJ0", METHOD_1ARGS(ACPI_TYPE_INTEGER),
462 METHOD_NO_RETURN_VALUE}},
463
464 {{"_EJ1", METHOD_1ARGS(ACPI_TYPE_INTEGER),
465 METHOD_NO_RETURN_VALUE}},
466
467 {{"_EJ2", METHOD_1ARGS(ACPI_TYPE_INTEGER),
468 METHOD_NO_RETURN_VALUE}},
469
470 {{"_EJ3", METHOD_1ARGS(ACPI_TYPE_INTEGER),
471 METHOD_NO_RETURN_VALUE}},
472
473 {{"_EJ4", METHOD_1ARGS(ACPI_TYPE_INTEGER),
474 METHOD_NO_RETURN_VALUE}},
475
476 {{"_EJD", METHOD_0ARGS,
477 METHOD_RETURNS(ACPI_RTYPE_STRING)}},
478
479 {{"_ERR",
480 METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_STRING, ACPI_TYPE_INTEGER),
481 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* Internal use only, used by ACPICA test suites */
482
483 {{"_EVT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
484 METHOD_NO_RETURN_VALUE}},
485
486 {{"_FDE", METHOD_0ARGS,
487 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
488
489 {{"_FDI", METHOD_0ARGS,
490 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int) */
491 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, 0, 0, 0),
492
493 {{"_FDM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
494 METHOD_NO_RETURN_VALUE}},
495
496 {{"_FIF", METHOD_0ARGS,
497 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
498 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
499
500 {{"_FIX", METHOD_0ARGS,
501 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
502 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
503
504 {{"_FPS", METHOD_0ARGS,
505 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (5 Int) */
506 PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
507
508 {{"_FSL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
509 METHOD_NO_RETURN_VALUE}},
510
511 {{"_FST", METHOD_0ARGS,
512 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */
513 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
514
515 {{"_GAI", METHOD_0ARGS,
516 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
517
518 {{"_GCP", METHOD_0ARGS,
519 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
520
521 {{"_GHL", METHOD_0ARGS,
522 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
523
524 {{"_GLK", METHOD_0ARGS,
525 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
526
527 {{"_GPD", METHOD_0ARGS,
528 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
529
530 {{"_GPE", METHOD_0ARGS,
531 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* _GPE method, not _GPE scope */
532
533 {{"_GRT", METHOD_0ARGS,
534 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
535
536 {{"_GSB", METHOD_0ARGS,
537 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
538
539 {{"_GTF", METHOD_0ARGS,
540 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
541
542 {{"_GTM", METHOD_0ARGS,
543 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
544
545 {{"_GTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
546 METHOD_NO_RETURN_VALUE}},
547
548 {{"_GWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
549 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
550
551 {{"_HID", METHOD_0ARGS,
552 METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
553
554 {{"_HOT", METHOD_0ARGS,
555 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
556
557 {{"_HPP", METHOD_0ARGS,
558 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
559 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
328 560
329 /* 561 /*
330 * For _HPX, a single package is returned, containing a Variable-length number 562 * For _HPX, a single package is returned, containing a variable-length number
331 * of sub-packages. Each sub-package contains a PCI record setting. 563 * of sub-packages. Each sub-package contains a PCI record setting.
332 * There are several different types of record settings, of different 564 * There are several different types of record settings, of different
333 * lengths, but all elements of all settings are Integers. 565 * lengths, but all elements of all settings are Integers.
334 */ 566 */
335 {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */ 567 {{"_HPX", METHOD_0ARGS,
336 {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, 568 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (var Ints) */
337 569 PACKAGE_INFO(ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
338 {{"_HRV", 0, ACPI_RTYPE_INTEGER}}, 570
339 {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ 571 {{"_HRV", METHOD_0ARGS,
340 {{"_INI", 0, 0}}, 572 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
341 {{"_IRC", 0, 0}}, 573
342 {{"_LCK", 1, 0}}, 574 {{"_IFT", METHOD_0ARGS,
343 {{"_LID", 0, ACPI_RTYPE_INTEGER}}, 575 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See IPMI spec */
344 {{"_MAT", 0, ACPI_RTYPE_BUFFER}}, 576
345 {{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */ 577 {{"_INI", METHOD_0ARGS,
346 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}}, 578 METHOD_NO_RETURN_VALUE}},
347 579
348 {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */ 580 {{"_IRC", METHOD_0ARGS,
349 {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}}, 581 METHOD_NO_RETURN_VALUE}},
350 582
351 {{"_MSG", 1, 0}}, 583 {{"_LCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
352 {{"_MSM", 4, ACPI_RTYPE_INTEGER}}, 584 METHOD_NO_RETURN_VALUE}},
353 {{"_NTT", 0, ACPI_RTYPE_INTEGER}}, 585
354 {{"_OFF", 0, 0}}, 586 {{"_LID", METHOD_0ARGS,
355 {{"_ON_", 0, 0}}, 587 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
356 {{"_OS_", 0, ACPI_RTYPE_STRING}}, 588
357 {{"_OSC", 4, ACPI_RTYPE_BUFFER}}, 589 {{"_MAT", METHOD_0ARGS,
358 {{"_OST", 3, 0}}, 590 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
359 {{"_PAI", 1, ACPI_RTYPE_INTEGER}}, 591
360 {{"_PCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 592 {{"_MBM", METHOD_0ARGS,
361 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 593 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (8 Int) */
362 594 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0, 0, 0),
363 {{"_PCT", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */ 595
364 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}}, 596 {{"_MLS", METHOD_0ARGS,
365 597 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
366 {{"_PDC", 1, 0}}, 598 PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER, 1,
367 {{"_PDL", 0, ACPI_RTYPE_INTEGER}}, 599 0),
368 {{"_PIC", 1, 0}}, 600
369 {{"_PIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int),(3 Str) */ 601 {{"_MSG", METHOD_1ARGS(ACPI_TYPE_INTEGER),
370 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, ACPI_RTYPE_STRING}, 3, 0}}, 602 METHOD_NO_RETURN_VALUE}},
371 603
372 {{"_PLD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Bufs) */ 604 {{"_MSM",
373 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0,0}, 0,0}}, 605 METHOD_4ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
374 606 ACPI_TYPE_INTEGER),
375 {{"_PMC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (11 Int),(3 Str) */ 607 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
376 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11, ACPI_RTYPE_STRING}, 3, 608
377 0}}, 609 {{"_NTT", METHOD_0ARGS,
378 610 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
379 {{"_PMD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 611
380 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 612 {{"_OFF", METHOD_0ARGS,
381 613 METHOD_NO_RETURN_VALUE}},
382 {{"_PMM", 0, ACPI_RTYPE_INTEGER}}, 614
383 {{"_PPC", 0, ACPI_RTYPE_INTEGER}}, 615 {{"_ON_", METHOD_0ARGS,
384 {{"_PPE", 0, ACPI_RTYPE_INTEGER}}, /* See dig64 spec */ 616 METHOD_NO_RETURN_VALUE}},
385 {{"_PR0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 617
386 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 618 {{"_OS_", METHOD_0ARGS,
387 619 METHOD_RETURNS(ACPI_RTYPE_STRING)}},
388 {{"_PR1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 620
389 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 621 {{"_OSC",
390 622 METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
391 {{"_PR2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 623 ACPI_TYPE_BUFFER),
392 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 624 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
393 625
394 {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 626 {{"_OST",
395 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 627 METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_BUFFER),
396 628 METHOD_NO_RETURN_VALUE}},
397 {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 629
398 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 630 {{"_PAI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
399 631 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
400 {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 632
401 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 633 {{"_PCL", METHOD_0ARGS,
402 634 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
403 {{"_PRS", 0, ACPI_RTYPE_BUFFER}}, 635 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
636
637 {{"_PCT", METHOD_0ARGS,
638 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Buf) */
639 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
640
641 {{"_PDC", METHOD_1ARGS(ACPI_TYPE_BUFFER),
642 METHOD_NO_RETURN_VALUE}},
643
644 {{"_PDL", METHOD_0ARGS,
645 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
646
647 {{"_PIC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
648 METHOD_NO_RETURN_VALUE}},
649
650 {{"_PIF", METHOD_0ARGS,
651 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int),(3 Str) */
652 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3,
653 ACPI_RTYPE_STRING, 3, 0),
654
655 {{"_PLD", METHOD_0ARGS,
656 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Bufs) */
657 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0, 0, 0, 0),
658
659 {{"_PMC", METHOD_0ARGS,
660 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (11 Int),(3 Str) */
661 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11,
662 ACPI_RTYPE_STRING, 3, 0),
663
664 {{"_PMD", METHOD_0ARGS,
665 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
666 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
667
668 {{"_PMM", METHOD_0ARGS,
669 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
670
671 {{"_PPC", METHOD_0ARGS,
672 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
673
674 {{"_PPE", METHOD_0ARGS,
675 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See dig64 spec */
676
677 {{"_PR0", METHOD_0ARGS,
678 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
679 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
680
681 {{"_PR1", METHOD_0ARGS,
682 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
683 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
684
685 {{"_PR2", METHOD_0ARGS,
686 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
687 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
688
689 {{"_PR3", METHOD_0ARGS,
690 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
691 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
692
693 {{"_PRE", METHOD_0ARGS,
694 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
695 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
696
697 {{"_PRL", METHOD_0ARGS,
698 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
699 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
700
701 {{"_PRS", METHOD_0ARGS,
702 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
404 703
405 /* 704 /*
406 * For _PRT, many BIOSes reverse the 3rd and 4th Package elements (Source 705 * For _PRT, many BIOSes reverse the 3rd and 4th Package elements (Source
@@ -410,47 +709,89 @@ static const union acpi_predefined_info predefined_names[] = {
410 * warning, add the ACPI_RTYPE_REFERENCE type to the 4th element (index 3) 709 * warning, add the ACPI_RTYPE_REFERENCE type to the 4th element (index 3)
411 * in the statement below. 710 * in the statement below.
412 */ 711 */
413 {{"_PRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */ 712 {{"_PRT", METHOD_0ARGS,
414 {{{ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,ACPI_RTYPE_INTEGER}, 713 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
415 ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE, 714 PACKAGE_INFO(ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,
416 ACPI_RTYPE_INTEGER}}, 715 ACPI_RTYPE_INTEGER,
417 716 ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
418 {{"_PRW", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */ 717 ACPI_RTYPE_INTEGER),
419 {{{ACPI_PTYPE1_OPTION, 2, ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE, 718
420 ACPI_RTYPE_INTEGER}, ACPI_RTYPE_REFERENCE,0}}, 719 {{"_PRW", METHOD_0ARGS,
421 720 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
422 {{"_PS0", 0, 0}}, 721 PACKAGE_INFO(ACPI_PTYPE1_OPTION, 2,
423 {{"_PS1", 0, 0}}, 722 ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
424 {{"_PS2", 0, 0}}, 723 ACPI_RTYPE_INTEGER, ACPI_RTYPE_REFERENCE, 0),
425 {{"_PS3", 0, 0}}, 724
426 {{"_PSC", 0, ACPI_RTYPE_INTEGER}}, 725 {{"_PS0", METHOD_0ARGS,
427 {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */ 726 METHOD_NO_RETURN_VALUE}},
428 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}}, 727
429 728 {{"_PS1", METHOD_0ARGS,
430 {{"_PSE", 1, 0}}, 729 METHOD_NO_RETURN_VALUE}},
431 {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 730
432 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 731 {{"_PS2", METHOD_0ARGS,
433 732 METHOD_NO_RETURN_VALUE}},
434 {{"_PSR", 0, ACPI_RTYPE_INTEGER}}, 733
435 {{"_PSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (6 Int) */ 734 {{"_PS3", METHOD_0ARGS,
436 {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6,0}, 0,0}}, 735 METHOD_NO_RETURN_VALUE}},
437 736
438 {{"_PSV", 0, ACPI_RTYPE_INTEGER}}, 737 {{"_PSC", METHOD_0ARGS,
439 {{"_PSW", 1, 0}}, 738 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
440 {{"_PTC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */ 739
441 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}}, 740 {{"_PSD", METHOD_0ARGS,
442 741 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (5 Int) with count */
443 {{"_PTP", 2, ACPI_RTYPE_INTEGER}}, 742 PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
444 {{"_PTS", 1, 0}}, 743
445 {{"_PUR", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Int) */ 744 {{"_PSE", METHOD_1ARGS(ACPI_TYPE_INTEGER),
446 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0}, 0, 0}}, 745 METHOD_NO_RETURN_VALUE}},
447 746
448 {{"_PXM", 0, ACPI_RTYPE_INTEGER}}, 747 {{"_PSL", METHOD_0ARGS,
449 {{"_REG", 2, 0}}, 748 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
450 {{"_REV", 0, ACPI_RTYPE_INTEGER}}, 749 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
451 {{"_RMV", 0, ACPI_RTYPE_INTEGER}}, 750
452 {{"_ROM", 2, ACPI_RTYPE_BUFFER}}, 751 {{"_PSR", METHOD_0ARGS,
453 {{"_RTV", 0, ACPI_RTYPE_INTEGER}}, 752 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
753
754 {{"_PSS", METHOD_0ARGS,
755 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (6 Int) */
756 PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6, 0, 0, 0),
757
758 {{"_PSV", METHOD_0ARGS,
759 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
760
761 {{"_PSW", METHOD_1ARGS(ACPI_TYPE_INTEGER),
762 METHOD_NO_RETURN_VALUE}},
763
764 {{"_PTC", METHOD_0ARGS,
765 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Buf) */
766 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
767
768 {{"_PTP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
769 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
770
771 {{"_PTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
772 METHOD_NO_RETURN_VALUE}},
773
774 {{"_PUR", METHOD_0ARGS,
775 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int) */
776 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
777
778 {{"_PXM", METHOD_0ARGS,
779 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
780
781 {{"_REG", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
782 METHOD_NO_RETURN_VALUE}},
783
784 {{"_REV", METHOD_0ARGS,
785 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
786
787 {{"_RMV", METHOD_0ARGS,
788 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
789
790 {{"_ROM", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
791 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
792
793 {{"_RTV", METHOD_0ARGS,
794 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
454 795
455 /* 796 /*
456 * For _S0_ through _S5_, the ACPI spec defines a return Package 797 * For _S0_ through _S5_, the ACPI spec defines a return Package
@@ -458,111 +799,285 @@ static const union acpi_predefined_info predefined_names[] = {
458 * Allow this by making the objects variable length, but all elements 799 * Allow this by making the objects variable length, but all elements
459 * must be Integers. 800 * must be Integers.
460 */ 801 */
461 {{"_S0_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 802 {{"_S0_", METHOD_0ARGS,
462 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 803 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
463 804 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
464 {{"_S1_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 805
465 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 806 {{"_S1_", METHOD_0ARGS,
466 807 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
467 {{"_S2_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 808 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
468 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 809
469 810 {{"_S2_", METHOD_0ARGS,
470 {{"_S3_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 811 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
471 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 812 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
472 813
473 {{"_S4_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 814 {{"_S3_", METHOD_0ARGS,
474 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 815 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
475 816 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
476 {{"_S5_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */ 817
477 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}}, 818 {{"_S4_", METHOD_0ARGS,
478 819 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
479 {{"_S1D", 0, ACPI_RTYPE_INTEGER}}, 820 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
480 {{"_S2D", 0, ACPI_RTYPE_INTEGER}}, 821
481 {{"_S3D", 0, ACPI_RTYPE_INTEGER}}, 822 {{"_S5_", METHOD_0ARGS,
482 {{"_S4D", 0, ACPI_RTYPE_INTEGER}}, 823 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
483 {{"_S0W", 0, ACPI_RTYPE_INTEGER}}, 824 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
484 {{"_S1W", 0, ACPI_RTYPE_INTEGER}}, 825
485 {{"_S2W", 0, ACPI_RTYPE_INTEGER}}, 826 {{"_S1D", METHOD_0ARGS,
486 {{"_S3W", 0, ACPI_RTYPE_INTEGER}}, 827 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
487 {{"_S4W", 0, ACPI_RTYPE_INTEGER}}, 828
488 {{"_SBS", 0, ACPI_RTYPE_INTEGER}}, 829 {{"_S2D", METHOD_0ARGS,
489 {{"_SCP", 0x13, 0}}, /* Acpi 1.0 allowed 1 arg. Acpi 3.0 expanded to 3 args. Allow both. */ 830 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
490 /* Note: the 3-arg definition may be removed for ACPI 4.0 */ 831
491 {{"_SDD", 1, 0}}, 832 {{"_S3D", METHOD_0ARGS,
492 {{"_SEG", 0, ACPI_RTYPE_INTEGER}}, 833 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
493 {{"_SHL", 1, ACPI_RTYPE_INTEGER}}, 834
494 {{"_SLI", 0, ACPI_RTYPE_BUFFER}}, 835 {{"_S4D", METHOD_0ARGS,
495 {{"_SPD", 1, ACPI_RTYPE_INTEGER}}, 836 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
496 {{"_SRS", 1, 0}}, 837
497 {{"_SRT", 1, ACPI_RTYPE_INTEGER}}, 838 {{"_S0W", METHOD_0ARGS,
498 {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ 839 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
499 {{"_SST", 1, 0}}, 840
500 {{"_STA", 0, ACPI_RTYPE_INTEGER}}, 841 {{"_S1W", METHOD_0ARGS,
501 {{"_STM", 3, 0}}, 842 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
502 {{"_STP", 2, ACPI_RTYPE_INTEGER}}, 843
503 {{"_STR", 0, ACPI_RTYPE_BUFFER}}, 844 {{"_S2W", METHOD_0ARGS,
504 {{"_STV", 2, ACPI_RTYPE_INTEGER}}, 845 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
505 {{"_SUB", 0, ACPI_RTYPE_STRING}}, 846
506 {{"_SUN", 0, ACPI_RTYPE_INTEGER}}, 847 {{"_S3W", METHOD_0ARGS,
507 {{"_SWS", 0, ACPI_RTYPE_INTEGER}}, 848 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
508 {{"_TC1", 0, ACPI_RTYPE_INTEGER}}, 849
509 {{"_TC2", 0, ACPI_RTYPE_INTEGER}}, 850 {{"_S4W", METHOD_0ARGS,
510 {{"_TDL", 0, ACPI_RTYPE_INTEGER}}, 851 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
511 {{"_TIP", 1, ACPI_RTYPE_INTEGER}}, 852
512 {{"_TIV", 1, ACPI_RTYPE_INTEGER}}, 853 {{"_SBS", METHOD_0ARGS,
513 {{"_TMP", 0, ACPI_RTYPE_INTEGER}}, 854 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
514 {{"_TPC", 0, ACPI_RTYPE_INTEGER}}, 855
515 {{"_TPT", 1, 0}}, 856 {{"_SCP", METHOD_1ARGS(ACPI_TYPE_INTEGER) | ARG_COUNT_IS_MINIMUM,
516 {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */ 857 METHOD_NO_RETURN_VALUE}}, /* Acpi 1.0 allowed 1 integer arg. Acpi 3.0 expanded to 3 args. Allow both. */
517 {{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}}, 858
518 859 {{"_SDD", METHOD_1ARGS(ACPI_TYPE_BUFFER),
519 {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int with count */ 860 METHOD_NO_RETURN_VALUE}},
520 {{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, 861
521 862 {{"_SEG", METHOD_0ARGS,
522 {{"_TSP", 0, ACPI_RTYPE_INTEGER}}, 863 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
523 {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int */ 864
524 {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, 865 {{"_SHL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
525 866 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
526 {{"_TST", 0, ACPI_RTYPE_INTEGER}}, 867
527 {{"_TTS", 1, 0}}, 868 {{"_SLI", METHOD_0ARGS,
528 {{"_TZD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 869 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
529 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 870
530 871 {{"_SPD", METHOD_1ARGS(ACPI_TYPE_INTEGER),
531 {{"_TZM", 0, ACPI_RTYPE_REFERENCE}}, 872 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
532 {{"_TZP", 0, ACPI_RTYPE_INTEGER}}, 873
533 {{"_UID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}}, 874 {{"_SRS", METHOD_1ARGS(ACPI_TYPE_BUFFER),
534 {{"_UPC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ 875 METHOD_NO_RETURN_VALUE}},
535 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}}, 876
536 877 {{"_SRT", METHOD_1ARGS(ACPI_TYPE_BUFFER),
537 {{"_UPD", 0, ACPI_RTYPE_INTEGER}}, 878 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
538 {{"_UPP", 0, ACPI_RTYPE_INTEGER}}, 879
539 {{"_VPO", 0, ACPI_RTYPE_INTEGER}}, 880 {{"_SRV", METHOD_0ARGS,
881 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See IPMI spec */
882
883 {{"_SST", METHOD_1ARGS(ACPI_TYPE_INTEGER),
884 METHOD_NO_RETURN_VALUE}},
885
886 {{"_STA", METHOD_0ARGS,
887 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
888
889 {{"_STM",
890 METHOD_3ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER),
891 METHOD_NO_RETURN_VALUE}},
892
893 {{"_STP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
894 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
895
896 {{"_STR", METHOD_0ARGS,
897 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
898
899 {{"_STV", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
900 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
901
902 {{"_SUB", METHOD_0ARGS,
903 METHOD_RETURNS(ACPI_RTYPE_STRING)}},
904
905 {{"_SUN", METHOD_0ARGS,
906 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
907
908 {{"_SWS", METHOD_0ARGS,
909 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
910
911 {{"_TC1", METHOD_0ARGS,
912 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
913
914 {{"_TC2", METHOD_0ARGS,
915 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
916
917 {{"_TDL", METHOD_0ARGS,
918 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
919
920 {{"_TIP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
921 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
922
923 {{"_TIV", METHOD_1ARGS(ACPI_TYPE_INTEGER),
924 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
925
926 {{"_TMP", METHOD_0ARGS,
927 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
928
929 {{"_TPC", METHOD_0ARGS,
930 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
931
932 {{"_TPT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
933 METHOD_NO_RETURN_VALUE}},
934
935 {{"_TRT", METHOD_0ARGS,
936 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
937 PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER,
938 6, 0),
939
940 {{"_TSD", METHOD_0ARGS,
941 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int with count */
942 PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
943
944 {{"_TSP", METHOD_0ARGS,
945 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
946
947 {{"_TSS", METHOD_0ARGS,
948 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int */
949 PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
950
951 {{"_TST", METHOD_0ARGS,
952 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
953
954 {{"_TTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
955 METHOD_NO_RETURN_VALUE}},
956
957 {{"_TZD", METHOD_0ARGS,
958 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
959 PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
960
961 {{"_TZM", METHOD_0ARGS,
962 METHOD_RETURNS(ACPI_RTYPE_REFERENCE)}},
963
964 {{"_TZP", METHOD_0ARGS,
965 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
966
967 {{"_UID", METHOD_0ARGS,
968 METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
969
970 {{"_UPC", METHOD_0ARGS,
971 METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
972 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
973
974 {{"_UPD", METHOD_0ARGS,
975 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
976
977 {{"_UPP", METHOD_0ARGS,
978 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
979
980 {{"_VPO", METHOD_0ARGS,
981 METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
540 982
541 /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ 983 /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
542 984
543 {{"_WAK", 1, 985 {{"_WAK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
544 ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, 986 METHOD_RETURNS(ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER |
545 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ 987 ACPI_RTYPE_PACKAGE)}},
988 PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0), /* Fixed-length (2 Int), but is optional */
546 989
547 /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ 990 /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
548 991
549 {{"_WDG", 0, ACPI_RTYPE_BUFFER}}, 992 {{"_WDG", METHOD_0ARGS,
550 {{"_WED", 1, 993 METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
551 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}}, 994
995 {{"_WED", METHOD_1ARGS(ACPI_TYPE_INTEGER),
996 METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
997 ACPI_RTYPE_BUFFER)}},
552 998
553 {{{0, 0, 0, 0}, 0, 0}} /* Table terminator */ 999 PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
554}; 1000};
1001#else
1002extern const union acpi_predefined_info acpi_gbl_predefined_methods[];
1003#endif
555 1004
556#if 0 1005#if (defined ACPI_CREATE_RESOURCE_TABLE && defined ACPI_APPLICATION)
1006/******************************************************************************
1007 *
1008 * Predefined names for use in Resource Descriptors. These names do not
1009 * appear in the global Predefined Name table (since these names never
1010 * appear in actual AML byte code, only in the original ASL)
1011 *
1012 * Note: Used by iASL compiler and acpi_help utility only.
1013 *
1014 *****************************************************************************/
557 1015
558 /* This is an internally implemented control method, no need to check */ 1016const union acpi_predefined_info acpi_gbl_resource_names[] = {
559{ { 1017 {{"_ADR", WIDTH_16 | WIDTH_64, 0}},
560"_OSI", 1, ACPI_RTYPE_INTEGER}}, 1018 {{"_ALN", WIDTH_8 | WIDTH_16 | WIDTH_32, 0}},
1019 {{"_ASI", WIDTH_8, 0}},
1020 {{"_ASZ", WIDTH_8, 0}},
1021 {{"_ATT", WIDTH_64, 0}},
1022 {{"_BAS", WIDTH_16 | WIDTH_32, 0}},
1023 {{"_BM_", WIDTH_1, 0}},
1024 {{"_DBT", WIDTH_16, 0}}, /* Acpi 5.0 */
1025 {{"_DEC", WIDTH_1, 0}},
1026 {{"_DMA", WIDTH_8, 0}},
1027 {{"_DPL", WIDTH_1, 0}}, /* Acpi 5.0 */
1028 {{"_DRS", WIDTH_16, 0}}, /* Acpi 5.0 */
1029 {{"_END", WIDTH_1, 0}}, /* Acpi 5.0 */
1030 {{"_FLC", WIDTH_2, 0}}, /* Acpi 5.0 */
1031 {{"_GRA", WIDTH_ADDRESS, 0}},
1032 {{"_HE_", WIDTH_1, 0}},
1033 {{"_INT", WIDTH_16 | WIDTH_32, 0}},
1034 {{"_IOR", WIDTH_2, 0}}, /* Acpi 5.0 */
1035 {{"_LEN", WIDTH_8 | WIDTH_ADDRESS, 0}},
1036 {{"_LIN", WIDTH_8, 0}}, /* Acpi 5.0 */
1037 {{"_LL_", WIDTH_1, 0}},
1038 {{"_MAF", WIDTH_1, 0}},
1039 {{"_MAX", WIDTH_ADDRESS, 0}},
1040 {{"_MEM", WIDTH_2, 0}},
1041 {{"_MIF", WIDTH_1, 0}},
1042 {{"_MIN", WIDTH_ADDRESS, 0}},
1043 {{"_MOD", WIDTH_1, 0}}, /* Acpi 5.0 */
1044 {{"_MTP", WIDTH_2, 0}},
1045 {{"_PAR", WIDTH_8, 0}}, /* Acpi 5.0 */
1046 {{"_PHA", WIDTH_1, 0}}, /* Acpi 5.0 */
1047 {{"_PIN", WIDTH_16, 0}}, /* Acpi 5.0 */
1048 {{"_PPI", WIDTH_8, 0}}, /* Acpi 5.0 */
1049 {{"_POL", WIDTH_1 | WIDTH_2, 0}}, /* Acpi 5.0 */
1050 {{"_RBO", WIDTH_8, 0}},
1051 {{"_RBW", WIDTH_8, 0}},
1052 {{"_RNG", WIDTH_1, 0}},
1053 {{"_RT_", WIDTH_8, 0}}, /* Acpi 3.0 */
1054 {{"_RW_", WIDTH_1, 0}},
1055 {{"_RXL", WIDTH_16, 0}}, /* Acpi 5.0 */
1056 {{"_SHR", WIDTH_2, 0}},
1057 {{"_SIZ", WIDTH_2, 0}},
1058 {{"_SLV", WIDTH_1, 0}}, /* Acpi 5.0 */
1059 {{"_SPE", WIDTH_32, 0}}, /* Acpi 5.0 */
1060 {{"_STB", WIDTH_2, 0}}, /* Acpi 5.0 */
1061 {{"_TRA", WIDTH_ADDRESS, 0}},
1062 {{"_TRS", WIDTH_1, 0}},
1063 {{"_TSF", WIDTH_8, 0}}, /* Acpi 3.0 */
1064 {{"_TTP", WIDTH_1, 0}},
1065 {{"_TXL", WIDTH_16, 0}}, /* Acpi 5.0 */
1066 {{"_TYP", WIDTH_2 | WIDTH_16, 0}},
1067 {{"_VEN", VARIABLE_DATA, 0}}, /* Acpi 5.0 */
1068 PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
1069};
561 1070
562 /* TBD: */ 1071static const union acpi_predefined_info acpi_gbl_scope_names[] = {
563 _PRT - currently ignore reversed entries. attempt to fix here? 1072 {{"_GPE", 0, 0}},
564 think about possibly fixing package elements like _BIF, etc. 1073 {{"_PR_", 0, 0}},
1074 {{"_SB_", 0, 0}},
1075 {{"_SI_", 0, 0}},
1076 {{"_TZ_", 0, 0}},
1077 PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
1078};
1079#else
1080extern const union acpi_predefined_info acpi_gbl_resource_names[];
565#endif 1081#endif
566 1082
567#endif 1083#endif
568#endif
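Both tables introduced above (acpi_gbl_predefined_methods and acpi_gbl_resource_names) end with an all-zero terminator entry instead of carrying an explicit element count, so lookups walk them linearly and compare the fixed 4-character ACPI name. The following is a minimal standalone sketch of that lookup pattern; the name_entry struct, example_table, and match_name() helper are simplified stand-ins invented for illustration, not the actual acpi_ut_match_predefined_method() implementation whose prototype appears in the acutils.h hunk below.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for one table entry: a fixed 4-char name plus flags. */
struct name_entry {
    char name[5];             /* 4 significant characters, NUL-padded for printing */
    unsigned int arg_info;
    unsigned int return_types;
};

/* Table terminated by an all-zero entry, mirroring the PACKAGE_INFO(0, ...) terminator. */
static const struct name_entry example_table[] = {
    { "_ADR", 0, 0x01 },
    { "_HID", 0, 0x03 },
    { "_STA", 0, 0x01 },
    { { 0 }, 0, 0 }           /* table terminator */
};

/* Linear scan: stop when the terminator (first name byte is 0) is reached. */
static const struct name_entry *match_name(const char *name)
{
    const struct name_entry *entry;

    for (entry = example_table; entry->name[0]; entry++) {
        if (!strncmp(entry->name, name, 4))
            return entry;
    }
    return NULL;              /* not a predefined name */
}

int main(void)
{
    const struct name_entry *hit = match_name("_HID");

    printf("%s -> return-type mask 0x%X\n",
           hit ? hit->name : "(none)", hit ? hit->return_types : 0);
    return 0;
}

Keeping the terminator in the table is what lets the kernel-resident subsystem, the iASL compiler, and acpi_help iterate over the same data without a separately exported length constant.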
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0082fa0a6139..202f4f12d3e2 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -113,9 +113,10 @@ struct acpi_pkg_info {
113 u32 num_packages; 113 u32 num_packages;
114}; 114};
115 115
116/* Object reference counts */
117
116#define REF_INCREMENT (u16) 0 118#define REF_INCREMENT (u16) 0
117#define REF_DECREMENT (u16) 1 119#define REF_DECREMENT (u16) 1
118#define REF_FORCE_DELETE (u16) 2
119 120
120/* acpi_ut_dump_buffer */ 121/* acpi_ut_dump_buffer */
121 122
@@ -421,7 +422,7 @@ acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
421 */ 422 */
422acpi_status acpi_ut_initialize_interfaces(void); 423acpi_status acpi_ut_initialize_interfaces(void);
423 424
424void acpi_ut_interface_terminate(void); 425acpi_status acpi_ut_interface_terminate(void);
425 426
426acpi_status acpi_ut_install_interface(acpi_string interface_name); 427acpi_status acpi_ut_install_interface(acpi_string interface_name);
427 428
@@ -432,6 +433,26 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
432acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state); 433acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
433 434
434/* 435/*
436 * utpredef - support for predefined names
437 */
438const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
439 acpi_predefined_info
440 *this_name);
441
442const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name);
443
444const union acpi_predefined_info *acpi_ut_match_resource_name(char *name);
445
446void
447acpi_ut_display_predefined_method(char *buffer,
448 const union acpi_predefined_info *this_name,
449 u8 multi_line);
450
451void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
452
453u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types);
454
455/*
435 * utstate - Generic state creation/cache routines 456 * utstate - Generic state creation/cache routines
436 */ 457 */
437void 458void
@@ -483,7 +504,8 @@ acpi_ut_short_divide(u64 in_dividend,
483/* 504/*
484 * utmisc 505 * utmisc
485 */ 506 */
486const char *acpi_ut_validate_exception(acpi_status status); 507const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
508 status);
487 509
488u8 acpi_ut_is_pci_root_bridge(char *id); 510u8 acpi_ut_is_pci_root_bridge(char *id);
489 511
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 4d8c992a51d8..99778997c35a 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -178,7 +178,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
178 178
179 if (!op) { 179 if (!op) {
180 ACPI_ERROR((AE_INFO, "Null Op")); 180 ACPI_ERROR((AE_INFO, "Null Op"));
181 return_VALUE(TRUE); 181 return_UINT8(TRUE);
182 } 182 }
183 183
184 /* 184 /*
@@ -210,7 +210,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
210 "At Method level, result of [%s] not used\n", 210 "At Method level, result of [%s] not used\n",
211 acpi_ps_get_opcode_name(op->common. 211 acpi_ps_get_opcode_name(op->common.
212 aml_opcode))); 212 aml_opcode)));
213 return_VALUE(FALSE); 213 return_UINT8(FALSE);
214 } 214 }
215 215
216 /* Get info on the parent. The root_op is AML_SCOPE */ 216 /* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
219 acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode); 219 acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
220 if (parent_info->class == AML_CLASS_UNKNOWN) { 220 if (parent_info->class == AML_CLASS_UNKNOWN) {
221 ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op)); 221 ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
222 return_VALUE(FALSE); 222 return_UINT8(FALSE);
223 } 223 }
224 224
225 /* 225 /*
@@ -307,7 +307,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
307 acpi_ps_get_opcode_name(op->common.parent->common. 307 acpi_ps_get_opcode_name(op->common.parent->common.
308 aml_opcode), op)); 308 aml_opcode), op));
309 309
310 return_VALUE(TRUE); 310 return_UINT8(TRUE);
311 311
312 result_not_used: 312 result_not_used:
313 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 313 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
316 acpi_ps_get_opcode_name(op->common.parent->common. 316 acpi_ps_get_opcode_name(op->common.parent->common.
317 aml_opcode), op)); 317 aml_opcode), op));
318 318
319 return_VALUE(FALSE); 319 return_UINT8(FALSE);
320} 320}
321 321
322/******************************************************************************* 322/*******************************************************************************
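The dsutils.c hunk above (and several hunks below) replace the generic return_VALUE() trace macro with type-specific variants such as return_UINT8() and return_UINT32(), which makes the width of the traced return value explicit. The sketch below shows the general shape of such typed trace-return macros; the macro bodies here are invented stand-ins for illustration only and do not reproduce ACPICA's actual debug plumbing (function-name capture, nesting depth, debug levels).

#include <stdio.h>

/* Hypothetical trace helper; the real macros also honor debug levels. */
#define TRACE_EXIT(fmt, value) \
    fprintf(stderr, "%s: exit, returning " fmt "\n", __func__, (value))

/* Type-specific trace-return macros: the name documents the width of what
 * is logged and returned, which is the point of the rename in the hunks. */
#define return_UINT8(value) \
    do { TRACE_EXIT("%u", (unsigned int)(unsigned char)(value)); return (value); } while (0)
#define return_UINT32(value) \
    do { TRACE_EXIT("0x%X", (unsigned int)(value)); return (value); } while (0)

static unsigned char result_is_used(int op)
{
    if (op < 0)
        return_UINT8(0);      /* traced as a plain unsigned value */
    return_UINT8(1);
}

int main(void)
{
    return result_is_used(5) ? 0 : 1;
}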
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 44f8325c2bae..e2199a947470 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -693,7 +693,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
693 default: 693 default:
694 694
695 ACPI_ERROR((AE_INFO, 695 ACPI_ERROR((AE_INFO,
696 "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p", 696 "Unimplemented opcode, class=0x%X type=0x%X Opcode=0x%X Op=%p",
697 op_class, op_type, op->common.aml_opcode, 697 op_class, op_type, op->common.aml_opcode,
698 op)); 698 op));
699 699
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index b8ea0b26cde3..83cd45f4a870 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -257,6 +257,8 @@ u32 acpi_ev_fixed_event_detect(void)
257 * 257 *
258 * DESCRIPTION: Clears the status bit for the requested event, calls the 258 * DESCRIPTION: Clears the status bit for the requested event, calls the
259 * handler that previously registered for the event. 259 * handler that previously registered for the event.
260 * NOTE: If there is no handler for the event, the event is
261 * disabled to prevent further interrupts.
260 * 262 *
261 ******************************************************************************/ 263 ******************************************************************************/
262 264
@@ -271,17 +273,17 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
271 status_register_id, ACPI_CLEAR_STATUS); 273 status_register_id, ACPI_CLEAR_STATUS);
272 274
273 /* 275 /*
274 * Make sure we've got a handler. If not, report an error. The event is 276 * Make sure that a handler exists. If not, report an error
275 * disabled to prevent further interrupts. 277 * and disable the event to prevent further interrupts.
276 */ 278 */
277 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 279 if (!acpi_gbl_fixed_event_handlers[event].handler) {
278 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. 280 (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
279 enable_register_id, 281 enable_register_id,
280 ACPI_DISABLE_EVENT); 282 ACPI_DISABLE_EVENT);
281 283
282 ACPI_ERROR((AE_INFO, 284 ACPI_ERROR((AE_INFO,
283 "No installed handler for fixed event [0x%08X]", 285 "No installed handler for fixed event - %s (%u), disabling",
284 event)); 286 acpi_ut_get_event_name(event), event));
285 287
286 return (ACPI_INTERRUPT_NOT_HANDLED); 288 return (ACPI_INTERRUPT_NOT_HANDLED);
287 } 289 }
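The evevent.c change above both documents and reports the fallback behavior when a fixed event fires with no registered handler: the event is disabled so it cannot keep interrupting, and the failure is logged with the event name. Below is a condensed, self-contained sketch of that dispatch pattern, with a hypothetical handler table and disable helper standing in for ACPICA's real structures.

#include <stdio.h>

typedef unsigned int (*event_handler)(void *context);

struct fixed_event_slot {
    event_handler handler;
    void *context;
};

static struct fixed_event_slot handlers[4];   /* one slot per fixed event */

static void disable_event(unsigned int event)
{
    /* In ACPICA this would clear the event's enable bit in hardware. */
    printf("event %u disabled\n", event);
}

/*
 * Dispatch pattern from the hunk above: if no handler was installed,
 * disable the event so it cannot keep interrupting, report it, and
 * tell the caller the interrupt was not handled.
 */
static unsigned int dispatch_fixed_event(unsigned int event)
{
    if (!handlers[event].handler) {
        disable_event(event);
        fprintf(stderr, "No installed handler for fixed event %u, disabling\n",
                event);
        return 0;             /* "interrupt not handled" */
    }
    return handlers[event].handler(handlers[event].context);
}

int main(void)
{
    (void)dispatch_fixed_event(2);   /* no handler registered: disabled and reported */
    return 0;
}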
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b9adb9a7ed85..a493b528f8f9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -707,7 +707,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
707 if (ACPI_FAILURE(status)) { 707 if (ACPI_FAILURE(status)) {
708 ACPI_EXCEPTION((AE_INFO, status, 708 ACPI_EXCEPTION((AE_INFO, status,
709 "Unable to clear GPE%02X", gpe_number)); 709 "Unable to clear GPE%02X", gpe_number));
710 return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); 710 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
711 } 711 }
712 } 712 }
713 713
@@ -724,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
724 if (ACPI_FAILURE(status)) { 724 if (ACPI_FAILURE(status)) {
725 ACPI_EXCEPTION((AE_INFO, status, 725 ACPI_EXCEPTION((AE_INFO, status,
726 "Unable to disable GPE%02X", gpe_number)); 726 "Unable to disable GPE%02X", gpe_number));
727 return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); 727 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
728 } 728 }
729 729
730 /* 730 /*
@@ -784,7 +784,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
784 break; 784 break;
785 } 785 }
786 786
787 return_VALUE(ACPI_INTERRUPT_HANDLED); 787 return_UINT32(ACPI_INTERRUPT_HANDLED);
788} 788}
789 789
790#endif /* !ACPI_REDUCED_HARDWARE */ 790#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f4b43bede015..b905acf7aacd 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -89,7 +89,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
89 */ 89 */
90 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); 90 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
91 91
92 return_VALUE(interrupt_handled); 92 return_UINT32(interrupt_handled);
93} 93}
94 94
95/******************************************************************************* 95/*******************************************************************************
@@ -120,7 +120,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
120 120
121 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); 121 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
122 122
123 return_VALUE(interrupt_handled); 123 return_UINT32(interrupt_handled);
124} 124}
125 125
126/****************************************************************************** 126/******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ddffd6847914..ca5fba99c33b 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -467,9 +467,9 @@ acpi_install_fixed_event_handler(u32 event,
467 return_ACPI_STATUS(status); 467 return_ACPI_STATUS(status);
468 } 468 }
469 469
470 /* Don't allow two handlers. */ 470 /* Do not allow multiple handlers */
471 471
472 if (NULL != acpi_gbl_fixed_event_handlers[event].handler) { 472 if (acpi_gbl_fixed_event_handlers[event].handler) {
473 status = AE_ALREADY_EXISTS; 473 status = AE_ALREADY_EXISTS;
474 goto cleanup; 474 goto cleanup;
475 } 475 }
@@ -483,8 +483,9 @@ acpi_install_fixed_event_handler(u32 event,
483 if (ACPI_SUCCESS(status)) 483 if (ACPI_SUCCESS(status))
484 status = acpi_enable_event(event, 0); 484 status = acpi_enable_event(event, 0);
485 if (ACPI_FAILURE(status)) { 485 if (ACPI_FAILURE(status)) {
486 ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X", 486 ACPI_WARNING((AE_INFO,
487 event)); 487 "Could not enable fixed event - %s (%u)",
488 acpi_ut_get_event_name(event), event));
488 489
489 /* Remove the handler */ 490 /* Remove the handler */
490 491
@@ -492,7 +493,8 @@ acpi_install_fixed_event_handler(u32 event,
492 acpi_gbl_fixed_event_handlers[event].context = NULL; 493 acpi_gbl_fixed_event_handlers[event].context = NULL;
493 } else { 494 } else {
494 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 495 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
495 "Enabled fixed event %X, Handler=%p\n", event, 496 "Enabled fixed event %s (%X), Handler=%p\n",
497 acpi_ut_get_event_name(event), event,
496 handler)); 498 handler));
497 } 499 }
498 500
@@ -544,11 +546,12 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
544 546
545 if (ACPI_FAILURE(status)) { 547 if (ACPI_FAILURE(status)) {
546 ACPI_WARNING((AE_INFO, 548 ACPI_WARNING((AE_INFO,
547 "Could not write to fixed event enable register 0x%X", 549 "Could not disable fixed event - %s (%u)",
548 event)); 550 acpi_ut_get_event_name(event), event));
549 } else { 551 } else {
550 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n", 552 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
551 event)); 553 "Disabled fixed event - %s (%X)\n",
554 acpi_ut_get_event_name(event), event));
552 } 555 }
553 556
554 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 557 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d6e4e42316db..7039606a0ba8 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -74,6 +74,12 @@ acpi_status acpi_enable(void)
74 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 74 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
75 } 75 }
76 76
77 /* If the Hardware Reduced flag is set, machine is always in acpi mode */
78
79 if (acpi_gbl_reduced_hardware) {
80 return_ACPI_STATUS(AE_OK);
81 }
82
77 /* Check current mode */ 83 /* Check current mode */
78 84
79 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 85 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
@@ -126,6 +132,12 @@ acpi_status acpi_disable(void)
126 132
127 ACPI_FUNCTION_TRACE(acpi_disable); 133 ACPI_FUNCTION_TRACE(acpi_disable);
128 134
135 /* If the Hardware Reduced flag is set, machine is always in acpi mode */
136
137 if (acpi_gbl_reduced_hardware) {
138 return_ACPI_STATUS(AE_OK);
139 }
140
129 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { 141 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
130 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 142 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
131 "System is already in legacy (non-ACPI) mode\n")); 143 "System is already in legacy (non-ACPI) mode\n"));
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index e491e46f17df..b0838a4ea53e 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -257,7 +257,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
257 union acpi_operand_object *return_desc = NULL; 257 union acpi_operand_object *return_desc = NULL;
258 u64 index; 258 u64 index;
259 acpi_status status = AE_OK; 259 acpi_status status = AE_OK;
260 acpi_size length; 260 acpi_size length = 0;
261 261
262 ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R, 262 ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
263 acpi_ps_get_opcode_name(walk_state->opcode)); 263 acpi_ps_get_opcode_name(walk_state->opcode));
@@ -320,7 +320,6 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
320 * NOTE: A length of zero is ok, and will create a zero-length, null 320 * NOTE: A length of zero is ok, and will create a zero-length, null
321 * terminated string. 321 * terminated string.
322 */ 322 */
323 length = 0;
324 while ((length < operand[0]->buffer.length) && 323 while ((length < operand[0]->buffer.length) &&
325 (length < operand[1]->integer.value) && 324 (length < operand[1]->integer.value) &&
326 (operand[0]->buffer.pointer[length])) { 325 (operand[0]->buffer.pointer[length])) {
@@ -376,6 +375,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
376 case ACPI_TYPE_STRING: 375 case ACPI_TYPE_STRING:
377 376
378 if (index >= operand[0]->string.length) { 377 if (index >= operand[0]->string.length) {
378 length = operand[0]->string.length;
379 status = AE_AML_STRING_LIMIT; 379 status = AE_AML_STRING_LIMIT;
380 } 380 }
381 381
@@ -386,6 +386,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
386 case ACPI_TYPE_BUFFER: 386 case ACPI_TYPE_BUFFER:
387 387
388 if (index >= operand[0]->buffer.length) { 388 if (index >= operand[0]->buffer.length) {
389 length = operand[0]->buffer.length;
389 status = AE_AML_BUFFER_LIMIT; 390 status = AE_AML_BUFFER_LIMIT;
390 } 391 }
391 392
@@ -396,6 +397,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
396 case ACPI_TYPE_PACKAGE: 397 case ACPI_TYPE_PACKAGE:
397 398
398 if (index >= operand[0]->package.count) { 399 if (index >= operand[0]->package.count) {
400 length = operand[0]->package.count;
399 status = AE_AML_PACKAGE_LIMIT; 401 status = AE_AML_PACKAGE_LIMIT;
400 } 402 }
401 403
@@ -414,8 +416,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
414 416
415 if (ACPI_FAILURE(status)) { 417 if (ACPI_FAILURE(status)) {
416 ACPI_EXCEPTION((AE_INFO, status, 418 ACPI_EXCEPTION((AE_INFO, status,
417 "Index (0x%8.8X%8.8X) is beyond end of object", 419 "Index (0x%X%8.8X) is beyond end of object (length 0x%X)",
418 ACPI_FORMAT_UINT64(index))); 420 ACPI_FORMAT_UINT64(index),
421 (u32)length));
419 goto cleanup; 422 goto cleanup;
420 } 423 }
421 424
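
The exoparg2.c change initializes length up front and records the object length whenever an Index operand is out of bounds, so the exception message can report both the index and the length. A standalone sketch of that bounds check (a hypothetical helper, not the AML Index operator itself):

/* Standalone sketch - hypothetical helper, not the AML Index operator */
#include <stdio.h>
#include <stdint.h>

static int check_index(uint64_t index, uint32_t length)
{
        if (index >= length) {
                fprintf(stderr,
                        "Index (0x%llX) is beyond end of object (length 0x%X)\n",
                        (unsigned long long)index, length);
                return -1;
        }
        return 0;
}

int main(void)
{
        return check_index(10, 4) ? 1 : 0;      /* reports length 0x4 */
}
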
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index d6eab81f54fb..6b728aef2dca 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -276,7 +276,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
276 /* Invalid field access type */ 276 /* Invalid field access type */
277 277
278 ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access)); 278 ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
279 return_VALUE(0); 279 return_UINT32(0);
280 } 280 }
281 281
282 if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) { 282 if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
289 } 289 }
290 290
291 *return_byte_alignment = byte_alignment; 291 *return_byte_alignment = byte_alignment;
292 return_VALUE(bit_length); 292 return_UINT32(bit_length);
293} 293}
294 294
295/******************************************************************************* 295/*******************************************************************************
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index b205cbb4b50c..99dc7b287d55 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -340,7 +340,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
340 /* u64 is unsigned, so we don't worry about a '-' prefix */ 340 /* u64 is unsigned, so we don't worry about a '-' prefix */
341 341
342 if (value == 0) { 342 if (value == 0) {
343 return_VALUE(1); 343 return_UINT32(1);
344 } 344 }
345 345
346 current_value = value; 346 current_value = value;
@@ -354,7 +354,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
354 num_digits++; 354 num_digits++;
355 } 355 }
356 356
357 return_VALUE(num_digits); 357 return_UINT32(num_digits);
358} 358}
359 359
360/******************************************************************************* 360/*******************************************************************************
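
The exutils.c context above is acpi_ex_digits_needed(), which counts how many digits a value requires in a given base and treats zero as one digit. A standalone sketch of that computation, under the assumption that one division per output digit is performed:

/* Standalone sketch of a digits-needed computation; zero needs one digit */
#include <stdio.h>
#include <stdint.h>

static unsigned int digits_needed(uint64_t value, unsigned int base)
{
        unsigned int num_digits = 0;

        if (value == 0) {
                return 1;
        }
        while (value) {
                value /= base;          /* one division per output digit */
                num_digits++;
        }
        return num_digits;
}

int main(void)
{
        printf("%u\n", digits_needed(0xFFFF, 16));      /* prints 4 */
        return 0;
}
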
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index deb3f61e2bd1..579c3a53ac87 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -66,6 +66,12 @@ acpi_status acpi_hw_set_mode(u32 mode)
66 66
67 ACPI_FUNCTION_TRACE(hw_set_mode); 67 ACPI_FUNCTION_TRACE(hw_set_mode);
68 68
69 /* If the Hardware Reduced flag is set, machine is always in acpi mode */
70
71 if (acpi_gbl_reduced_hardware) {
72 return_ACPI_STATUS(AE_OK);
73 }
74
69 /* 75 /*
70 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 76 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
71 * system does not support mode transition. 77 * system does not support mode transition.
@@ -146,23 +152,29 @@ u32 acpi_hw_get_mode(void)
146 152
147 ACPI_FUNCTION_TRACE(hw_get_mode); 153 ACPI_FUNCTION_TRACE(hw_get_mode);
148 154
155 /* If the Hardware Reduced flag is set, machine is always in acpi mode */
156
157 if (acpi_gbl_reduced_hardware) {
158 return_UINT32(ACPI_SYS_MODE_ACPI);
159 }
160
149 /* 161 /*
150 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 162 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
151 * system does not support mode transition. 163 * system does not support mode transition.
152 */ 164 */
153 if (!acpi_gbl_FADT.smi_command) { 165 if (!acpi_gbl_FADT.smi_command) {
154 return_VALUE(ACPI_SYS_MODE_ACPI); 166 return_UINT32(ACPI_SYS_MODE_ACPI);
155 } 167 }
156 168
157 status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value); 169 status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
158 if (ACPI_FAILURE(status)) { 170 if (ACPI_FAILURE(status)) {
159 return_VALUE(ACPI_SYS_MODE_LEGACY); 171 return_UINT32(ACPI_SYS_MODE_LEGACY);
160 } 172 }
161 173
162 if (value) { 174 if (value) {
163 return_VALUE(ACPI_SYS_MODE_ACPI); 175 return_UINT32(ACPI_SYS_MODE_ACPI);
164 } else { 176 } else {
165 return_VALUE(ACPI_SYS_MODE_LEGACY); 177 return_UINT32(ACPI_SYS_MODE_LEGACY);
166 } 178 }
167} 179}
168 180
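
acpi_hw_get_mode() now reports ACPI mode unconditionally on hardware-reduced systems and otherwise falls back to reading the SCI enable bit. A standalone sketch of that decision, with a plain variable standing in for the bit-register read:

/* Standalone sketch - a variable stands in for the SCI_ENABLE register read */
#include <stdbool.h>
#include <stdio.h>

enum sys_mode { MODE_LEGACY, MODE_ACPI };

static bool reduced_hardware;           /* from the FADT flags in real code */
static unsigned int sci_enable_bit;     /* stand-in for a PM1 control read */

static enum sys_mode get_mode(void)
{
        if (reduced_hardware) {
                return MODE_ACPI;       /* no SCI_EN bit to consult */
        }
        return sci_enable_bit ? MODE_ACPI : MODE_LEGACY;
}

int main(void)
{
        sci_enable_bit = 1;
        printf("mode=%d\n", get_mode());        /* prints mode=1 (ACPI) */
        return 0;
}
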
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
new file mode 100644
index 000000000000..8f79a9d2d50e
--- /dev/null
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -0,0 +1,443 @@
1/******************************************************************************
2 *
3 * Module Name: nsconvert - Object conversions for objects returned by
4 * predefined methods
5 *
6 *****************************************************************************/
7
8/*
9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19 * substantially similar to the "NO WARRANTY" disclaimer below
20 * ("Disclaimer") and any redistribution must be conditioned upon
21 * including a substantially similar Disclaimer requirement for further
22 * binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
30 *
31 * NO WARRANTY
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGES.
43 */
44
45#include <acpi/acpi.h>
46#include "accommon.h"
47#include "acnamesp.h"
48#include "acinterp.h"
49#include "acpredef.h"
50#include "amlresrc.h"
51
52#define _COMPONENT ACPI_NAMESPACE
53ACPI_MODULE_NAME("nsconvert")
54
55/*******************************************************************************
56 *
57 * FUNCTION: acpi_ns_convert_to_integer
58 *
59 * PARAMETERS: original_object - Object to be converted
60 * return_object - Where the new converted object is returned
61 *
62 * RETURN: Status. AE_OK if conversion was successful.
63 *
64 * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
65 *
66 ******************************************************************************/
67acpi_status
68acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
69 union acpi_operand_object **return_object)
70{
71 union acpi_operand_object *new_object;
72 acpi_status status;
73 u64 value = 0;
74 u32 i;
75
76 switch (original_object->common.type) {
77 case ACPI_TYPE_STRING:
78
79 /* String-to-Integer conversion */
80
81 status = acpi_ut_strtoul64(original_object->string.pointer,
82 ACPI_ANY_BASE, &value);
83 if (ACPI_FAILURE(status)) {
84 return (status);
85 }
86 break;
87
88 case ACPI_TYPE_BUFFER:
89
90 /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
91
92 if (original_object->buffer.length > 8) {
93 return (AE_AML_OPERAND_TYPE);
94 }
95
96 /* Extract each buffer byte to create the integer */
97
98 for (i = 0; i < original_object->buffer.length; i++) {
99 value |=
100 ((u64)original_object->buffer.
101 pointer[i] << (i * 8));
102 }
103 break;
104
105 default:
106 return (AE_AML_OPERAND_TYPE);
107 }
108
109 new_object = acpi_ut_create_integer_object(value);
110 if (!new_object) {
111 return (AE_NO_MEMORY);
112 }
113
114 *return_object = new_object;
115 return (AE_OK);
116}
117
118/*******************************************************************************
119 *
120 * FUNCTION: acpi_ns_convert_to_string
121 *
122 * PARAMETERS: original_object - Object to be converted
123 * return_object - Where the new converted object is returned
124 *
125 * RETURN: Status. AE_OK if conversion was successful.
126 *
127 * DESCRIPTION: Attempt to convert an Integer/Buffer object to a String.

128 *
129 ******************************************************************************/
130
131acpi_status
132acpi_ns_convert_to_string(union acpi_operand_object *original_object,
133 union acpi_operand_object **return_object)
134{
135 union acpi_operand_object *new_object;
136 acpi_size length;
137 acpi_status status;
138
139 switch (original_object->common.type) {
140 case ACPI_TYPE_INTEGER:
141 /*
142 * Integer-to-String conversion. Commonly, convert
143 * an integer of value 0 to a NULL string. The last element of
144 * _BIF and _BIX packages occasionally needs this fix.
145 */
146 if (original_object->integer.value == 0) {
147
148 /* Allocate a new NULL string object */
149
150 new_object = acpi_ut_create_string_object(0);
151 if (!new_object) {
152 return (AE_NO_MEMORY);
153 }
154 } else {
155 status =
156 acpi_ex_convert_to_string(original_object,
157 &new_object,
158 ACPI_IMPLICIT_CONVERT_HEX);
159 if (ACPI_FAILURE(status)) {
160 return (status);
161 }
162 }
163 break;
164
165 case ACPI_TYPE_BUFFER:
166 /*
167 * Buffer-to-String conversion. Use a to_string
168 * conversion, no transform performed on the buffer data. The best
169 * example of this is the _BIF method, where the string data from
170 * the battery is often (incorrectly) returned as buffer object(s).
171 */
172 length = 0;
173 while ((length < original_object->buffer.length) &&
174 (original_object->buffer.pointer[length])) {
175 length++;
176 }
177
178 /* Allocate a new string object */
179
180 new_object = acpi_ut_create_string_object(length);
181 if (!new_object) {
182 return (AE_NO_MEMORY);
183 }
184
185 /*
186 * Copy the raw buffer data with no transform. String is already NULL
187 * terminated at Length+1.
188 */
189 ACPI_MEMCPY(new_object->string.pointer,
190 original_object->buffer.pointer, length);
191 break;
192
193 default:
194 return (AE_AML_OPERAND_TYPE);
195 }
196
197 *return_object = new_object;
198 return (AE_OK);
199}
200
201/*******************************************************************************
202 *
203 * FUNCTION: acpi_ns_convert_to_buffer
204 *
205 * PARAMETERS: original_object - Object to be converted
206 * return_object - Where the new converted object is returned
207 *
208 * RETURN: Status. AE_OK if conversion was successful.
209 *
210 * DESCRIPTION: Attempt to convert an Integer/String/Package object to a Buffer.
211 *
212 ******************************************************************************/
213
214acpi_status
215acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
216 union acpi_operand_object **return_object)
217{
218 union acpi_operand_object *new_object;
219 acpi_status status;
220 union acpi_operand_object **elements;
221 u32 *dword_buffer;
222 u32 count;
223 u32 i;
224
225 switch (original_object->common.type) {
226 case ACPI_TYPE_INTEGER:
227 /*
228 * Integer-to-Buffer conversion.
229 * Convert the Integer to a packed-byte buffer. _MAT and other
230 * objects need this sometimes, if a read has been performed on a
231 * Field object that is less than or equal to the global integer
232 * size (32 or 64 bits).
233 */
234 status =
235 acpi_ex_convert_to_buffer(original_object, &new_object);
236 if (ACPI_FAILURE(status)) {
237 return (status);
238 }
239 break;
240
241 case ACPI_TYPE_STRING:
242
243 /* String-to-Buffer conversion. Simple data copy */
244
245 new_object =
246 acpi_ut_create_buffer_object(original_object->string.
247 length);
248 if (!new_object) {
249 return (AE_NO_MEMORY);
250 }
251
252 ACPI_MEMCPY(new_object->buffer.pointer,
253 original_object->string.pointer,
254 original_object->string.length);
255 break;
256
257 case ACPI_TYPE_PACKAGE:
258 /*
259 * This case is often seen for predefined names that must return a
260 * Buffer object with multiple DWORD integers within. For example,
261 * _FDE and _GTM. The Package can be converted to a Buffer.
262 */
263
264 /* All elements of the Package must be integers */
265
266 elements = original_object->package.elements;
267 count = original_object->package.count;
268
269 for (i = 0; i < count; i++) {
270 if ((!*elements) ||
271 ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
272 return (AE_AML_OPERAND_TYPE);
273 }
274 elements++;
275 }
276
277 /* Create the new buffer object to replace the Package */
278
279 new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
280 if (!new_object) {
281 return (AE_NO_MEMORY);
282 }
283
284 /* Copy the package elements (integers) to the buffer as DWORDs */
285
286 elements = original_object->package.elements;
287 dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
288
289 for (i = 0; i < count; i++) {
290 *dword_buffer = (u32)(*elements)->integer.value;
291 dword_buffer++;
292 elements++;
293 }
294 break;
295
296 default:
297 return (AE_AML_OPERAND_TYPE);
298 }
299
300 *return_object = new_object;
301 return (AE_OK);
302}
303
304/*******************************************************************************
305 *
306 * FUNCTION: acpi_ns_convert_to_unicode
307 *
308 * PARAMETERS: original_object - ASCII String Object to be converted
309 * return_object - Where the new converted object is returned
310 *
311 * RETURN: Status. AE_OK if conversion was successful.
312 *
313 * DESCRIPTION: Attempt to convert a String object to a Unicode string Buffer.
314 *
315 ******************************************************************************/
316
317acpi_status
318acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
319 union acpi_operand_object **return_object)
320{
321 union acpi_operand_object *new_object;
322 char *ascii_string;
323 u16 *unicode_buffer;
324 u32 unicode_length;
325 u32 i;
326
327 if (!original_object) {
328 return (AE_OK);
329 }
330
331 /* If a Buffer was returned, it must be at least two bytes long */
332
333 if (original_object->common.type == ACPI_TYPE_BUFFER) {
334 if (original_object->buffer.length < 2) {
335 return (AE_AML_OPERAND_VALUE);
336 }
337
338 *return_object = NULL;
339 return (AE_OK);
340 }
341
342 /*
343 * The original object is an ASCII string. Convert this string to
344 * a unicode buffer.
345 */
346 ascii_string = original_object->string.pointer;
347 unicode_length = (original_object->string.length * 2) + 2;
348
349 /* Create a new buffer object for the Unicode data */
350
351 new_object = acpi_ut_create_buffer_object(unicode_length);
352 if (!new_object) {
353 return (AE_NO_MEMORY);
354 }
355
356 unicode_buffer = ACPI_CAST_PTR(u16, new_object->buffer.pointer);
357
358 /* Convert ASCII to Unicode */
359
360 for (i = 0; i < original_object->string.length; i++) {
361 unicode_buffer[i] = (u16)ascii_string[i];
362 }
363
364 *return_object = new_object;
365 return (AE_OK);
366}
367
368/*******************************************************************************
369 *
370 * FUNCTION: acpi_ns_convert_to_resource
371 *
372 * PARAMETERS: original_object - Object to be converted
373 * return_object - Where the new converted object is returned
374 *
375 * RETURN: Status. AE_OK if conversion was successful
376 *
377 * DESCRIPTION: Attempt to convert an Integer object to a resource_template
378 * Buffer.
379 *
380 ******************************************************************************/
381
382acpi_status
383acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
384 union acpi_operand_object **return_object)
385{
386 union acpi_operand_object *new_object;
387 u8 *buffer;
388
389 /*
390 * We can fix the following cases for an expected resource template:
391 * 1. No return value (interpreter slack mode is disabled)
392 * 2. A "Return (Zero)" statement
393 * 3. A "Return empty buffer" statement
394 *
395 * We will return a buffer containing a single end_tag
396 * resource descriptor.
397 */
398 if (original_object) {
399 switch (original_object->common.type) {
400 case ACPI_TYPE_INTEGER:
401
402 /* We can only repair an Integer==0 */
403
404 if (original_object->integer.value) {
405 return (AE_AML_OPERAND_TYPE);
406 }
407 break;
408
409 case ACPI_TYPE_BUFFER:
410
411 if (original_object->buffer.length) {
412
413 /* Additional checks can be added in the future */
414
415 *return_object = NULL;
416 return (AE_OK);
417 }
418 break;
419
420 case ACPI_TYPE_STRING:
421 default:
422
423 return (AE_AML_OPERAND_TYPE);
424 }
425 }
426
427 /* Create the new buffer object for the resource descriptor */
428
429 new_object = acpi_ut_create_buffer_object(2);
430 if (!new_object) {
431 return (AE_NO_MEMORY);
432 }
433
434 buffer = ACPI_CAST_PTR(u8, new_object->buffer.pointer);
435
436 /* Initialize the Buffer with a single end_tag descriptor */
437
438 buffer[0] = (ACPI_RESOURCE_NAME_END_TAG | ASL_RDESC_END_TAG_SIZE);
439 buffer[1] = 0x00;
440
441 *return_object = new_object;
442 return (AE_OK);
443}
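
The new nsconvert.c file collects the object-conversion helpers used during return-value repair; the buffer case of acpi_ns_convert_to_integer packs up to eight bytes, little-endian, into a 64-bit value. A standalone sketch of just that packing step, using plain C types instead of ACPICA operand objects:

/* Standalone sketch - plain C types instead of ACPICA operand objects */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int buffer_to_integer(const uint8_t *buf, size_t len, uint64_t *out)
{
        uint64_t value = 0;
        size_t i;

        if (len > 8) {
                return -1;              /* cannot fit in a 64-bit integer */
        }
        for (i = 0; i < len; i++) {
                value |= ((uint64_t)buf[i]) << (i * 8); /* little-endian */
        }
        *out = value;
        return 0;
}

int main(void)
{
        const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };
        uint64_t v;

        if (buffer_to_integer(buf, sizeof(buf), &v) == 0) {
                printf("0x%llX\n", (unsigned long long)v);      /* 0x12345678 */
        }
        return 0;
}
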
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 1538f3eb2a8f..b61db69d5675 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -98,17 +98,21 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
98 info->return_object = NULL; 98 info->return_object = NULL;
99 info->param_count = 0; 99 info->param_count = 0;
100 100
101 /* 101 if (!info->resolved_node) {
102 * Get the actual namespace node for the target object. Handles these cases: 102 /*
103 * 103 * Get the actual namespace node for the target object if we need to.
104 * 1) Null node, Pathname (absolute path) 104 * Handles these cases:
105 * 2) Node, Pathname (path relative to Node) 105 *
106 * 3) Node, Null Pathname 106 * 1) Null node, Pathname (absolute path)
107 */ 107 * 2) Node, Pathname (path relative to Node)
108 status = acpi_ns_get_node(info->prefix_node, info->pathname, 108 * 3) Node, Null Pathname
109 ACPI_NS_NO_UPSEARCH, &info->resolved_node); 109 */
110 if (ACPI_FAILURE(status)) { 110 status = acpi_ns_get_node(info->prefix_node, info->pathname,
111 return_ACPI_STATUS(status); 111 ACPI_NS_NO_UPSEARCH,
112 &info->resolved_node);
113 if (ACPI_FAILURE(status)) {
114 return_ACPI_STATUS(status);
115 }
112 } 116 }
113 117
114 /* 118 /*
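
The nseval.c change performs the namespace lookup only when the caller has not already supplied a resolved node. A standalone sketch of that skip-if-already-resolved pattern, with hypothetical structures in place of acpi_evaluate_info:

/* Standalone sketch - hypothetical structures in place of acpi_evaluate_info */
#include <stdio.h>
#include <stddef.h>

struct node {
        const char *path;
};

struct eval_info {
        const char *pathname;
        struct node *resolved_node;     /* may already be set by the caller */
};

static struct node lookup_result;

static int resolve_target(struct eval_info *info)
{
        if (!info->resolved_node) {     /* only do the lookup when needed */
                lookup_result.path = info->pathname;
                info->resolved_node = &lookup_result;
        }
        return 0;
}

int main(void)
{
        struct eval_info info = { "\\_SB.PCI0._CRS", NULL };

        resolve_target(&info);
        printf("resolved: %s\n", info.resolved_node->path);
        return 0;
}
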
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 224c30053401..8a52916148cb 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -76,19 +76,7 @@ static acpi_status
76acpi_ns_check_reference(struct acpi_predefined_data *data, 76acpi_ns_check_reference(struct acpi_predefined_data *data,
77 union acpi_operand_object *return_object); 77 union acpi_operand_object *return_object);
78 78
79static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes); 79static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object);
80
81/*
82 * Names for the types that can be returned by the predefined objects.
83 * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
84 */
85static const char *acpi_rtype_names[] = {
86 "/Integer",
87 "/String",
88 "/Buffer",
89 "/Package",
90 "/Reference",
91};
92 80
93/******************************************************************************* 81/*******************************************************************************
94 * 82 *
@@ -112,7 +100,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
112 acpi_status return_status, 100 acpi_status return_status,
113 union acpi_operand_object **return_object_ptr) 101 union acpi_operand_object **return_object_ptr)
114{ 102{
115 union acpi_operand_object *return_object = *return_object_ptr;
116 acpi_status status = AE_OK; 103 acpi_status status = AE_OK;
117 const union acpi_predefined_info *predefined; 104 const union acpi_predefined_info *predefined;
118 char *pathname; 105 char *pathname;
@@ -120,7 +107,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
120 107
121 /* Match the name for this method/object against the predefined list */ 108 /* Match the name for this method/object against the predefined list */
122 109
123 predefined = acpi_ns_check_for_predefined_name(node); 110 predefined = acpi_ut_match_predefined_method(node->name.ascii);
124 111
125 /* Get the full pathname to the object, for use in warning messages */ 112 /* Get the full pathname to the object, for use in warning messages */
126 113
@@ -152,25 +139,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
152 } 139 }
153 140
154 /* 141 /*
155 * If there is no return value, check if we require a return value for
156 * this predefined name. Either one return value is expected, or none,
157 * for both methods and other objects.
158 *
159 * Exit now if there is no return object. Warning if one was expected.
160 */
161 if (!return_object) {
162 if ((predefined->info.expected_btypes) &&
163 (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
164 ACPI_WARN_PREDEFINED((AE_INFO, pathname,
165 ACPI_WARN_ALWAYS,
166 "Missing expected return value"));
167
168 status = AE_AML_NO_RETURN_VALUE;
169 }
170 goto cleanup;
171 }
172
173 /*
174 * Return value validation and possible repair. 142 * Return value validation and possible repair.
175 * 143 *
176 * 1) Don't perform return value validation/repair if this feature 144 * 1) Don't perform return value validation/repair if this feature
@@ -310,8 +278,10 @@ acpi_ns_check_parameter_count(char *pathname,
310 * Validate the user-supplied parameter count. 278 * Validate the user-supplied parameter count.
311 * Allow two different legal argument counts (_SCP, etc.) 279 * Allow two different legal argument counts (_SCP, etc.)
312 */ 280 */
313 required_params_current = predefined->info.param_count & 0x0F; 281 required_params_current =
314 required_params_old = predefined->info.param_count >> 4; 282 predefined->info.argument_list & METHOD_ARG_MASK;
283 required_params_old =
284 predefined->info.argument_list >> METHOD_ARG_BIT_WIDTH;
315 285
316 if (user_param_count != ACPI_UINT32_MAX) { 286 if (user_param_count != ACPI_UINT32_MAX) {
317 if ((user_param_count != required_params_current) && 287 if ((user_param_count != required_params_current) &&
@@ -340,52 +310,6 @@ acpi_ns_check_parameter_count(char *pathname,
340 310
341/******************************************************************************* 311/*******************************************************************************
342 * 312 *
343 * FUNCTION: acpi_ns_check_for_predefined_name
344 *
345 * PARAMETERS: node - Namespace node for the method/object
346 *
347 * RETURN: Pointer to entry in predefined table. NULL indicates not found.
348 *
349 * DESCRIPTION: Check an object name against the predefined object list.
350 *
351 ******************************************************************************/
352
353const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
354 acpi_namespace_node
355 *node)
356{
357 const union acpi_predefined_info *this_name;
358
359 /* Quick check for a predefined name, first character must be underscore */
360
361 if (node->name.ascii[0] != '_') {
362 return (NULL);
363 }
364
365 /* Search info table for a predefined method/object name */
366
367 this_name = predefined_names;
368 while (this_name->info.name[0]) {
369 if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
370 return (this_name);
371 }
372
373 /*
374 * Skip next entry in the table if this name returns a Package
375 * (next entry contains the package info)
376 */
377 if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
378 this_name++;
379 }
380
381 this_name++;
382 }
383
384 return (NULL); /* Not found */
385}
386
387/*******************************************************************************
388 *
389 * FUNCTION: acpi_ns_check_object_type 313 * FUNCTION: acpi_ns_check_object_type
390 * 314 *
391 * PARAMETERS: data - Pointer to validation data structure 315 * PARAMETERS: data - Pointer to validation data structure
@@ -410,28 +334,12 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
410{ 334{
411 union acpi_operand_object *return_object = *return_object_ptr; 335 union acpi_operand_object *return_object = *return_object_ptr;
412 acpi_status status = AE_OK; 336 acpi_status status = AE_OK;
413 u32 return_btype;
414 char type_buffer[48]; /* Room for 5 types */ 337 char type_buffer[48]; /* Room for 5 types */
415 338
416 /*
417 * If we get a NULL return_object here, it is a NULL package element.
418 * Since all extraneous NULL package elements were removed earlier by a
419 * call to acpi_ns_remove_null_elements, this is an unexpected NULL element.
420 * We will attempt to repair it.
421 */
422 if (!return_object) {
423 status = acpi_ns_repair_null_element(data, expected_btypes,
424 package_index,
425 return_object_ptr);
426 if (ACPI_SUCCESS(status)) {
427 return (AE_OK); /* Repair was successful */
428 }
429 goto type_error_exit;
430 }
431
432 /* A Namespace node should not get here, but make sure */ 339 /* A Namespace node should not get here, but make sure */
433 340
434 if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) { 341 if (return_object &&
342 ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
435 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, 343 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
436 "Invalid return type - Found a Namespace node [%4.4s] type %s", 344 "Invalid return type - Found a Namespace node [%4.4s] type %s",
437 return_object->node.name.ascii, 345 return_object->node.name.ascii,
@@ -448,59 +356,31 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
448 * from all of the predefined names (including elements of returned 356 * from all of the predefined names (including elements of returned
449 * packages) 357 * packages)
450 */ 358 */
451 switch (return_object->common.type) { 359 data->return_btype = acpi_ns_get_bitmapped_type(return_object);
452 case ACPI_TYPE_INTEGER: 360 if (data->return_btype == ACPI_RTYPE_ANY) {
453 return_btype = ACPI_RTYPE_INTEGER;
454 break;
455
456 case ACPI_TYPE_BUFFER:
457 return_btype = ACPI_RTYPE_BUFFER;
458 break;
459
460 case ACPI_TYPE_STRING:
461 return_btype = ACPI_RTYPE_STRING;
462 break;
463 361
464 case ACPI_TYPE_PACKAGE:
465 return_btype = ACPI_RTYPE_PACKAGE;
466 break;
467
468 case ACPI_TYPE_LOCAL_REFERENCE:
469 return_btype = ACPI_RTYPE_REFERENCE;
470 break;
471
472 default:
473 /* Not one of the supported objects, must be incorrect */ 362 /* Not one of the supported objects, must be incorrect */
474
475 goto type_error_exit; 363 goto type_error_exit;
476 } 364 }
477 365
478 /* Is the object one of the expected types? */ 366 /* For reference objects, check that the reference type is correct */
479
480 if (return_btype & expected_btypes) {
481
482 /* For reference objects, check that the reference type is correct */
483
484 if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
485 status = acpi_ns_check_reference(data, return_object);
486 }
487 367
368 if ((data->return_btype & expected_btypes) == ACPI_RTYPE_REFERENCE) {
369 status = acpi_ns_check_reference(data, return_object);
488 return (status); 370 return (status);
489 } 371 }
490 372
491 /* Type mismatch -- attempt repair of the returned object */ 373 /* Attempt simple repair of the returned object if necessary */
492 374
493 status = acpi_ns_repair_object(data, expected_btypes, 375 status = acpi_ns_simple_repair(data, expected_btypes,
494 package_index, return_object_ptr); 376 package_index, return_object_ptr);
495 if (ACPI_SUCCESS(status)) { 377 return (status);
496 return (AE_OK); /* Repair was successful */
497 }
498 378
499 type_error_exit: 379 type_error_exit:
500 380
501 /* Create a string with all expected types for this predefined object */ 381 /* Create a string with all expected types for this predefined object */
502 382
503 acpi_ns_get_expected_types(type_buffer, expected_btypes); 383 acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
504 384
505 if (package_index == ACPI_NOT_PACKAGE_ELEMENT) { 385 if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
506 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, 386 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
@@ -558,36 +438,55 @@ acpi_ns_check_reference(struct acpi_predefined_data *data,
558 438
559/******************************************************************************* 439/*******************************************************************************
560 * 440 *
561 * FUNCTION: acpi_ns_get_expected_types 441 * FUNCTION: acpi_ns_get_bitmapped_type
562 * 442 *
563 * PARAMETERS: buffer - Pointer to where the string is returned 443 * PARAMETERS: return_object - Object returned from method/obj evaluation
564 * expected_btypes - Bitmap of expected return type(s)
565 * 444 *
566 * RETURN: Buffer is populated with type names. 445 * RETURN: Object return type. ACPI_RTYPE_ANY indicates that the object
446 * type is not supported. ACPI_RTYPE_NONE indicates that no
447 * object was returned (return_object is NULL).
567 * 448 *
568 * DESCRIPTION: Translate the expected types bitmap into a string of ascii 449 * DESCRIPTION: Convert object type into a bitmapped object return type.
569 * names of expected types, for use in warning messages.
570 * 450 *
571 ******************************************************************************/ 451 ******************************************************************************/
572 452
573static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes) 453static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object)
574{ 454{
575 u32 this_rtype; 455 u32 return_btype;
576 u32 i;
577 u32 j;
578 456
579 j = 1; 457 if (!return_object) {
580 buffer[0] = 0; 458 return (ACPI_RTYPE_NONE);
581 this_rtype = ACPI_RTYPE_INTEGER; 459 }
582 460
583 for (i = 0; i < ACPI_NUM_RTYPES; i++) { 461 /* Map acpi_object_type to internal bitmapped type */
584 462
585 /* If one of the expected types, concatenate the name of this type */ 463 switch (return_object->common.type) {
464 case ACPI_TYPE_INTEGER:
465 return_btype = ACPI_RTYPE_INTEGER;
466 break;
586 467
587 if (expected_btypes & this_rtype) { 468 case ACPI_TYPE_BUFFER:
588 ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]); 469 return_btype = ACPI_RTYPE_BUFFER;
589 j = 0; /* Use name separator from now on */ 470 break;
590 } 471
591 this_rtype <<= 1; /* Next Rtype */ 472 case ACPI_TYPE_STRING:
473 return_btype = ACPI_RTYPE_STRING;
474 break;
475
476 case ACPI_TYPE_PACKAGE:
477 return_btype = ACPI_RTYPE_PACKAGE;
478 break;
479
480 case ACPI_TYPE_LOCAL_REFERENCE:
481 return_btype = ACPI_RTYPE_REFERENCE;
482 break;
483
484 default:
485 /* Not one of the supported objects, must be incorrect */
486
487 return_btype = ACPI_RTYPE_ANY;
488 break;
592 } 489 }
490
491 return (return_btype);
593} 492}
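
The new acpi_ns_get_bitmapped_type() maps each returned object type to one bit, so the expected-types test elsewhere in nspredef.c becomes a single mask operation. A standalone sketch of that mapping and test; the bit values here are illustrative, not the ACPICA constants:

/* Standalone sketch - bit values are illustrative, not the ACPICA constants */
#include <stdio.h>
#include <stdint.h>

enum obj_type { T_INTEGER, T_STRING, T_BUFFER, T_PACKAGE, T_REFERENCE, T_OTHER };

#define RTYPE_INTEGER   0x01
#define RTYPE_STRING    0x02
#define RTYPE_BUFFER    0x04
#define RTYPE_PACKAGE   0x08
#define RTYPE_REFERENCE 0x10
#define RTYPE_ANY       0x1F            /* "unsupported" marker in this sketch */

static uint32_t bitmapped_type(enum obj_type type)
{
        switch (type) {
        case T_INTEGER:   return RTYPE_INTEGER;
        case T_STRING:    return RTYPE_STRING;
        case T_BUFFER:    return RTYPE_BUFFER;
        case T_PACKAGE:   return RTYPE_PACKAGE;
        case T_REFERENCE: return RTYPE_REFERENCE;
        default:          return RTYPE_ANY;
        }
}

int main(void)
{
        uint32_t expected = RTYPE_INTEGER | RTYPE_BUFFER;

        /* One AND decides whether the returned type is acceptable */
        printf("acceptable=%d\n", (bitmapped_type(T_STRING) & expected) != 0);
        return 0;
}
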
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index a40155467d2e..77cdd539de16 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -112,9 +112,15 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
112 elements = return_object->package.elements; 112 elements = return_object->package.elements;
113 count = return_object->package.count; 113 count = return_object->package.count;
114 114
115 /* The package must have at least one element, else invalid */ 115 /*
116 116 * Most packages must have at least one element. The only exception
117 * is the variable-length package (ACPI_PTYPE1_VAR).
118 */
117 if (!count) { 119 if (!count) {
120 if (package->ret_info.type == ACPI_PTYPE1_VAR) {
121 return (AE_OK);
122 }
123
118 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags, 124 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
119 "Return Package has no elements (empty)")); 125 "Return Package has no elements (empty)"));
120 126
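
The nsprepkg.c change accepts an empty return Package only when the expected package type is variable length; every other package type still warns. A standalone sketch of that special case:

/* Standalone sketch of the empty-package special case */
#include <stdio.h>

enum pkg_type { PTYPE1_FIXED, PTYPE1_VARIABLE };

static int check_package(enum pkg_type expected, unsigned int count)
{
        if (!count) {
                if (expected == PTYPE1_VARIABLE) {
                        return 0;       /* zero elements is legal here */
                }
                fprintf(stderr, "Return Package has no elements (empty)\n");
                return -1;
        }
        /* ... per-element validation would follow ... */
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_package(PTYPE1_VARIABLE, 0),
               check_package(PTYPE1_FIXED, 0));
        return 0;
}
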
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9e833353c06a..18f02e4ece01 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -46,6 +46,7 @@
46#include "acnamesp.h" 46#include "acnamesp.h"
47#include "acinterp.h" 47#include "acinterp.h"
48#include "acpredef.h" 48#include "acpredef.h"
49#include "amlresrc.h"
49 50
50#define _COMPONENT ACPI_NAMESPACE 51#define _COMPONENT ACPI_NAMESPACE
51ACPI_MODULE_NAME("nsrepair") 52ACPI_MODULE_NAME("nsrepair")
@@ -71,6 +72,11 @@ ACPI_MODULE_NAME("nsrepair")
71 * Buffer -> String 72 * Buffer -> String
72 * Buffer -> Package of Integers 73 * Buffer -> Package of Integers
73 * Package -> Package of one Package 74 * Package -> Package of one Package
75 *
76 * Additional conversions that are available:
77 * Convert a null return or zero return value to an end_tag descriptor
78 * Convert an ASCII string to a Unicode buffer
79 *
74 * An incorrect standalone object is wrapped with required outer package 80 * An incorrect standalone object is wrapped with required outer package
75 * 81 *
76 * Additional possible repairs: 82 * Additional possible repairs:
@@ -78,21 +84,51 @@ ACPI_MODULE_NAME("nsrepair")
78 * 84 *
79 ******************************************************************************/ 85 ******************************************************************************/
80/* Local prototypes */ 86/* Local prototypes */
81static acpi_status 87static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
82acpi_ns_convert_to_integer(union acpi_operand_object *original_object, 88 acpi_namespace_node
83 union acpi_operand_object **return_object); 89 *node,
84 90 u32
85static acpi_status 91 return_btype,
86acpi_ns_convert_to_string(union acpi_operand_object *original_object, 92 u32
87 union acpi_operand_object **return_object); 93 package_index);
88 94
89static acpi_status 95/*
90acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, 96 * Special but simple repairs for some names.
91 union acpi_operand_object **return_object); 97 *
98 * 2nd argument: Unexpected types that can be repaired
99 */
100static const struct acpi_simple_repair_info acpi_object_repair_info[] = {
101 /* Resource descriptor conversions */
102
103 {"_CRS",
104 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
105 ACPI_RTYPE_NONE,
106 ACPI_NOT_PACKAGE_ELEMENT,
107 acpi_ns_convert_to_resource},
108 {"_DMA",
109 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
110 ACPI_RTYPE_NONE,
111 ACPI_NOT_PACKAGE_ELEMENT,
112 acpi_ns_convert_to_resource},
113 {"_PRS",
114 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
115 ACPI_RTYPE_NONE,
116 ACPI_NOT_PACKAGE_ELEMENT,
117 acpi_ns_convert_to_resource},
118
119 /* Unicode conversions */
120
121 {"_MLS", ACPI_RTYPE_STRING, 1,
122 acpi_ns_convert_to_unicode},
123 {"_STR", ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER,
124 ACPI_NOT_PACKAGE_ELEMENT,
125 acpi_ns_convert_to_unicode},
126 {{0, 0, 0, 0}, 0, 0, NULL} /* Table terminator */
127};
92 128
93/******************************************************************************* 129/*******************************************************************************
94 * 130 *
95 * FUNCTION: acpi_ns_repair_object 131 * FUNCTION: acpi_ns_simple_repair
96 * 132 *
97 * PARAMETERS: data - Pointer to validation data structure 133 * PARAMETERS: data - Pointer to validation data structure
98 * expected_btypes - Object types expected 134 * expected_btypes - Object types expected
@@ -110,16 +146,54 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
110 ******************************************************************************/ 146 ******************************************************************************/
111 147
112acpi_status 148acpi_status
113acpi_ns_repair_object(struct acpi_predefined_data *data, 149acpi_ns_simple_repair(struct acpi_predefined_data *data,
114 u32 expected_btypes, 150 u32 expected_btypes,
115 u32 package_index, 151 u32 package_index,
116 union acpi_operand_object **return_object_ptr) 152 union acpi_operand_object **return_object_ptr)
117{ 153{
118 union acpi_operand_object *return_object = *return_object_ptr; 154 union acpi_operand_object *return_object = *return_object_ptr;
119 union acpi_operand_object *new_object; 155 union acpi_operand_object *new_object = NULL;
120 acpi_status status; 156 acpi_status status;
157 const struct acpi_simple_repair_info *predefined;
158
159 ACPI_FUNCTION_NAME(ns_simple_repair);
160
161 /*
162 * Special repairs for certain names that are in the repair table.
163 * Check if this name is in the list of repairable names.
164 */
165 predefined = acpi_ns_match_simple_repair(data->node,
166 data->return_btype,
167 package_index);
168 if (predefined) {
169 if (!return_object) {
170 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
171 ACPI_WARN_ALWAYS,
172 "Missing expected return value"));
173 }
174
175 status =
176 predefined->object_converter(return_object, &new_object);
177 if (ACPI_FAILURE(status)) {
178
179 /* A fatal error occurred during a conversion */
180
181 ACPI_EXCEPTION((AE_INFO, status,
182 "During return object analysis"));
183 return (status);
184 }
185 if (new_object) {
186 goto object_repaired;
187 }
188 }
121 189
122 ACPI_FUNCTION_NAME(ns_repair_object); 190 /*
191 * Do not perform simple object repair unless the return type is not
192 * expected.
193 */
194 if (data->return_btype & expected_btypes) {
195 return (AE_OK);
196 }
123 197
124 /* 198 /*
125 * At this point, we know that the type of the returned object was not 199 * At this point, we know that the type of the returned object was not
@@ -127,6 +201,24 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
127 * repair the object by converting it to one of the expected object 201 * repair the object by converting it to one of the expected object
128 * types for this predefined name. 202 * types for this predefined name.
129 */ 203 */
204
205 /*
206 * If there is no return value, check if we require a return value for
207 * this predefined name. Either one return value is expected, or none,
208 * for both methods and other objects.
209 *
210 * Exit now if there is no return object. Warning if one was expected.
211 */
212 if (!return_object) {
213 if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
214 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
215 ACPI_WARN_ALWAYS,
216 "Missing expected return value"));
217
218 return (AE_AML_NO_RETURN_VALUE);
219 }
220 }
221
130 if (expected_btypes & ACPI_RTYPE_INTEGER) { 222 if (expected_btypes & ACPI_RTYPE_INTEGER) {
131 status = acpi_ns_convert_to_integer(return_object, &new_object); 223 status = acpi_ns_convert_to_integer(return_object, &new_object);
132 if (ACPI_SUCCESS(status)) { 224 if (ACPI_SUCCESS(status)) {
@@ -216,254 +308,51 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
216 return (AE_OK); 308 return (AE_OK);
217} 309}
218 310
219/******************************************************************************* 311/******************************************************************************
220 *
221 * FUNCTION: acpi_ns_convert_to_integer
222 *
223 * PARAMETERS: original_object - Object to be converted
224 * return_object - Where the new converted object is returned
225 *
226 * RETURN: Status. AE_OK if conversion was successful.
227 *
228 * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
229 *
230 ******************************************************************************/
231
232static acpi_status
233acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
234 union acpi_operand_object **return_object)
235{
236 union acpi_operand_object *new_object;
237 acpi_status status;
238 u64 value = 0;
239 u32 i;
240
241 switch (original_object->common.type) {
242 case ACPI_TYPE_STRING:
243
244 /* String-to-Integer conversion */
245
246 status = acpi_ut_strtoul64(original_object->string.pointer,
247 ACPI_ANY_BASE, &value);
248 if (ACPI_FAILURE(status)) {
249 return (status);
250 }
251 break;
252
253 case ACPI_TYPE_BUFFER:
254
255 /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
256
257 if (original_object->buffer.length > 8) {
258 return (AE_AML_OPERAND_TYPE);
259 }
260
261 /* Extract each buffer byte to create the integer */
262
263 for (i = 0; i < original_object->buffer.length; i++) {
264 value |=
265 ((u64) original_object->buffer.
266 pointer[i] << (i * 8));
267 }
268 break;
269
270 default:
271 return (AE_AML_OPERAND_TYPE);
272 }
273
274 new_object = acpi_ut_create_integer_object(value);
275 if (!new_object) {
276 return (AE_NO_MEMORY);
277 }
278
279 *return_object = new_object;
280 return (AE_OK);
281}
282
283/*******************************************************************************
284 *
285 * FUNCTION: acpi_ns_convert_to_string
286 *
287 * PARAMETERS: original_object - Object to be converted
288 * return_object - Where the new converted object is returned
289 *
290 * RETURN: Status. AE_OK if conversion was successful.
291 *
292 * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
293 *
294 ******************************************************************************/
295
296static acpi_status
297acpi_ns_convert_to_string(union acpi_operand_object *original_object,
298 union acpi_operand_object **return_object)
299{
300 union acpi_operand_object *new_object;
301 acpi_size length;
302 acpi_status status;
303
304 switch (original_object->common.type) {
305 case ACPI_TYPE_INTEGER:
306 /*
307 * Integer-to-String conversion. Commonly, convert
308 * an integer of value 0 to a NULL string. The last element of
309 * _BIF and _BIX packages occasionally need this fix.
310 */
311 if (original_object->integer.value == 0) {
312
313 /* Allocate a new NULL string object */
314
315 new_object = acpi_ut_create_string_object(0);
316 if (!new_object) {
317 return (AE_NO_MEMORY);
318 }
319 } else {
320 status =
321 acpi_ex_convert_to_string(original_object,
322 &new_object,
323 ACPI_IMPLICIT_CONVERT_HEX);
324 if (ACPI_FAILURE(status)) {
325 return (status);
326 }
327 }
328 break;
329
330 case ACPI_TYPE_BUFFER:
331 /*
332 * Buffer-to-String conversion. Use a to_string
333 * conversion, no transform performed on the buffer data. The best
334 * example of this is the _BIF method, where the string data from
335 * the battery is often (incorrectly) returned as buffer object(s).
336 */
337 length = 0;
338 while ((length < original_object->buffer.length) &&
339 (original_object->buffer.pointer[length])) {
340 length++;
341 }
342
343 /* Allocate a new string object */
344
345 new_object = acpi_ut_create_string_object(length);
346 if (!new_object) {
347 return (AE_NO_MEMORY);
348 }
349
350 /*
351 * Copy the raw buffer data with no transform. String is already NULL
352 * terminated at Length+1.
353 */
354 ACPI_MEMCPY(new_object->string.pointer,
355 original_object->buffer.pointer, length);
356 break;
357
358 default:
359 return (AE_AML_OPERAND_TYPE);
360 }
361
362 *return_object = new_object;
363 return (AE_OK);
364}
365
366/*******************************************************************************
367 * 312 *
368 * FUNCTION: acpi_ns_convert_to_buffer 313 * FUNCTION: acpi_ns_match_simple_repair
369 * 314 *
370 * PARAMETERS: original_object - Object to be converted 315 * PARAMETERS: node - Namespace node for the method/object
371 * return_object - Where the new converted object is returned 316 * return_btype - Object type that was returned
317 * package_index - Index of object within parent package (if
318 * applicable - ACPI_NOT_PACKAGE_ELEMENT
319 * otherwise)
372 * 320 *
373 * RETURN: Status. AE_OK if conversion was successful. 321 * RETURN: Pointer to entry in repair table. NULL indicates not found.
374 * 322 *
375 * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer. 323 * DESCRIPTION: Check an object name against the repairable object list.
376 * 324 *
377 ******************************************************************************/ 325 *****************************************************************************/
378 326
379static acpi_status 327static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
380acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, 328 acpi_namespace_node
381 union acpi_operand_object **return_object) 329 *node,
330 u32
331 return_btype,
332 u32
333 package_index)
382{ 334{
383 union acpi_operand_object *new_object; 335 const struct acpi_simple_repair_info *this_name;
384 acpi_status status;
385 union acpi_operand_object **elements;
386 u32 *dword_buffer;
387 u32 count;
388 u32 i;
389 336
390 switch (original_object->common.type) { 337 /* Search info table for a repairable predefined method/object name */
391 case ACPI_TYPE_INTEGER:
392 /*
393 * Integer-to-Buffer conversion.
394 * Convert the Integer to a packed-byte buffer. _MAT and other
395 * objects need this sometimes, if a read has been performed on a
396 * Field object that is less than or equal to the global integer
397 * size (32 or 64 bits).
398 */
399 status =
400 acpi_ex_convert_to_buffer(original_object, &new_object);
401 if (ACPI_FAILURE(status)) {
402 return (status);
403 }
404 break;
405 338
406 case ACPI_TYPE_STRING: 339 this_name = acpi_object_repair_info;
340 while (this_name->object_converter) {
341 if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
407 342
408 /* String-to-Buffer conversion. Simple data copy */ 343 /* Check if we can actually repair this name/type combination */
409
410 new_object =
411 acpi_ut_create_buffer_object(original_object->string.
412 length);
413 if (!new_object) {
414 return (AE_NO_MEMORY);
415 }
416 344
417 ACPI_MEMCPY(new_object->buffer.pointer, 345 if ((return_btype & this_name->unexpected_btypes) &&
418 original_object->string.pointer, 346 (package_index == this_name->package_index)) {
419 original_object->string.length); 347 return (this_name);
420 break;
421
422 case ACPI_TYPE_PACKAGE:
423 /*
424 * This case is often seen for predefined names that must return a
425 * Buffer object with multiple DWORD integers within. For example,
426 * _FDE and _GTM. The Package can be converted to a Buffer.
427 */
428
429 /* All elements of the Package must be integers */
430
431 elements = original_object->package.elements;
432 count = original_object->package.count;
433
434 for (i = 0; i < count; i++) {
435 if ((!*elements) ||
436 ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
437 return (AE_AML_OPERAND_TYPE);
438 } 348 }
439 elements++;
440 }
441
442 /* Create the new buffer object to replace the Package */
443 349
444 new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count)); 350 return (NULL);
445 if (!new_object) {
446 return (AE_NO_MEMORY);
447 } 351 }
448 352 this_name++;
449 /* Copy the package elements (integers) to the buffer as DWORDs */
450
451 elements = original_object->package.elements;
452 dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
453
454 for (i = 0; i < count; i++) {
455 *dword_buffer = (u32) (*elements)->integer.value;
456 dword_buffer++;
457 elements++;
458 }
459 break;
460
461 default:
462 return (AE_AML_OPERAND_TYPE);
463 } 353 }
464 354
465 *return_object = new_object; 355 return (NULL); /* Name was not found in the repair table */
466 return (AE_OK);
467} 356}
468 357
469/******************************************************************************* 358/*******************************************************************************
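
acpi_ns_simple_repair() now consults a small table keyed by object name, the unexpected return types the entry can repair, and the package index, terminated by a NULL converter. A standalone sketch of such a table lookup; the names, bit values, and converter functions here are placeholders, not the ACPICA entries:

/* Standalone sketch - names, bit values and converters are placeholders */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NOT_PACKAGE_ELEMENT     0xFFFFFFFFu
#define RTYPE_INTEGER           0x01
#define RTYPE_STRING            0x02
#define RTYPE_BUFFER            0x04

typedef int (*converter_fn)(void);

static int convert_to_resource(void) { return 0; }      /* placeholder */
static int convert_to_unicode(void)  { return 0; }      /* placeholder */

struct simple_repair_info {
        char name[5];
        uint32_t unexpected_btypes;     /* types this entry can repair */
        uint32_t package_index;
        converter_fn converter;
};

static const struct simple_repair_info repair_table[] = {
        { "_CRS", RTYPE_INTEGER | RTYPE_STRING | RTYPE_BUFFER,
          NOT_PACKAGE_ELEMENT, convert_to_resource },
        { "_STR", RTYPE_STRING | RTYPE_BUFFER,
          NOT_PACKAGE_ELEMENT, convert_to_unicode },
        { "", 0, 0, NULL }              /* table terminator */
};

static const struct simple_repair_info *match_repair(const char *name,
                                                     uint32_t return_btype,
                                                     uint32_t package_index)
{
        const struct simple_repair_info *entry;

        for (entry = repair_table; entry->converter; entry++) {
                if (strncmp(name, entry->name, 4) == 0 &&
                    (return_btype & entry->unexpected_btypes) &&
                    package_index == entry->package_index) {
                        return entry;
                }
        }
        return NULL;                    /* no applicable repair */
}

int main(void)
{
        printf("%s\n", match_repair("_CRS", RTYPE_INTEGER,
                                    NOT_PACKAGE_ELEMENT) ? "match" : "no match");
        return 0;
}
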
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index ba4d98287c6a..149e9b9c2c1b 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -66,9 +66,9 @@ typedef struct acpi_repair_info {
66 66
67/* Local prototypes */ 67/* Local prototypes */
68 68
69static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct 69static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
70 acpi_namespace_node 70 acpi_namespace_node
71 *node); 71 *node);
72 72
73static acpi_status 73static acpi_status
74acpi_ns_repair_ALR(struct acpi_predefined_data *data, 74acpi_ns_repair_ALR(struct acpi_predefined_data *data,
@@ -175,7 +175,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
175 175
176 /* Check if this name is in the list of repairable names */ 176 /* Check if this name is in the list of repairable names */
177 177
178 predefined = acpi_ns_match_repairable_name(node); 178 predefined = acpi_ns_match_complex_repair(node);
179 if (!predefined) { 179 if (!predefined) {
180 return (validate_status); 180 return (validate_status);
181 } 181 }
@@ -186,7 +186,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
186 186
187/****************************************************************************** 187/******************************************************************************
188 * 188 *
189 * FUNCTION: acpi_ns_match_repairable_name 189 * FUNCTION: acpi_ns_match_complex_repair
190 * 190 *
191 * PARAMETERS: node - Namespace node for the method/object 191 * PARAMETERS: node - Namespace node for the method/object
192 * 192 *
@@ -196,9 +196,9 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
196 * 196 *
197 *****************************************************************************/ 197 *****************************************************************************/
198 198
199static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct 199static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
200 acpi_namespace_node 200 acpi_namespace_node
201 *node) 201 *node)
202{ 202{
203 const struct acpi_repair_info *this_name; 203 const struct acpi_repair_info *this_name;
204 204
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 686420df684f..2808586fad30 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -112,10 +112,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
112 112
113 if (!node) { 113 if (!node) {
114 ACPI_WARNING((AE_INFO, "Null Node parameter")); 114 ACPI_WARNING((AE_INFO, "Null Node parameter"));
115 return_VALUE(ACPI_TYPE_ANY); 115 return_UINT8(ACPI_TYPE_ANY);
116 } 116 }
117 117
118 return_VALUE(node->type); 118 return_UINT8(node->type);
119} 119}
120 120
121/******************************************************************************* 121/*******************************************************************************
@@ -140,10 +140,10 @@ u32 acpi_ns_local(acpi_object_type type)
140 /* Type code out of range */ 140 /* Type code out of range */
141 141
142 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); 142 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
143 return_VALUE(ACPI_NS_NORMAL); 143 return_UINT32(ACPI_NS_NORMAL);
144 } 144 }
145 145
146 return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL); 146 return_UINT32(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
147} 147}
148 148
149/******************************************************************************* 149/*******************************************************************************
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index f51308cdbc65..9f25a3d4e992 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -108,7 +108,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
108 /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */ 108 /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
109 109
110 package_length |= (aml[0] & byte_zero_mask); 110 package_length |= (aml[0] & byte_zero_mask);
111 return_VALUE(package_length); 111 return_UINT32(package_length);
112} 112}
113 113
114/******************************************************************************* 114/*******************************************************************************
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 7816d4eef04e..72077fa1eea5 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -202,6 +202,12 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
202 return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); 202 return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
203 } 203 }
204 204
205 /* Sanity check the length. It must not be zero, or we loop forever */
206
207 if (!resource->length) {
208 return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
209 }
210
205 /* Get the base size of the (external stream) resource descriptor */ 211 /* Get the base size of the (external stream) resource descriptor */
206 212
207 total_size = acpi_gbl_aml_resource_sizes[resource->type]; 213 total_size = acpi_gbl_aml_resource_sizes[resource->type];
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index cab51445189d..b5fc0db2e87b 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -385,6 +385,14 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
385 return; 385 return;
386 } 386 }
387 387
388 /* Sanity check the length. It must not be zero, or we loop forever */
389
390 if (!resource_list->length) {
391 acpi_os_printf
392 ("Invalid zero length descriptor in resource list\n");
393 return;
394 }
395
388 /* Dump the resource descriptor */ 396 /* Dump the resource descriptor */
389 397
390 if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 398 if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index ee2e206fc6c8..6053aa182093 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -178,6 +178,14 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
178 return_ACPI_STATUS(AE_BAD_DATA); 178 return_ACPI_STATUS(AE_BAD_DATA);
179 } 179 }
180 180
181 /* Sanity check the length. It must not be zero, or we loop forever */
182
183 if (!resource->length) {
184 ACPI_ERROR((AE_INFO,
185 "Invalid zero length descriptor in resource list\n"));
186 return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
187 }
188
181 /* Perform the conversion */ 189 /* Perform the conversion */
182 190
183 if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 191 if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 15d6eaef0e28..c0e5d2d3ce67 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -563,13 +563,19 @@ acpi_walk_resource_buffer(struct acpi_buffer * buffer,
563 563
564 while (resource < resource_end) { 564 while (resource < resource_end) {
565 565
566 /* Sanity check the resource */ 566 /* Sanity check the resource type */
567 567
568 if (resource->type > ACPI_RESOURCE_TYPE_MAX) { 568 if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
569 status = AE_AML_INVALID_RESOURCE_TYPE; 569 status = AE_AML_INVALID_RESOURCE_TYPE;
570 break; 570 break;
571 } 571 }
572 572
573 /* Sanity check the length. It must not be zero, or we loop forever */
574
575 if (!resource->length) {
576 return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
577 }
578
573 /* Invoke the user function, abort on any error returned */ 579 /* Invoke the user function, abort on any error returned */
574 580
575 status = user_function(resource, context); 581 status = user_function(resource, context);
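
The four hunks above (rscalc.c, rsdump.c, rslist.c, rsxface.c) add the same guard: a resource descriptor whose length field is zero would never advance the walk pointer, so each walker now bails out instead of looping forever. A minimal standalone sketch of that pattern, using hypothetical types rather than the real struct acpi_resource:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical length-prefixed descriptor, loosely modeled on struct acpi_resource */
struct res_desc {
        uint32_t type;
        uint32_t length;        /* total size of this descriptor, in bytes */
};

/* Walk a buffer of descriptors; fail instead of spinning on a zero length. */
static int walk_resources(const uint8_t *buf, size_t size)
{
        const uint8_t *p = buf;
        const uint8_t *end = buf + size;

        while ((size_t)(end - p) >= sizeof(struct res_desc)) {
                const struct res_desc *res = (const struct res_desc *)(const void *)p;

                /* Sanity check the length. It must not be zero, or we loop forever */
                if (!res->length)
                        return -1;

                printf("descriptor type %u, length %u\n",
                       (unsigned int)res->type, (unsigned int)res->length);
                p += res->length;       /* advance by the descriptor's own length */
        }
        return 0;
}

int main(void)
{
        struct res_desc list[3] = {
                { 1, sizeof(struct res_desc) },
                { 2, sizeof(struct res_desc) },
                { 7, sizeof(struct res_desc) },
        };

        return walk_resources((const uint8_t *)list, sizeof(list)) ? 1 : 0;
}
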
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 74181bf181ec..33b00d22300a 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -559,8 +559,12 @@ static void acpi_tb_validate_fadt(void)
559 /* 559 /*
560 * For each extended field, check for length mismatch between the 560 * For each extended field, check for length mismatch between the
561 * legacy length field and the corresponding 64-bit X length field. 561 * legacy length field and the corresponding 64-bit X length field.
562 * Note: If the legacy length field is > 0xFF bits, ignore this
563 * check. (GPE registers can be larger than the 64-bit GAS structure
564	 * can accommodate, 0xFF bits).
562 */ 565 */
563 if (address64->address && 566 if (address64->address &&
567 (ACPI_MUL_8(length) <= ACPI_UINT8_MAX) &&
564 (address64->bit_width != ACPI_MUL_8(length))) { 568 (address64->bit_width != ACPI_MUL_8(length))) {
565 ACPI_BIOS_WARNING((AE_INFO, 569 ACPI_BIOS_WARNING((AE_INFO,
566 "32/64X length mismatch in FADT/%s: %u/%u", 570 "32/64X length mismatch in FADT/%s: %u/%u",
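
The added condition in acpi_tb_validate_fadt() suppresses the 32/64-bit length-mismatch warning whenever the legacy length, converted to bits, no longer fits the 8-bit bit_width field of the GAS structure (oversized GPE register blocks). A hedged sketch of just that predicate, with simplified parameters:

#include <stdint.h>
#include <stdio.h>

#define MUL_8(x)        ((uint32_t)(x) << 3)    /* bytes -> bits, like ACPI_MUL_8() */

/*
 * Warn on a 32/64-bit length mismatch only when the legacy length, expressed
 * in bits, is still representable in the 8-bit bit_width field; larger blocks
 * would always "mismatch", so the check is skipped for them.
 */
static void check_fadt_lengths(const char *name, uint64_t x_address,
                               uint8_t x_bit_width, uint8_t legacy_length)
{
        if (x_address &&
            (MUL_8(legacy_length) <= UINT8_MAX) &&
            (x_bit_width != MUL_8(legacy_length)))
                fprintf(stderr, "32/64X length mismatch in FADT/%s: %u/%u\n",
                        name, (unsigned int)MUL_8(legacy_length),
                        (unsigned int)x_bit_width);
}
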
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index b35a5e6d653a..ad11162482ff 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: tbxface - ACPI table oriented external interfaces 3 * Module Name: tbxface - ACPI table-oriented external interfaces
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
@@ -80,7 +80,7 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count)
80 * array is dynamically allocated. 80 * array is dynamically allocated.
81 * initial_table_count - Size of initial_table_array, in number of 81 * initial_table_count - Size of initial_table_array, in number of
82 * struct acpi_table_desc structures 82 * struct acpi_table_desc structures
83 * allow_realloc - Flag to tell Table Manager if resize of 83 * allow_resize - Flag to tell Table Manager if resize of
84 * pre-allocated array is allowed. Ignored 84 * pre-allocated array is allowed. Ignored
85 * if initial_table_array is NULL. 85 * if initial_table_array is NULL.
86 * 86 *
@@ -107,8 +107,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
107 ACPI_FUNCTION_TRACE(acpi_initialize_tables); 107 ACPI_FUNCTION_TRACE(acpi_initialize_tables);
108 108
109 /* 109 /*
110 * Set up the Root Table Array 110 * Setup the Root Table Array and allocate the table array
111 * Allocate the table array if requested 111 * if requested
112 */ 112 */
113 if (!initial_table_array) { 113 if (!initial_table_array) {
114 status = acpi_allocate_root_table(initial_table_count); 114 status = acpi_allocate_root_table(initial_table_count);
@@ -305,9 +305,10 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
305 * instance - Which instance (for SSDTs) 305 * instance - Which instance (for SSDTs)
306 * out_table - Where the pointer to the table is returned 306 * out_table - Where the pointer to the table is returned
307 * 307 *
308 * RETURN: Status and pointer to table 308 * RETURN: Status and pointer to the requested table
309 * 309 *
310 * DESCRIPTION: Finds and verifies an ACPI table. 310 * DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
311 * RSDT/XSDT.
311 * 312 *
312 ******************************************************************************/ 313 ******************************************************************************/
313acpi_status 314acpi_status
@@ -375,9 +376,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
375 * PARAMETERS: table_index - Table index 376 * PARAMETERS: table_index - Table index
376 * table - Where the pointer to the table is returned 377 * table - Where the pointer to the table is returned
377 * 378 *
378 * RETURN: Status and pointer to the table 379 * RETURN: Status and pointer to the requested table
379 * 380 *
380 * DESCRIPTION: Obtain a table by an index into the global table list. 381 * DESCRIPTION: Obtain a table by an index into the global table list. Used
382 * internally also.
381 * 383 *
382 ******************************************************************************/ 384 ******************************************************************************/
383acpi_status 385acpi_status
@@ -432,7 +434,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
432 * 434 *
433 * RETURN: Status 435 * RETURN: Status
434 * 436 *
435 * DESCRIPTION: Install table event handler 437 * DESCRIPTION: Install a global table event handler.
436 * 438 *
437 ******************************************************************************/ 439 ******************************************************************************/
438acpi_status 440acpi_status
@@ -479,7 +481,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
479 * 481 *
480 * RETURN: Status 482 * RETURN: Status
481 * 483 *
482 * DESCRIPTION: Remove table event handler 484 * DESCRIPTION: Remove a table event handler
483 * 485 *
484 ******************************************************************************/ 486 ******************************************************************************/
485acpi_status acpi_remove_table_handler(acpi_table_handler handler) 487acpi_status acpi_remove_table_handler(acpi_table_handler handler)
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 698b9d385516..e0a2e2779c2e 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -214,7 +214,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
214 214
215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && 215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
217 return_VALUE(0); 217 return_UINT32(0);
218 } 218 }
219 219
220 range_info = acpi_gbl_address_range_list[space_id]; 220 range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
256 range_info = range_info->next; 256 range_info = range_info->next;
257 } 257 }
258 258
259 return_VALUE(overlap_count); 259 return_UINT32(overlap_count);
260} 260}
261 261
262/******************************************************************************* 262/*******************************************************************************
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e0e8579deaac..a877a9647fd9 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -85,7 +85,6 @@ acpi_os_create_cache(char *cache_name,
85 /* Populate the cache object and return it */ 85 /* Populate the cache object and return it */
86 86
87 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); 87 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
88 cache->link_offset = 8;
89 cache->list_name = cache_name; 88 cache->list_name = cache_name;
90 cache->object_size = object_size; 89 cache->object_size = object_size;
91 cache->max_depth = max_depth; 90 cache->max_depth = max_depth;
@@ -108,7 +107,7 @@ acpi_os_create_cache(char *cache_name,
108 107
109acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) 108acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
110{ 109{
111 char *next; 110 void *next;
112 acpi_status status; 111 acpi_status status;
113 112
114 ACPI_FUNCTION_ENTRY(); 113 ACPI_FUNCTION_ENTRY();
@@ -128,10 +127,7 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
128 127
129 /* Delete and unlink one cached state object */ 128 /* Delete and unlink one cached state object */
130 129
131 next = *(ACPI_CAST_INDIRECT_PTR(char, 130 next = ACPI_GET_DESCRIPTOR_PTR(cache->list_head);
132 &(((char *)cache->
133 list_head)[cache->
134 link_offset])));
135 ACPI_FREE(cache->list_head); 131 ACPI_FREE(cache->list_head);
136 132
137 cache->list_head = next; 133 cache->list_head = next;
@@ -221,10 +217,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
221 217
222 /* Put the object at the head of the cache list */ 218 /* Put the object at the head of the cache list */
223 219
224 *(ACPI_CAST_INDIRECT_PTR(char, 220 ACPI_SET_DESCRIPTOR_PTR(object, cache->list_head);
225 &(((char *)object)[cache->
226 link_offset]))) =
227 cache->list_head;
228 cache->list_head = object; 221 cache->list_head = object;
229 cache->current_depth++; 222 cache->current_depth++;
230 223
@@ -272,10 +265,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
272 /* There is an object available, use it */ 265 /* There is an object available, use it */
273 266
274 object = cache->list_head; 267 object = cache->list_head;
275 cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char, 268 cache->list_head = ACPI_GET_DESCRIPTOR_PTR(object);
276 &(((char *)
277 object)[cache->
278 link_offset])));
279 269
280 cache->current_depth--; 270 cache->current_depth--;
281 271
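
The utcache.c change removes the per-cache link_offset and the ACPI_CAST_INDIRECT_PTR pointer arithmetic; the free-list link is now read and written through the ACPI_GET/SET_DESCRIPTOR_PTR macros. The sketch below shows the same intrusive free-list idea in isolation; the object layout here is invented and is not the ACPICA descriptor format:

#include <stdlib.h>

/* Every cacheable object starts with a common header; its first word doubles
 * as the free-list link while the object is parked in the cache. */
struct obj_header {
        void *next_free;
};

struct obj_cache {
        struct obj_header *list_head;
        unsigned int current_depth;
        unsigned int max_depth;
        size_t object_size;
};

#define GET_NEXT(obj)           (((struct obj_header *)(obj))->next_free)
#define SET_NEXT(obj, ptr)      (((struct obj_header *)(obj))->next_free = (ptr))

static void *cache_acquire(struct obj_cache *cache)
{
        void *object;

        if (!cache->list_head)
                return calloc(1, cache->object_size);   /* cache empty: allocate */

        object = cache->list_head;
        cache->list_head = GET_NEXT(object);            /* unlink from free list */
        cache->current_depth--;
        return object;
}

static void cache_release(struct obj_cache *cache, void *object)
{
        if (cache->current_depth >= cache->max_depth) {
                free(object);                           /* cache full: really free */
                return;
        }
        SET_NEXT(object, cache->list_head);             /* push at the list head */
        cache->list_head = object;
        cache->current_depth++;
}
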
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 2541de420249..29b930250b6f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -359,19 +359,20 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
359 * FUNCTION: acpi_ut_update_ref_count 359 * FUNCTION: acpi_ut_update_ref_count
360 * 360 *
361 * PARAMETERS: object - Object whose ref count is to be updated 361 * PARAMETERS: object - Object whose ref count is to be updated
362 * action - What to do 362 * action - What to do (REF_INCREMENT or REF_DECREMENT)
363 * 363 *
364 * RETURN: New ref count 364 * RETURN: None. Sets new reference count within the object
365 * 365 *
366 * DESCRIPTION: Modify the ref count and return it. 366 * DESCRIPTION: Modify the reference count for an internal acpi object
367 * 367 *
368 ******************************************************************************/ 368 ******************************************************************************/
369 369
370static void 370static void
371acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) 371acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
372{ 372{
373 u16 count; 373 u16 original_count;
374 u16 new_count; 374 u16 new_count = 0;
375 acpi_cpu_flags lock_flags;
375 376
376 ACPI_FUNCTION_NAME(ut_update_ref_count); 377 ACPI_FUNCTION_NAME(ut_update_ref_count);
377 378
@@ -379,76 +380,79 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
379 return; 380 return;
380 } 381 }
381 382
382 count = object->common.reference_count;
383 new_count = count;
384
385 /* 383 /*
386 * Perform the reference count action (increment, decrement, force delete) 384 * Always get the reference count lock. Note: Interpreter and/or
385 * Namespace is not always locked when this function is called.
387 */ 386 */
387 lock_flags = acpi_os_acquire_lock(acpi_gbl_reference_count_lock);
388 original_count = object->common.reference_count;
389
390 /* Perform the reference count action (increment, decrement) */
391
388 switch (action) { 392 switch (action) {
389 case REF_INCREMENT: 393 case REF_INCREMENT:
390 394
391 new_count++; 395 new_count = original_count + 1;
392 object->common.reference_count = new_count; 396 object->common.reference_count = new_count;
397 acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
398
399 /* The current reference count should never be zero here */
400
401 if (!original_count) {
402 ACPI_WARNING((AE_INFO,
403 "Obj %p, Reference Count was zero before increment\n",
404 object));
405 }
393 406
394 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 407 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
395 "Obj %p Refs=%X, [Incremented]\n", 408 "Obj %p Type %.2X Refs %.2X [Incremented]\n",
396 object, new_count)); 409 object, object->common.type, new_count));
397 break; 410 break;
398 411
399 case REF_DECREMENT: 412 case REF_DECREMENT:
400 413
401 if (count < 1) { 414 /* The current reference count must be non-zero */
402 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
403 "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
404 object, new_count));
405
406 new_count = 0;
407 } else {
408 new_count--;
409 415
410 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 416 if (original_count) {
411 "Obj %p Refs=%X, [Decremented]\n", 417 new_count = original_count - 1;
412 object, new_count)); 418 object->common.reference_count = new_count;
413 } 419 }
414 420
415 if (object->common.type == ACPI_TYPE_METHOD) { 421 acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
416 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
417 "Method Obj %p Refs=%X, [Decremented]\n",
418 object, new_count));
419 }
420 422
421 object->common.reference_count = new_count; 423 if (!original_count) {
422 if (new_count == 0) { 424 ACPI_WARNING((AE_INFO,
423 acpi_ut_delete_internal_obj(object); 425 "Obj %p, Reference Count is already zero, cannot decrement\n",
426 object));
424 } 427 }
425 break;
426
427 case REF_FORCE_DELETE:
428 428
429 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 429 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
430 "Obj %p Refs=%X, Force delete! (Set to 0)\n", 430 "Obj %p Type %.2X Refs %.2X [Decremented]\n",
431 object, count)); 431 object, object->common.type, new_count));
432 432
433 new_count = 0; 433 /* Actually delete the object on a reference count of zero */
434 object->common.reference_count = new_count; 434
435 acpi_ut_delete_internal_obj(object); 435 if (new_count == 0) {
436 acpi_ut_delete_internal_obj(object);
437 }
436 break; 438 break;
437 439
438 default: 440 default:
439 441
440 ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action)); 442 acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
441 break; 443 ACPI_ERROR((AE_INFO, "Unknown Reference Count action (0x%X)",
444 action));
445 return;
442 } 446 }
443 447
444 /* 448 /*
445 * Sanity check the reference count, for debug purposes only. 449 * Sanity check the reference count, for debug purposes only.
446 * (A deleted object will have a huge reference count) 450 * (A deleted object will have a huge reference count)
447 */ 451 */
448 if (count > ACPI_MAX_REFERENCE_COUNT) { 452 if (new_count > ACPI_MAX_REFERENCE_COUNT) {
449 ACPI_WARNING((AE_INFO, 453 ACPI_WARNING((AE_INFO,
450 "Large Reference Count (0x%X) in object %p", 454 "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
451 count, object)); 455 new_count, object, object->common.type));
452 } 456 }
453} 457}
454 458
@@ -458,8 +462,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
458 * 462 *
459 * PARAMETERS: object - Increment ref count for this object 463 * PARAMETERS: object - Increment ref count for this object
460 * and all sub-objects 464 * and all sub-objects
461 * action - Either REF_INCREMENT or REF_DECREMENT or 465 * action - Either REF_INCREMENT or REF_DECREMENT
462 * REF_FORCE_DELETE
463 * 466 *
464 * RETURN: Status 467 * RETURN: Status
465 * 468 *
@@ -714,7 +717,6 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
714 /* 717 /*
715 * Allow a NULL pointer to be passed in, just ignore it. This saves 718 * Allow a NULL pointer to be passed in, just ignore it. This saves
716 * each caller from having to check. Also, ignore NS nodes. 719 * each caller from having to check. Also, ignore NS nodes.
717 *
718 */ 720 */
719 if (!object || 721 if (!object ||
720 (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) { 722 (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
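
The acpi_ut_update_ref_count() rewrite wraps every reference-count read-modify-write in a dedicated lock (the interpreter/namespace locks are not always held here), drops REF_FORCE_DELETE, and warns instead of silently clamping when a count would pass through zero. A minimal pthread-based sketch of that discipline, not the ACPICA locking primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t refcount_lock = PTHREAD_MUTEX_INITIALIZER;

struct ref_object {
        unsigned short reference_count;
        /* ... payload ... */
};

static void object_delete(struct ref_object *obj)
{
        free(obj);
}

/* REF_INCREMENT: the count should never have been zero already */
static void object_get(struct ref_object *obj)
{
        unsigned short original;

        pthread_mutex_lock(&refcount_lock);
        original = obj->reference_count;
        obj->reference_count = original + 1;
        pthread_mutex_unlock(&refcount_lock);

        if (!original)
                fprintf(stderr, "obj %p: count was zero before increment\n",
                        (void *)obj);
}

/* REF_DECREMENT: delete the object when the count reaches zero */
static void object_put(struct ref_object *obj)
{
        unsigned short original, new_count;

        pthread_mutex_lock(&refcount_lock);
        original = obj->reference_count;
        new_count = original ? original - 1 : 0;
        obj->reference_count = new_count;
        pthread_mutex_unlock(&refcount_lock);

        if (!original) {
                fprintf(stderr, "obj %p: count already zero, cannot decrement\n",
                        (void *)obj);
                return;
        }
        if (!new_count)
                object_delete(obj);     /* last reference dropped */
}
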
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index a0ab7c02e87c..b543a144941a 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("utexcep")
64 ******************************************************************************/ 64 ******************************************************************************/
65const char *acpi_format_exception(acpi_status status) 65const char *acpi_format_exception(acpi_status status)
66{ 66{
67 const char *exception = NULL; 67 const struct acpi_exception_info *exception;
68 68
69 ACPI_FUNCTION_ENTRY(); 69 ACPI_FUNCTION_ENTRY();
70 70
@@ -76,10 +76,10 @@ const char *acpi_format_exception(acpi_status status)
76 ACPI_ERROR((AE_INFO, 76 ACPI_ERROR((AE_INFO,
77 "Unknown exception code: 0x%8.8X", status)); 77 "Unknown exception code: 0x%8.8X", status));
78 78
79 exception = "UNKNOWN_STATUS_CODE"; 79 return ("UNKNOWN_STATUS_CODE");
80 } 80 }
81 81
82 return (ACPI_CAST_PTR(const char, exception)); 82 return (exception->name);
83} 83}
84 84
85ACPI_EXPORT_SYMBOL(acpi_format_exception) 85ACPI_EXPORT_SYMBOL(acpi_format_exception)
@@ -97,10 +97,10 @@ ACPI_EXPORT_SYMBOL(acpi_format_exception)
97 * an ASCII string. 97 * an ASCII string.
98 * 98 *
99 ******************************************************************************/ 99 ******************************************************************************/
100const char *acpi_ut_validate_exception(acpi_status status) 100const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status status)
101{ 101{
102 u32 sub_status; 102 u32 sub_status;
103 const char *exception = NULL; 103 const struct acpi_exception_info *exception = NULL;
104 104
105 ACPI_FUNCTION_ENTRY(); 105 ACPI_FUNCTION_ENTRY();
106 106
@@ -113,35 +113,35 @@ const char *acpi_ut_validate_exception(acpi_status status)
113 case AE_CODE_ENVIRONMENTAL: 113 case AE_CODE_ENVIRONMENTAL:
114 114
115 if (sub_status <= AE_CODE_ENV_MAX) { 115 if (sub_status <= AE_CODE_ENV_MAX) {
116 exception = acpi_gbl_exception_names_env[sub_status]; 116 exception = &acpi_gbl_exception_names_env[sub_status];
117 } 117 }
118 break; 118 break;
119 119
120 case AE_CODE_PROGRAMMER: 120 case AE_CODE_PROGRAMMER:
121 121
122 if (sub_status <= AE_CODE_PGM_MAX) { 122 if (sub_status <= AE_CODE_PGM_MAX) {
123 exception = acpi_gbl_exception_names_pgm[sub_status]; 123 exception = &acpi_gbl_exception_names_pgm[sub_status];
124 } 124 }
125 break; 125 break;
126 126
127 case AE_CODE_ACPI_TABLES: 127 case AE_CODE_ACPI_TABLES:
128 128
129 if (sub_status <= AE_CODE_TBL_MAX) { 129 if (sub_status <= AE_CODE_TBL_MAX) {
130 exception = acpi_gbl_exception_names_tbl[sub_status]; 130 exception = &acpi_gbl_exception_names_tbl[sub_status];
131 } 131 }
132 break; 132 break;
133 133
134 case AE_CODE_AML: 134 case AE_CODE_AML:
135 135
136 if (sub_status <= AE_CODE_AML_MAX) { 136 if (sub_status <= AE_CODE_AML_MAX) {
137 exception = acpi_gbl_exception_names_aml[sub_status]; 137 exception = &acpi_gbl_exception_names_aml[sub_status];
138 } 138 }
139 break; 139 break;
140 140
141 case AE_CODE_CONTROL: 141 case AE_CODE_CONTROL:
142 142
143 if (sub_status <= AE_CODE_CTRL_MAX) { 143 if (sub_status <= AE_CODE_CTRL_MAX) {
144 exception = acpi_gbl_exception_names_ctrl[sub_status]; 144 exception = &acpi_gbl_exception_names_ctrl[sub_status];
145 } 145 }
146 break; 146 break;
147 147
@@ -149,5 +149,9 @@ const char *acpi_ut_validate_exception(acpi_status status)
149 break; 149 break;
150 } 150 }
151 151
152 return (ACPI_CAST_PTR(const char, exception)); 152 if (!exception || !exception->name) {
153 return (NULL);
154 }
155
156 return (exception);
153} 157}
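
acpi_ut_validate_exception() now returns a pointer into per-class tables of exception-info structures instead of a bare string, and acpi_format_exception() falls back to a fixed string when the lookup fails. A small standalone sketch of the table-per-class lookup, with made-up status codes and tables:

#include <stddef.h>

struct exception_info {
        const char *name;
};

/* Assumed status layout: high byte selects the class, low byte the index */
#define CODE_MASK       0xFF00u
#define SUB_MASK        0x00FFu
#define CODE_ENV        0x0000u
#define CODE_PGM        0x0100u

static const struct exception_info env_names[] = {
        { "AE_OK" }, { "AE_ERROR" }, { "AE_NO_MEMORY" },
};
static const struct exception_info pgm_names[] = {
        { "AE_BAD_PARAMETER" }, { "AE_BAD_CHARACTER" },
};

static const struct exception_info *validate_exception(unsigned int status)
{
        unsigned int sub = status & SUB_MASK;
        const struct exception_info *exception = NULL;

        switch (status & CODE_MASK) {
        case CODE_ENV:
                if (sub < sizeof(env_names) / sizeof(env_names[0]))
                        exception = &env_names[sub];
                break;
        case CODE_PGM:
                if (sub < sizeof(pgm_names) / sizeof(pgm_names[0]))
                        exception = &pgm_names[sub];
                break;
        default:
                break;
        }

        if (!exception || !exception->name)
                return NULL;
        return exception;
}

/* acpi_format_exception()-style wrapper: never hands the caller a NULL */
static const char *format_exception(unsigned int status)
{
        const struct exception_info *exception = validate_exception(status);

        return exception ? exception->name : "UNKNOWN_STATUS_CODE";
}
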
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffecf4b4f0dd..f736448a8606 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -359,6 +359,8 @@ acpi_status acpi_ut_init_globals(void)
359 359
360#ifdef ACPI_DISASSEMBLER 360#ifdef ACPI_DISASSEMBLER
361 acpi_gbl_external_list = NULL; 361 acpi_gbl_external_list = NULL;
362 acpi_gbl_num_external_methods = 0;
363 acpi_gbl_resolved_external_methods = 0;
362#endif 364#endif
363 365
364#ifdef ACPI_DEBUG_OUTPUT 366#ifdef ACPI_DEBUG_OUTPUT
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 22feb99b8e35..08c323245584 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -81,7 +81,7 @@ acpi_status acpi_ut_mutex_initialize(void)
81 } 81 }
82 } 82 }
83 83
84 /* Create the spinlocks for use at interrupt level */ 84 /* Create the spinlocks for use at interrupt level or for speed */
85 85
86 status = acpi_os_create_lock (&acpi_gbl_gpe_lock); 86 status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
87 if (ACPI_FAILURE (status)) { 87 if (ACPI_FAILURE (status)) {
@@ -93,7 +93,13 @@ acpi_status acpi_ut_mutex_initialize(void)
93 return_ACPI_STATUS (status); 93 return_ACPI_STATUS (status);
94 } 94 }
95 95
96 status = acpi_os_create_lock(&acpi_gbl_reference_count_lock);
97 if (ACPI_FAILURE(status)) {
98 return_ACPI_STATUS(status);
99 }
100
96 /* Mutex for _OSI support */ 101 /* Mutex for _OSI support */
102
97 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex); 103 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
98 if (ACPI_FAILURE(status)) { 104 if (ACPI_FAILURE(status)) {
99 return_ACPI_STATUS(status); 105 return_ACPI_STATUS(status);
@@ -136,6 +142,7 @@ void acpi_ut_mutex_terminate(void)
136 142
137 acpi_os_delete_lock(acpi_gbl_gpe_lock); 143 acpi_os_delete_lock(acpi_gbl_gpe_lock);
138 acpi_os_delete_lock(acpi_gbl_hardware_lock); 144 acpi_os_delete_lock(acpi_gbl_hardware_lock);
145 acpi_os_delete_lock(acpi_gbl_reference_count_lock);
139 146
140 /* Delete the reader/writer lock */ 147 /* Delete the reader/writer lock */
141 148
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 36a7d361d7cb..b15acebb96a1 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -108,9 +108,14 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
108 108
109acpi_status acpi_ut_initialize_interfaces(void) 109acpi_status acpi_ut_initialize_interfaces(void)
110{ 110{
111 acpi_status status;
111 u32 i; 112 u32 i;
112 113
113 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 114 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
115 if (ACPI_FAILURE(status)) {
116 return (status);
117 }
118
114 acpi_gbl_supported_interfaces = acpi_default_supported_interfaces; 119 acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
115 120
116 /* Link the static list of supported interfaces */ 121 /* Link the static list of supported interfaces */
@@ -132,20 +137,24 @@ acpi_status acpi_ut_initialize_interfaces(void)
132 * 137 *
133 * PARAMETERS: None 138 * PARAMETERS: None
134 * 139 *
135 * RETURN: None 140 * RETURN: Status
136 * 141 *
137 * DESCRIPTION: Delete all interfaces in the global list. Sets 142 * DESCRIPTION: Delete all interfaces in the global list. Sets
138 * acpi_gbl_supported_interfaces to NULL. 143 * acpi_gbl_supported_interfaces to NULL.
139 * 144 *
140 ******************************************************************************/ 145 ******************************************************************************/
141 146
142void acpi_ut_interface_terminate(void) 147acpi_status acpi_ut_interface_terminate(void)
143{ 148{
149 acpi_status status;
144 struct acpi_interface_info *next_interface; 150 struct acpi_interface_info *next_interface;
145 151
146 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 152 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
147 next_interface = acpi_gbl_supported_interfaces; 153 if (ACPI_FAILURE(status)) {
154 return (status);
155 }
148 156
157 next_interface = acpi_gbl_supported_interfaces;
149 while (next_interface) { 158 while (next_interface) {
150 acpi_gbl_supported_interfaces = next_interface->next; 159 acpi_gbl_supported_interfaces = next_interface->next;
151 160
@@ -160,6 +169,7 @@ void acpi_ut_interface_terminate(void)
160 } 169 }
161 170
162 acpi_os_release_mutex(acpi_gbl_osi_mutex); 171 acpi_os_release_mutex(acpi_gbl_osi_mutex);
172 return (AE_OK);
163} 173}
164 174
165/******************************************************************************* 175/*******************************************************************************
@@ -315,6 +325,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
315 union acpi_operand_object *return_desc; 325 union acpi_operand_object *return_desc;
316 struct acpi_interface_info *interface_info; 326 struct acpi_interface_info *interface_info;
317 acpi_interface_handler interface_handler; 327 acpi_interface_handler interface_handler;
328 acpi_status status;
318 u32 return_value; 329 u32 return_value;
319 330
320 ACPI_FUNCTION_TRACE(ut_osi_implementation); 331 ACPI_FUNCTION_TRACE(ut_osi_implementation);
@@ -336,7 +347,10 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
336 /* Default return value is 0, NOT SUPPORTED */ 347 /* Default return value is 0, NOT SUPPORTED */
337 348
338 return_value = 0; 349 return_value = 0;
339 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 350 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
351 if (ACPI_FAILURE(status)) {
352 return (status);
353 }
340 354
341 /* Lookup the interface in the global _OSI list */ 355 /* Lookup the interface in the global _OSI list */
342 356
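
Throughout utosi.c (and utxface.c further down) the result of acpi_os_acquire_mutex() is now checked and propagated instead of being cast to void, so a failed acquisition can no longer let the caller walk the shared interface list unlocked. The same pattern in plain pthreads, as a sketch:

#include <pthread.h>

static pthread_mutex_t osi_mutex = PTHREAD_MUTEX_INITIALIZER;
static int interface_count;

/* Return the lock error instead of silently ignoring it. */
static int register_interface(void)
{
        int err = pthread_mutex_lock(&osi_mutex);

        if (err)
                return err;     /* do NOT touch the shared list unlocked */

        interface_count++;      /* critical section */

        pthread_mutex_unlock(&osi_mutex);
        return 0;
}
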
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
new file mode 100644
index 000000000000..29459479148f
--- /dev/null
+++ b/drivers/acpi/acpica/utpredef.c
@@ -0,0 +1,399 @@
1/******************************************************************************
2 *
3 * Module Name: utpredef - support functions for predefined names
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acpredef.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utpredef")
50
51/*
52 * Names for the types that can be returned by the predefined objects.
53 * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
54 */
55static const char *ut_rtype_names[] = {
56 "/Integer",
57 "/String",
58 "/Buffer",
59 "/Package",
60 "/Reference",
61};
62
63/*******************************************************************************
64 *
65 * FUNCTION: acpi_ut_get_next_predefined_method
66 *
67 * PARAMETERS: this_name - Entry in the predefined method/name table
68 *
69 * RETURN: Pointer to next entry in predefined table.
70 *
71 * DESCRIPTION: Get the next entry in the predefined method table. Handles the
72 * cases where a package info entry follows a method name that
73 * returns a package.
74 *
75 ******************************************************************************/
76
77const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
78 acpi_predefined_info
79 *this_name)
80{
81
82 /*
83 * Skip next entry in the table if this name returns a Package
84 * (next entry contains the package info)
85 */
86 if ((this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) &&
87 (this_name->info.expected_btypes != ACPI_RTYPE_ALL)) {
88 this_name++;
89 }
90
91 this_name++;
92 return (this_name);
93}
94
95/*******************************************************************************
96 *
97 * FUNCTION: acpi_ut_match_predefined_method
98 *
99 * PARAMETERS: name - Name to find
100 *
101 * RETURN: Pointer to entry in predefined table. NULL indicates not found.
102 *
103 * DESCRIPTION: Check an object name against the predefined object list.
104 *
105 ******************************************************************************/
106
107const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
108{
109 const union acpi_predefined_info *this_name;
110
111 /* Quick check for a predefined name, first character must be underscore */
112
113 if (name[0] != '_') {
114 return (NULL);
115 }
116
117 /* Search info table for a predefined method/object name */
118
119 this_name = acpi_gbl_predefined_methods;
120 while (this_name->info.name[0]) {
121 if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
122 return (this_name);
123 }
124
125 this_name = acpi_ut_get_next_predefined_method(this_name);
126 }
127
128 return (NULL); /* Not found */
129}
130
131/*******************************************************************************
132 *
133 * FUNCTION: acpi_ut_get_expected_return_types
134 *
135 * PARAMETERS: buffer - Where the formatted string is returned
136 *              expected_btypes     - Bitfield of expected data types
137 *
138 * RETURN: Formatted string in Buffer.
139 *
140 * DESCRIPTION: Format the expected object types into a printable string.
141 *
142 ******************************************************************************/
143
144void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
145{
146 u32 this_rtype;
147 u32 i;
148 u32 j;
149
150 j = 1;
151 buffer[0] = 0;
152 this_rtype = ACPI_RTYPE_INTEGER;
153
154 for (i = 0; i < ACPI_NUM_RTYPES; i++) {
155
156 /* If one of the expected types, concatenate the name of this type */
157
158 if (expected_btypes & this_rtype) {
159 ACPI_STRCAT(buffer, &ut_rtype_names[i][j]);
160 j = 0; /* Use name separator from now on */
161 }
162
163 this_rtype <<= 1; /* Next Rtype */
164 }
165}
166
167/*******************************************************************************
168 *
169 * The remaining functions are used by iASL and acpi_help only
170 *
171 ******************************************************************************/
172
173#if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP)
174#include <stdio.h>
175#include <string.h>
176
177/* Local prototypes */
178
179static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types);
180
181/* Types that can be returned externally by a predefined name */
182
183static const char *ut_external_type_names[] = /* Indexed by ACPI_TYPE_* */
184{
185 ", UNSUPPORTED-TYPE",
186 ", Integer",
187 ", String",
188 ", Buffer",
189 ", Package"
190};
191
192/* Bit widths for resource descriptor predefined names */
193
194static const char *ut_resource_type_names[] = {
195 "/1",
196 "/2",
197 "/3",
198 "/8",
199 "/16",
200 "/32",
201 "/64",
202 "/variable",
203};
204
205/*******************************************************************************
206 *
207 * FUNCTION: acpi_ut_match_resource_name
208 *
209 * PARAMETERS: name - Name to find
210 *
211 * RETURN: Pointer to entry in the resource table. NULL indicates not
212 * found.
213 *
214 * DESCRIPTION: Check an object name against the predefined resource
215 * descriptor object list.
216 *
217 ******************************************************************************/
218
219const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
220{
221 const union acpi_predefined_info *this_name;
222
223 /* Quick check for a predefined name, first character must be underscore */
224
225 if (name[0] != '_') {
226 return (NULL);
227 }
228
229 /* Search info table for a predefined method/object name */
230
231 this_name = acpi_gbl_resource_names;
232 while (this_name->info.name[0]) {
233 if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
234 return (this_name);
235 }
236
237 this_name++;
238 }
239
240 return (NULL); /* Not found */
241}
242
243/*******************************************************************************
244 *
245 * FUNCTION: acpi_ut_display_predefined_method
246 *
247 * PARAMETERS: buffer - Scratch buffer for this function
248 * this_name - Entry in the predefined method/name table
249 * multi_line - TRUE if output should be on >1 line
250 *
251 * RETURN: None
252 *
253 * DESCRIPTION: Display information about a predefined method. Number and
254 * type of the input arguments, and expected type(s) for the
255 * return value, if any.
256 *
257 ******************************************************************************/
258
259void
260acpi_ut_display_predefined_method(char *buffer,
261 const union acpi_predefined_info *this_name,
262 u8 multi_line)
263{
264 u32 arg_count;
265
266 /*
267 * Get the argument count and the string buffer
268 * containing all argument types
269 */
270 arg_count = acpi_ut_get_argument_types(buffer,
271 this_name->info.argument_list);
272
273 if (multi_line) {
274 printf(" ");
275 }
276
277 printf("%4.4s Requires %s%u argument%s",
278 this_name->info.name,
279 (this_name->info.argument_list & ARG_COUNT_IS_MINIMUM) ?
280 "(at least) " : "", arg_count, arg_count != 1 ? "s" : "");
281
282 /* Display the types for any arguments */
283
284 if (arg_count > 0) {
285 printf(" (%s)", buffer);
286 }
287
288 if (multi_line) {
289 printf("\n ");
290 }
291
292 /* Get the return value type(s) allowed */
293
294 if (this_name->info.expected_btypes) {
295 acpi_ut_get_expected_return_types(buffer,
296 this_name->info.
297 expected_btypes);
298 printf(" Return value types: %s\n", buffer);
299 } else {
300 printf(" No return value\n");
301 }
302}
303
304/*******************************************************************************
305 *
306 * FUNCTION: acpi_ut_get_argument_types
307 *
308 * PARAMETERS: buffer - Where to return the formatted types
309 * argument_types - Types field for this method
310 *
311 * RETURN: count - the number of arguments required for this method
312 *
313 * DESCRIPTION: Format the required data types for this method (Integer,
314 * String, Buffer, or Package) and return the required argument
315 * count.
316 *
317 ******************************************************************************/
318
319static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types)
320{
321 u16 this_argument_type;
322 u16 sub_index;
323 u16 arg_count;
324 u32 i;
325
326 *buffer = 0;
327 sub_index = 2;
328
329 /* First field in the types list is the count of args to follow */
330
331 arg_count = (argument_types & METHOD_ARG_MASK);
332 argument_types >>= METHOD_ARG_BIT_WIDTH;
333
334 if (arg_count > METHOD_PREDEF_ARGS_MAX) {
335 printf("**** Invalid argument count (%u) "
336 "in predefined info structure\n", arg_count);
337 return (arg_count);
338 }
339
340 /* Get each argument from the list, convert to ascii, store to buffer */
341
342 for (i = 0; i < arg_count; i++) {
343 this_argument_type = (argument_types & METHOD_ARG_MASK);
344 if (!this_argument_type
345 || (this_argument_type > METHOD_MAX_ARG_TYPE)) {
346 printf("**** Invalid argument type (%u) "
347 "in predefined info structure\n",
348 this_argument_type);
349 return (arg_count);
350 }
351
352 strcat(buffer,
353 ut_external_type_names[this_argument_type] + sub_index);
354
355 /* Shift to next argument type field */
356
357 argument_types >>= METHOD_ARG_BIT_WIDTH;
358 sub_index = 0;
359 }
360
361 return (arg_count);
362}
363
364/*******************************************************************************
365 *
366 * FUNCTION: acpi_ut_get_resource_bit_width
367 *
368 * PARAMETERS: buffer - Where the formatted string is returned
369 * types - Bitfield of expected data types
370 *
371 * RETURN: Count of return types. Formatted string in Buffer.
372 *
373 * DESCRIPTION: Format the resource bit widths into a printable string.
374 *
375 ******************************************************************************/
376
377u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types)
378{
379 u32 i;
380 u16 sub_index;
381 u32 found;
382
383 *buffer = 0;
384 sub_index = 1;
385 found = 0;
386
387 for (i = 0; i < NUM_RESOURCE_WIDTHS; i++) {
388 if (types & 1) {
389 strcat(buffer, &(ut_resource_type_names[i][sub_index]));
390 sub_index = 0;
391 found++;
392 }
393
394 types >>= 1;
395 }
396
397 return (found);
398}
399#endif
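
utpredef.c stores, for each predefined name, an argument count plus one small per-argument type field packed into a 16-bit value; acpi_ut_get_argument_types() unpacks it for display. A self-contained sketch of the same encode/decode scheme; the field widths and type codes below are assumptions, not the acpredef.h encoding:

#include <stdio.h>
#include <string.h>

/* Assumed layout: bits [2:0] = argument count, then 3 bits per argument type */
#define ARG_BIT_WIDTH   3
#define ARG_MASK        0x7
#define MAX_ARGS        4

static const char *type_names[] = {
        "UNSUPPORTED", "Integer", "String", "Buffer", "Package"
};
#define MAX_TYPE 4

static unsigned int get_argument_types(char *buffer, unsigned short packed)
{
        unsigned int arg_count = packed & ARG_MASK;
        unsigned int i;

        buffer[0] = '\0';
        packed >>= ARG_BIT_WIDTH;

        if (arg_count > MAX_ARGS)
                return 0;

        for (i = 0; i < arg_count; i++) {
                unsigned int type = packed & ARG_MASK;

                if (!type || type > MAX_TYPE)
                        return i;               /* malformed entry */

                if (i)
                        strcat(buffer, ", ");
                strcat(buffer, type_names[type]);
                packed >>= ARG_BIT_WIDTH;       /* next argument field */
        }
        return arg_count;
}

int main(void)
{
        char buf[64];
        /* two arguments: Integer (1) then Buffer (3) */
        unsigned short packed = 2 | (1 << 3) | (3 << 6);
        unsigned int n = get_argument_types(buf, packed);

        printf("%u argument%s (%s)\n", n, n != 1 ? "s" : "", buf);
        return 0;
}
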
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 48efb446258c..6505774f223e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -287,7 +287,10 @@ acpi_status acpi_install_interface(acpi_string interface_name)
287 return (AE_BAD_PARAMETER); 287 return (AE_BAD_PARAMETER);
288 } 288 }
289 289
290 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 290 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
291 if (ACPI_FAILURE(status)) {
292 return (status);
293 }
291 294
292 /* Check if the interface name is already in the global list */ 295 /* Check if the interface name is already in the global list */
293 296
@@ -336,7 +339,10 @@ acpi_status acpi_remove_interface(acpi_string interface_name)
336 return (AE_BAD_PARAMETER); 339 return (AE_BAD_PARAMETER);
337 } 340 }
338 341
339 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 342 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
343 if (ACPI_FAILURE(status)) {
344 return (status);
345 }
340 346
341 status = acpi_ut_remove_interface(interface_name); 347 status = acpi_ut_remove_interface(interface_name);
342 348
@@ -362,9 +368,12 @@ ACPI_EXPORT_SYMBOL(acpi_remove_interface)
362 ****************************************************************************/ 368 ****************************************************************************/
363acpi_status acpi_install_interface_handler(acpi_interface_handler handler) 369acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
364{ 370{
365 acpi_status status = AE_OK; 371 acpi_status status;
366 372
367 (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); 373 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
374 if (ACPI_FAILURE(status)) {
375 return (status);
376 }
368 377
369 if (handler && acpi_gbl_interface_handler) { 378 if (handler && acpi_gbl_interface_handler) {
370 status = AE_ALREADY_EXISTS; 379 status = AE_ALREADY_EXISTS;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index c5cd5b5513e6..0cc384b72943 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -146,7 +146,7 @@ struct acpi_battery {
146 146
147#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat) 147#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
148 148
149inline int acpi_battery_present(struct acpi_battery *battery) 149static inline int acpi_battery_present(struct acpi_battery *battery)
150{ 150{
151 return battery->device->status.battery_present; 151 return battery->device->status.battery_present;
152} 152}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 01708a165368..292de3cab9cc 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -288,13 +288,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
288 } 288 }
289out_success: 289out_success:
290 context->ret.length = out_obj->buffer.length; 290 context->ret.length = out_obj->buffer.length;
291 context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL); 291 context->ret.pointer = kmemdup(out_obj->buffer.pointer,
292 context->ret.length, GFP_KERNEL);
292 if (!context->ret.pointer) { 293 if (!context->ret.pointer) {
293 status = AE_NO_MEMORY; 294 status = AE_NO_MEMORY;
294 goto out_kfree; 295 goto out_kfree;
295 } 296 }
296 memcpy(context->ret.pointer, out_obj->buffer.pointer,
297 context->ret.length);
298 status = AE_OK; 297 status = AE_OK;
299 298
300out_kfree: 299out_kfree:
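
The bus.c hunk folds kmalloc() plus memcpy() into kmemdup(), which allocates and copies in one step and leaves the error path unchanged. Outside the kernel the equivalent helper is only a few lines (a sketch; in kernel code kmemdup() itself should be used):

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): duplicate len bytes or return NULL. */
static void *memdup(const void *src, size_t len)
{
        void *dst = malloc(len);

        if (dst)
                memcpy(dst, src, len);
        return dst;
}
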
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 86c7d5445c38..92a659aa6396 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <acpi/acpi_bus.h> 34#include <acpi/acpi_bus.h>
35#include <acpi/acpi_drivers.h> 35#include <acpi/acpi_drivers.h>
36#include <acpi/button.h>
36 37
37#define PREFIX "ACPI: " 38#define PREFIX "ACPI: "
38 39
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 5523ba7d764d..e23151667655 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * acpi_container.c - ACPI Generic Container Driver 2 * container.c - ACPI Generic Container Driver
3 * ($Revision: )
4 * 3 *
5 * Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com) 4 * Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com)
6 * Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com) 5 * Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com)
7 * Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com) 6 * Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com)
8 * Copyright (C) 2004 Intel Corp.
9 * Copyright (C) 2004 FUJITSU LIMITED 7 * Copyright (C) 2004 FUJITSU LIMITED
8 * Copyright (C) 2004, 2013 Intel Corp.
9 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 * 10 *
11 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 * 12 *
@@ -26,14 +26,11 @@
26 * 26 *
27 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28 */ 28 */
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/slab.h>
33#include <linux/types.h>
34#include <linux/acpi.h> 29#include <linux/acpi.h>
35#include <acpi/acpi_bus.h> 30
36#include <acpi/acpi_drivers.h> 31#include "internal.h"
32
33#include "internal.h"
37 34
38#define PREFIX "ACPI: " 35#define PREFIX "ACPI: "
39 36
@@ -50,141 +47,20 @@ static const struct acpi_device_id container_device_ids[] = {
50static int container_device_attach(struct acpi_device *device, 47static int container_device_attach(struct acpi_device *device,
51 const struct acpi_device_id *not_used) 48 const struct acpi_device_id *not_used)
52{ 49{
53 /* 50 /* This is necessary for container hotplug to work. */
54 * FIXME: This is necessary, so that acpi_eject_store() doesn't return
55 * -ENODEV for containers.
56 */
57 return 1; 51 return 1;
58} 52}
59 53
60static struct acpi_scan_handler container_device_handler = { 54static struct acpi_scan_handler container_handler = {
61 .ids = container_device_ids, 55 .ids = container_device_ids,
62 .attach = container_device_attach, 56 .attach = container_device_attach,
57 .hotplug = {
58 .enabled = true,
59 .mode = AHM_CONTAINER,
60 },
63}; 61};
64 62
65static int is_device_present(acpi_handle handle)
66{
67 acpi_handle temp;
68 acpi_status status;
69 unsigned long long sta;
70
71
72 status = acpi_get_handle(handle, "_STA", &temp);
73 if (ACPI_FAILURE(status))
74 return 1; /* _STA not found, assume device present */
75
76 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
77 if (ACPI_FAILURE(status))
78 return 0; /* Firmware error */
79
80 return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
81}
82
83static void container_notify_cb(acpi_handle handle, u32 type, void *context)
84{
85 struct acpi_device *device = NULL;
86 int result;
87 int present;
88 acpi_status status;
89 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
90
91 acpi_scan_lock_acquire();
92
93 switch (type) {
94 case ACPI_NOTIFY_BUS_CHECK:
95 /* Fall through */
96 case ACPI_NOTIFY_DEVICE_CHECK:
97 pr_debug("Container driver received %s event\n",
98 (type == ACPI_NOTIFY_BUS_CHECK) ?
99 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
100
101 present = is_device_present(handle);
102 status = acpi_bus_get_device(handle, &device);
103 if (!present) {
104 if (ACPI_SUCCESS(status)) {
105 /* device exist and this is a remove request */
106 device->flags.eject_pending = 1;
107 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
108 goto out;
109 }
110 break;
111 }
112
113 if (!ACPI_FAILURE(status) || device)
114 break;
115
116 result = acpi_bus_scan(handle);
117 if (result) {
118 acpi_handle_warn(handle, "Failed to add container\n");
119 break;
120 }
121 result = acpi_bus_get_device(handle, &device);
122 if (result) {
123 acpi_handle_warn(handle, "Missing device object\n");
124 break;
125 }
126
127 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
128 ost_code = ACPI_OST_SC_SUCCESS;
129 break;
130
131 case ACPI_NOTIFY_EJECT_REQUEST:
132 if (!acpi_bus_get_device(handle, &device) && device) {
133 device->flags.eject_pending = 1;
134 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
135 goto out;
136 }
137 break;
138
139 default:
140 /* non-hotplug event; possibly handled by other handler */
141 goto out;
142 }
143
144 /* Inform firmware that the hotplug operation has completed */
145 (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
146
147 out:
148 acpi_scan_lock_release();
149}
150
151static bool is_container(acpi_handle handle)
152{
153 struct acpi_device_info *info;
154 bool ret = false;
155
156 if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
157 return false;
158
159 if (info->valid & ACPI_VALID_HID) {
160 const struct acpi_device_id *id;
161
162 for (id = container_device_ids; id->id[0]; id++) {
163 ret = !strcmp((char *)id->id, info->hardware_id.string);
164 if (ret)
165 break;
166 }
167 }
168 kfree(info);
169 return ret;
170}
171
172static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
173 u32 lvl, void *ctxt,
174 void **retv)
175{
176 if (is_container(handle))
177 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
178 container_notify_cb, NULL);
179
180 return AE_OK;
181}
182
183void __init acpi_container_init(void) 63void __init acpi_container_init(void)
184{ 64{
185 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 65 acpi_scan_add_handler_with_hotplug(&container_handler, "container");
186 acpi_container_register_notify_handler, NULL,
187 NULL, NULL);
188
189 acpi_scan_add_handler(&container_device_handler);
190} 66}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dd314ef9bff1..96de787e6104 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -145,27 +145,36 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
145 } 145 }
146 146
147 /* 147 /*
148 * Get the device's power state either directly (via _PSC) or 148 * Get the device's power state from power resources settings and _PSC,
149 * indirectly (via power resources). 149 * if available.
150 */ 150 */
151 if (device->power.flags.power_resources) {
152 int error = acpi_power_get_inferred_state(device, &result);
153 if (error)
154 return error;
155 }
151 if (device->power.flags.explicit_get) { 156 if (device->power.flags.explicit_get) {
157 acpi_handle handle = device->handle;
152 unsigned long long psc; 158 unsigned long long psc;
153 acpi_status status = acpi_evaluate_integer(device->handle, 159 acpi_status status;
154 "_PSC", NULL, &psc); 160
161 status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc);
155 if (ACPI_FAILURE(status)) 162 if (ACPI_FAILURE(status))
156 return -ENODEV; 163 return -ENODEV;
157 164
158 result = psc; 165 /*
159 } 166 * The power resources settings may indicate a power state
160 /* The test below covers ACPI_STATE_UNKNOWN too. */ 167 * shallower than the actual power state of the device.
161 if (result <= ACPI_STATE_D2) { 168 *
162 ; /* Do nothing. */ 169 * Moreover, on systems predating ACPI 4.0, if the device
163 } else if (device->power.flags.power_resources) { 170 * doesn't depend on any power resources and _PSC returns 3,
164 int error = acpi_power_get_inferred_state(device, &result); 171 * that means "power off". We need to maintain compatibility
165 if (error) 172 * with those systems.
166 return error; 173 */
167 } else if (result == ACPI_STATE_D3_HOT) { 174 if (psc > result && psc < ACPI_STATE_D3_COLD)
168 result = ACPI_STATE_D3; 175 result = psc;
176 else if (result == ACPI_STATE_UNKNOWN)
177 result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc;
169 } 178 }
170 179
171 /* 180 /*
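
The device_pm.c change consults both sources when they are available: the state inferred from power resources and the value reported by _PSC, keeping the deeper (higher-numbered) of the two, with a compatibility rule for pre-ACPI-4.0 firmware where _PSC returning 3 on a device without power resources means "off". A sketch of just that selection logic, with state constants mirroring ACPI_STATE_* and a simplified "no _PSC" convention:

/* ACPI-style device power states, shallowest to deepest */
enum {
        STATE_D0, STATE_D1, STATE_D2, STATE_D3_HOT, STATE_D3_COLD,
        STATE_UNKNOWN = 0xFF
};

/*
 * inferred: state derived from the power resource settings, or STATE_UNKNOWN
 *           if the device has no power resources
 * psc:      value returned by _PSC, or -1 if _PSC is not implemented
 */
static int reconcile_power_state(int inferred, int psc)
{
        int result = inferred;

        if (psc < 0)
                return result;

        if (psc > result && psc < STATE_D3_COLD)
                result = psc;           /* _PSC reports a deeper state */
        else if (result == STATE_UNKNOWN)
                /* No power resources: on old firmware, _PSC == 3 means "off" */
                result = psc > STATE_D2 ? STATE_D3_COLD : psc;

        return result;
}
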
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f815da82c765..8d1c0105e113 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -174,9 +174,13 @@ static int acpi_fan_add(struct acpi_device *device)
174 174
175static int acpi_fan_remove(struct acpi_device *device) 175static int acpi_fan_remove(struct acpi_device *device)
176{ 176{
177 struct thermal_cooling_device *cdev = acpi_driver_data(device); 177 struct thermal_cooling_device *cdev;
178
179 if (!device)
180 return -EINVAL;
178 181
179 if (!device || !cdev) 182 cdev = acpi_driver_data(device);
183 if (!cdev)
180 return -EINVAL; 184 return -EINVAL;
181 185
182 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 186 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c94a732b4b3..6f1afd9118c8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,6 +41,17 @@ void acpi_container_init(void);
41#else 41#else
42static inline void acpi_container_init(void) {} 42static inline void acpi_container_init(void) {}
43#endif 43#endif
44#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
45void acpi_memory_hotplug_init(void);
46#else
47static inline void acpi_memory_hotplug_init(void) {}
48#endif
49
50void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
51 const char *name);
52int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
53 const char *hotplug_profile_name);
54void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
44 55
45#ifdef CONFIG_DEBUG_FS 56#ifdef CONFIG_DEBUG_FS
46extern struct dentry *acpi_debugfs_dir; 57extern struct dentry *acpi_debugfs_dir;
@@ -48,6 +59,11 @@ int acpi_debugfs_init(void);
48#else 59#else
49static inline void acpi_debugfs_init(void) { return; } 60static inline void acpi_debugfs_init(void) { return; }
50#endif 61#endif
62#ifdef CONFIG_X86_INTEL_LPSS
63void acpi_lpss_init(void);
64#else
65static inline void acpi_lpss_init(void) {}
66#endif
51 67
52/* -------------------------------------------------------------------------- 68/* --------------------------------------------------------------------------
53 Device Node Initialization / Removal 69 Device Node Initialization / Removal
@@ -60,7 +76,7 @@ int acpi_device_add(struct acpi_device *device,
60void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 76void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
61 int type, unsigned long long sta); 77 int type, unsigned long long sta);
62void acpi_device_add_finalize(struct acpi_device *device); 78void acpi_device_add_finalize(struct acpi_device *device);
63void acpi_free_ids(struct acpi_device *device); 79void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
64 80
65/* -------------------------------------------------------------------------- 81/* --------------------------------------------------------------------------
66 Power Resource 82 Power Resource
@@ -131,4 +147,7 @@ static inline void suspend_nvs_restore(void) {}
131 -------------------------------------------------------------------------- */ 147 -------------------------------------------------------------------------- */
132struct platform_device; 148struct platform_device;
133 149
150int acpi_create_platform_device(struct acpi_device *adev,
151 const struct acpi_device_id *id);
152
134#endif /* _ACPI_INTERNAL_H_ */ 153#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 586e7e993d3d..e72186340fec 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -641,7 +641,7 @@ void __init acpi_initrd_override(void *data, size_t size)
641 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area) 641 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
642 * works fine. 642 * works fine.
643 */ 643 */
644 memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size); 644 memblock_reserve(acpi_tables_addr, all_tables_size);
645 arch_reserve_mem_area(acpi_tables_addr, all_tables_size); 645 arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
646 646
647 p = early_ioremap(acpi_tables_addr, all_tables_size); 647 p = early_ioremap(acpi_tables_addr, all_tables_size);
@@ -1555,7 +1555,7 @@ int acpi_check_resource_conflict(const struct resource *res)
1555 else 1555 else
1556 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1556 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1557 1557
1558 length = res->end - res->start + 1; 1558 length = resource_size(res);
1559 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1559 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1560 warn = 1; 1560 warn = 1;
1561 clash = acpi_check_address_range(space_id, res->start, length, warn); 1561 clash = acpi_check_address_range(space_id, res->start, length, warn);
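
The osl.c fix is a base/size confusion: memblock_reserve() takes a base address and a size, not a base and an end address, and resource_size() already encodes the kernel's inclusive-end convention (end - start + 1). A tiny sketch of why the helper beats open-coding the subtraction:

#include <stdio.h>

struct resource {
        unsigned long start;
        unsigned long end;      /* inclusive, as in the kernel's struct resource */
};

/* Equivalent of the kernel's resource_size() helper */
static unsigned long resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}

int main(void)
{
        struct resource res = { .start = 0x1000, .end = 0x1FFF };

        /* 0x1000 bytes; open-coding "end - start" would be off by one */
        printf("size = 0x%lx\n", resource_size(&res));
        return 0;
}
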
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ab764ed34a50..2652a614deeb 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -354,6 +354,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
354 354
355 } 355 }
356 resource->end.type = ACPI_RESOURCE_TYPE_END_TAG; 356 resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
357 resource->end.length = sizeof(struct acpi_resource);
357 358
358 /* Attempt to set the resource */ 359 /* Attempt to set the resource */
359 status = acpi_set_current_resources(link->device->handle, &buffer); 360 status = acpi_set_current_resources(link->device->handle, &buffer);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8688b89705..1dd6f6c85874 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -169,8 +169,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
169 *control &= OSC_PCI_CONTROL_MASKS; 169 *control &= OSC_PCI_CONTROL_MASKS;
170 capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; 170 capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
171 } else { 171 } else {
172 /* Run _OSC query for all possible controls. */ 172 /* Run _OSC query only with existing controls. */
173 capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; 173 capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
174 } 174 }
175 175
176 status = acpi_pci_run_osc(root->device->handle, capbuf, &result); 176 status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 34f5ef11d427..f962047c6c85 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -459,57 +459,79 @@ static struct attribute_group attr_groups[] = {
459 }, 459 },
460}; 460};
461 461
462static void acpi_power_hide_list(struct acpi_device *adev, int state) 462static struct attribute_group wakeup_attr_group = {
463 .name = "power_resources_wakeup",
464 .attrs = attrs,
465};
466
467static void acpi_power_hide_list(struct acpi_device *adev,
468 struct list_head *resources,
469 struct attribute_group *attr_group)
463{ 470{
464 struct acpi_device_power_state *ps = &adev->power.states[state];
465 struct acpi_power_resource_entry *entry; 471 struct acpi_power_resource_entry *entry;
466 472
467 if (list_empty(&ps->resources)) 473 if (list_empty(resources))
468 return; 474 return;
469 475
470 list_for_each_entry_reverse(entry, &ps->resources, node) { 476 list_for_each_entry_reverse(entry, resources, node) {
471 struct acpi_device *res_dev = &entry->resource->device; 477 struct acpi_device *res_dev = &entry->resource->device;
472 478
473 sysfs_remove_link_from_group(&adev->dev.kobj, 479 sysfs_remove_link_from_group(&adev->dev.kobj,
474 attr_groups[state].name, 480 attr_group->name,
475 dev_name(&res_dev->dev)); 481 dev_name(&res_dev->dev));
476 } 482 }
477 sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]); 483 sysfs_remove_group(&adev->dev.kobj, attr_group);
478} 484}
479 485
480static void acpi_power_expose_list(struct acpi_device *adev, int state) 486static void acpi_power_expose_list(struct acpi_device *adev,
487 struct list_head *resources,
488 struct attribute_group *attr_group)
481{ 489{
482 struct acpi_device_power_state *ps = &adev->power.states[state];
483 struct acpi_power_resource_entry *entry; 490 struct acpi_power_resource_entry *entry;
484 int ret; 491 int ret;
485 492
486 if (list_empty(&ps->resources)) 493 if (list_empty(resources))
487 return; 494 return;
488 495
489 ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]); 496 ret = sysfs_create_group(&adev->dev.kobj, attr_group);
490 if (ret) 497 if (ret)
491 return; 498 return;
492 499
493 list_for_each_entry(entry, &ps->resources, node) { 500 list_for_each_entry(entry, resources, node) {
494 struct acpi_device *res_dev = &entry->resource->device; 501 struct acpi_device *res_dev = &entry->resource->device;
495 502
496 ret = sysfs_add_link_to_group(&adev->dev.kobj, 503 ret = sysfs_add_link_to_group(&adev->dev.kobj,
497 attr_groups[state].name, 504 attr_group->name,
498 &res_dev->dev.kobj, 505 &res_dev->dev.kobj,
499 dev_name(&res_dev->dev)); 506 dev_name(&res_dev->dev));
500 if (ret) { 507 if (ret) {
501 acpi_power_hide_list(adev, state); 508 acpi_power_hide_list(adev, resources, attr_group);
502 break; 509 break;
503 } 510 }
504 } 511 }
505} 512}
506 513
514static void acpi_power_expose_hide(struct acpi_device *adev,
515 struct list_head *resources,
516 struct attribute_group *attr_group,
517 bool expose)
518{
519 if (expose)
520 acpi_power_expose_list(adev, resources, attr_group);
521 else
522 acpi_power_hide_list(adev, resources, attr_group);
523}
524
507void acpi_power_add_remove_device(struct acpi_device *adev, bool add) 525void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
508{ 526{
509 struct acpi_device_power_state *ps; 527 struct acpi_device_power_state *ps;
510 struct acpi_power_resource_entry *entry; 528 struct acpi_power_resource_entry *entry;
511 int state; 529 int state;
512 530
531 if (adev->wakeup.flags.valid)
532 acpi_power_expose_hide(adev, &adev->wakeup.resources,
533 &wakeup_attr_group, add);
534
513 if (!adev->power.flags.power_resources) 535 if (!adev->power.flags.power_resources)
514 return; 536 return;
515 537
@@ -523,12 +545,10 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
523 acpi_power_remove_dependent(resource, adev); 545 acpi_power_remove_dependent(resource, adev);
524 } 546 }
525 547
526 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) { 548 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
527 if (add) 549 acpi_power_expose_hide(adev,
528 acpi_power_expose_list(adev, state); 550 &adev->power.states[state].resources,
529 else 551 &attr_groups[state], add);
530 acpi_power_hide_list(adev, state);
531 }
532} 552}
533 553
534int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p) 554int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
@@ -824,7 +844,7 @@ static void acpi_release_power_resource(struct device *dev)
824 list_del(&resource->list_node); 844 list_del(&resource->list_node);
825 mutex_unlock(&power_resource_list_lock); 845 mutex_unlock(&power_resource_list_lock);
826 846
827 acpi_free_ids(device); 847 acpi_free_pnp_ids(&device->pnp);
828 kfree(resource); 848 kfree(resource);
829} 849}
830 850
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ee255c60bdac..f0df2c9434d2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -918,7 +918,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
918struct cpuidle_driver acpi_idle_driver = { 918struct cpuidle_driver acpi_idle_driver = {
919 .name = "acpi_idle", 919 .name = "acpi_idle",
920 .owner = THIS_MODULE, 920 .owner = THIS_MODULE,
921 .en_core_tk_irqen = 1,
922}; 921};
923 922
924/** 923/**
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 641b5450a0db..e8e652710e65 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -218,9 +218,13 @@ processor_get_max_state(struct thermal_cooling_device *cdev,
218 unsigned long *state) 218 unsigned long *state)
219{ 219{
220 struct acpi_device *device = cdev->devdata; 220 struct acpi_device *device = cdev->devdata;
221 struct acpi_processor *pr = acpi_driver_data(device); 221 struct acpi_processor *pr;
222 222
223 if (!device || !pr) 223 if (!device)
224 return -EINVAL;
225
226 pr = acpi_driver_data(device);
227 if (!pr)
224 return -EINVAL; 228 return -EINVAL;
225 229
226 *state = acpi_processor_max_state(pr); 230 *state = acpi_processor_max_state(pr);
@@ -232,9 +236,13 @@ processor_get_cur_state(struct thermal_cooling_device *cdev,
232 unsigned long *cur_state) 236 unsigned long *cur_state)
233{ 237{
234 struct acpi_device *device = cdev->devdata; 238 struct acpi_device *device = cdev->devdata;
235 struct acpi_processor *pr = acpi_driver_data(device); 239 struct acpi_processor *pr;
236 240
237 if (!device || !pr) 241 if (!device)
242 return -EINVAL;
243
244 pr = acpi_driver_data(device);
245 if (!pr)
238 return -EINVAL; 246 return -EINVAL;
239 247
240 *cur_state = cpufreq_get_cur_state(pr->id); 248 *cur_state = cpufreq_get_cur_state(pr->id);
@@ -248,11 +256,15 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
248 unsigned long state) 256 unsigned long state)
249{ 257{
250 struct acpi_device *device = cdev->devdata; 258 struct acpi_device *device = cdev->devdata;
251 struct acpi_processor *pr = acpi_driver_data(device); 259 struct acpi_processor *pr;
252 int result = 0; 260 int result = 0;
253 int max_pstate; 261 int max_pstate;
254 262
255 if (!device || !pr) 263 if (!device)
264 return -EINVAL;
265
266 pr = acpi_driver_data(device);
267 if (!pr)
256 return -EINVAL; 268 return -EINVAL;
257 269
258 max_pstate = cpufreq_get_max_state(pr->id); 270 max_pstate = cpufreq_get_max_state(pr->id);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1d02b7b5ade0..e7dd2c1fee79 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -211,9 +211,10 @@ err_ret:
211 */ 211 */
212void acpi_processor_throttling_init(void) 212void acpi_processor_throttling_init(void)
213{ 213{
214 if (acpi_processor_update_tsd_coord()) 214 if (acpi_processor_update_tsd_coord()) {
215 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 215 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
216 "Assume no T-state coordination\n")); 216 "Assume no T-state coordination\n"));
217 }
217 218
218 return; 219 return;
219} 220}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f54d1985e594..fe158fd4f1df 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -63,6 +63,19 @@ int acpi_scan_add_handler(struct acpi_scan_handler *handler)
63 return 0; 63 return 0;
64} 64}
65 65
66int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
67 const char *hotplug_profile_name)
68{
69 int error;
70
71 error = acpi_scan_add_handler(handler);
72 if (error)
73 return error;
74
75 acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
76 return 0;
77}
78
66/* 79/*
67 * Creates hid/cid(s) string needed for modalias and uevent 80 * Creates hid/cid(s) string needed for modalias and uevent
68 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: 81 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -107,32 +120,20 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
107} 120}
108static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); 121static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
109 122
110/** 123static int acpi_scan_hot_remove(struct acpi_device *device)
111 * acpi_bus_hot_remove_device: hot-remove a device and its children
112 * @context: struct acpi_eject_event pointer (freed in this func)
113 *
114 * Hot-remove a device and its children. This function frees up the
115 * memory space passed by arg context, so that the caller may call
116 * this function asynchronously through acpi_os_hotplug_execute().
117 */
118void acpi_bus_hot_remove_device(void *context)
119{ 124{
120 struct acpi_eject_event *ej_event = context;
121 struct acpi_device *device = ej_event->device;
122 acpi_handle handle = device->handle; 125 acpi_handle handle = device->handle;
123 acpi_handle temp; 126 acpi_handle not_used;
124 struct acpi_object_list arg_list; 127 struct acpi_object_list arg_list;
125 union acpi_object arg; 128 union acpi_object arg;
126 acpi_status status = AE_OK; 129 acpi_status status;
127 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ 130 unsigned long long sta;
128
129 mutex_lock(&acpi_scan_lock);
130 131
131 /* If there is no handle, the device node has been unregistered. */ 132 /* If there is no handle, the device node has been unregistered. */
132 if (!device->handle) { 133 if (!handle) {
133 dev_dbg(&device->dev, "ACPI handle missing\n"); 134 dev_dbg(&device->dev, "ACPI handle missing\n");
134 put_device(&device->dev); 135 put_device(&device->dev);
135 goto out; 136 return -EINVAL;
136 } 137 }
137 138
138 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 139 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -143,7 +144,7 @@ void acpi_bus_hot_remove_device(void *context)
143 put_device(&device->dev); 144 put_device(&device->dev);
144 device = NULL; 145 device = NULL;
145 146
146 if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) { 147 if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) {
147 arg_list.count = 1; 148 arg_list.count = 1;
148 arg_list.pointer = &arg; 149 arg_list.pointer = &arg;
149 arg.type = ACPI_TYPE_INTEGER; 150 arg.type = ACPI_TYPE_INTEGER;
@@ -161,18 +162,205 @@ void acpi_bus_hot_remove_device(void *context)
161 */ 162 */
162 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 163 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
163 if (ACPI_FAILURE(status)) { 164 if (ACPI_FAILURE(status)) {
164 if (status != AE_NOT_FOUND) 165 if (status == AE_NOT_FOUND) {
165 acpi_handle_warn(handle, "Eject failed\n"); 166 return -ENODEV;
167 } else {
168 acpi_handle_warn(handle, "Eject failed (0x%x)\n",
169 status);
170 return -EIO;
171 }
172 }
166 173
167 /* Tell the firmware the hot-remove operation has failed. */ 174 /*
168 acpi_evaluate_hotplug_ost(handle, ej_event->event, 175 * Verify if eject was indeed successful. If not, log an error
169	 			ost_code, NULL);	176	 * message. No need to call _OST since the _EJ0 call succeeded.
177 */
178 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
179 if (ACPI_FAILURE(status)) {
180 acpi_handle_warn(handle,
181 "Status check after eject failed (0x%x)\n", status);
182 } else if (sta & ACPI_STA_DEVICE_ENABLED) {
183 acpi_handle_warn(handle,
184 "Eject incomplete - status 0x%llx\n", sta);
185 }
186
187 return 0;
188}
189
190static void acpi_bus_device_eject(void *context)
191{
192 acpi_handle handle = context;
193 struct acpi_device *device = NULL;
194 struct acpi_scan_handler *handler;
195 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
196
197 mutex_lock(&acpi_scan_lock);
198
199 acpi_bus_get_device(handle, &device);
200 if (!device)
201 goto err_out;
202
203 handler = device->handler;
204 if (!handler || !handler->hotplug.enabled) {
205 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
206 goto err_out;
207 }
208 acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
209 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
210 if (handler->hotplug.mode == AHM_CONTAINER) {
211 device->flags.eject_pending = true;
212 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
213 } else {
214 int error;
215
216 get_device(&device->dev);
217 error = acpi_scan_hot_remove(device);
218 if (error)
219 goto err_out;
170 } 220 }
171 221
172 out: 222 out:
173 mutex_unlock(&acpi_scan_lock); 223 mutex_unlock(&acpi_scan_lock);
174 kfree(context);
175 return; 224 return;
225
226 err_out:
227 acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
228 NULL);
229 goto out;
230}
231
232static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
233{
234 struct acpi_device *device = NULL;
235 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
236 int error;
237
238 mutex_lock(&acpi_scan_lock);
239
240 acpi_bus_get_device(handle, &device);
241 if (device) {
242 dev_warn(&device->dev, "Attempt to re-insert\n");
243 goto out;
244 }
245 acpi_evaluate_hotplug_ost(handle, ost_source,
246 ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
247 error = acpi_bus_scan(handle);
248 if (error) {
249 acpi_handle_warn(handle, "Namespace scan failure\n");
250 goto out;
251 }
252 error = acpi_bus_get_device(handle, &device);
253 if (error) {
254 acpi_handle_warn(handle, "Missing device node object\n");
255 goto out;
256 }
257 ost_code = ACPI_OST_SC_SUCCESS;
258 if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
259 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
260
261 out:
262 acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
263 mutex_unlock(&acpi_scan_lock);
264}
265
266static void acpi_scan_bus_check(void *context)
267{
268 acpi_scan_bus_device_check((acpi_handle)context,
269 ACPI_NOTIFY_BUS_CHECK);
270}
271
272static void acpi_scan_device_check(void *context)
273{
274 acpi_scan_bus_device_check((acpi_handle)context,
275 ACPI_NOTIFY_DEVICE_CHECK);
276}
277
278static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
279{
280 u32 ost_status;
281
282 switch (type) {
283 case ACPI_NOTIFY_BUS_CHECK:
284 acpi_handle_debug(handle,
285 "ACPI_NOTIFY_BUS_CHECK event: unsupported\n");
286 ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
287 break;
288 case ACPI_NOTIFY_DEVICE_CHECK:
289 acpi_handle_debug(handle,
290 "ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n");
291 ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
292 break;
293 case ACPI_NOTIFY_EJECT_REQUEST:
294 acpi_handle_debug(handle,
295 "ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n");
296 ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
297 break;
298 default:
299 /* non-hotplug event; possibly handled by other handler */
300 return;
301 }
302
303 acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL);
304}
305
306static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
307{
308 acpi_osd_exec_callback callback;
309 struct acpi_scan_handler *handler = data;
310 acpi_status status;
311
312 if (!handler->hotplug.enabled)
313 return acpi_hotplug_unsupported(handle, type);
314
315 switch (type) {
316 case ACPI_NOTIFY_BUS_CHECK:
317 acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
318 callback = acpi_scan_bus_check;
319 break;
320 case ACPI_NOTIFY_DEVICE_CHECK:
321 acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
322 callback = acpi_scan_device_check;
323 break;
324 case ACPI_NOTIFY_EJECT_REQUEST:
325 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
326 callback = acpi_bus_device_eject;
327 break;
328 default:
329 /* non-hotplug event; possibly handled by other handler */
330 return;
331 }
332 status = acpi_os_hotplug_execute(callback, handle);
333 if (ACPI_FAILURE(status))
334 acpi_evaluate_hotplug_ost(handle, type,
335 ACPI_OST_SC_NON_SPECIFIC_FAILURE,
336 NULL);
337}
338
339/**
340 * acpi_bus_hot_remove_device: hot-remove a device and its children
341 * @context: struct acpi_eject_event pointer (freed in this func)
342 *
343 * Hot-remove a device and its children. This function frees up the
344 * memory space passed by arg context, so that the caller may call
345 * this function asynchronously through acpi_os_hotplug_execute().
346 */
347void acpi_bus_hot_remove_device(void *context)
348{
349 struct acpi_eject_event *ej_event = context;
350 struct acpi_device *device = ej_event->device;
351 acpi_handle handle = device->handle;
352 int error;
353
354 mutex_lock(&acpi_scan_lock);
355
356 error = acpi_scan_hot_remove(device);
357 if (error && handle)
358 acpi_evaluate_hotplug_ost(handle, ej_event->event,
359 ACPI_OST_SC_NON_SPECIFIC_FAILURE,
360 NULL);
361
362 mutex_unlock(&acpi_scan_lock);
363 kfree(context);
176} 364}
177EXPORT_SYMBOL(acpi_bus_hot_remove_device); 365EXPORT_SYMBOL(acpi_bus_hot_remove_device);
178 366
@@ -206,51 +394,61 @@ static ssize_t
206acpi_eject_store(struct device *d, struct device_attribute *attr, 394acpi_eject_store(struct device *d, struct device_attribute *attr,
207 const char *buf, size_t count) 395 const char *buf, size_t count)
208{ 396{
209 int ret = count;
210 acpi_status status;
211 acpi_object_type type = 0;
212 struct acpi_device *acpi_device = to_acpi_device(d); 397 struct acpi_device *acpi_device = to_acpi_device(d);
213 struct acpi_eject_event *ej_event; 398 struct acpi_eject_event *ej_event;
399 acpi_object_type not_used;
400 acpi_status status;
401 u32 ost_source;
402 int ret;
214 403
215 if ((!count) || (buf[0] != '1')) { 404 if (!count || buf[0] != '1')
216 return -EINVAL; 405 return -EINVAL;
217 }
218 if (!acpi_device->driver && !acpi_device->handler) {
219 ret = -ENODEV;
220 goto err;
221 }
222 status = acpi_get_type(acpi_device->handle, &type);
223 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
224 ret = -ENODEV;
225 goto err;
226 }
227 406
228 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); 407 if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
229 if (!ej_event) { 408 && !acpi_device->driver)
230 ret = -ENOMEM; 409 return -ENODEV;
231 goto err; 410
232 } 411 status = acpi_get_type(acpi_device->handle, &not_used);
412 if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
413 return -ENODEV;
414
415 mutex_lock(&acpi_scan_lock);
233 416
234 get_device(&acpi_device->dev);
235 ej_event->device = acpi_device;
236 if (acpi_device->flags.eject_pending) { 417 if (acpi_device->flags.eject_pending) {
237 /* event originated from ACPI eject notification */ 418 /* ACPI eject notification event. */
238 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 419 ost_source = ACPI_NOTIFY_EJECT_REQUEST;
239 acpi_device->flags.eject_pending = 0; 420 acpi_device->flags.eject_pending = 0;
240 } else { 421 } else {
241 /* event originated from user */ 422 /* Eject initiated by user space. */
242 ej_event->event = ACPI_OST_EC_OSPM_EJECT; 423 ost_source = ACPI_OST_EC_OSPM_EJECT;
243 (void) acpi_evaluate_hotplug_ost(acpi_device->handle,
244 ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
245 } 424 }
246 425 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
426 if (!ej_event) {
427 ret = -ENOMEM;
428 goto err_out;
429 }
430 acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
431 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
432 ej_event->device = acpi_device;
433 ej_event->event = ost_source;
434 get_device(&acpi_device->dev);
247 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); 435 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
248 if (ACPI_FAILURE(status)) { 436 if (ACPI_FAILURE(status)) {
249 put_device(&acpi_device->dev); 437 put_device(&acpi_device->dev);
250 kfree(ej_event); 438 kfree(ej_event);
439 ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
440 goto err_out;
251 } 441 }
252err: 442 ret = count;
443
444 out:
445 mutex_unlock(&acpi_scan_lock);
253 return ret; 446 return ret;
447
448 err_out:
449 acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
450 ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
451 goto out;
254} 452}
255 453
256static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); 454static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -376,7 +574,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
376 goto end; 574 goto end;
377 } 575 }
378 576
379 if (dev->flags.bus_address) 577 if (dev->pnp.type.bus_address)
380 result = device_create_file(&dev->dev, &dev_attr_adr); 578 result = device_create_file(&dev->dev, &dev_attr_adr);
381 if (dev->pnp.unique_id) 579 if (dev->pnp.unique_id)
382 result = device_create_file(&dev->dev, &dev_attr_uid); 580 result = device_create_file(&dev->dev, &dev_attr_uid);
@@ -449,7 +647,7 @@ static void acpi_device_remove_files(struct acpi_device *dev)
449 647
450 if (dev->pnp.unique_id) 648 if (dev->pnp.unique_id)
451 device_remove_file(&dev->dev, &dev_attr_uid); 649 device_remove_file(&dev->dev, &dev_attr_uid);
452 if (dev->flags.bus_address) 650 if (dev->pnp.type.bus_address)
453 device_remove_file(&dev->dev, &dev_attr_adr); 651 device_remove_file(&dev->dev, &dev_attr_adr);
454 device_remove_file(&dev->dev, &dev_attr_modalias); 652 device_remove_file(&dev->dev, &dev_attr_modalias);
455 device_remove_file(&dev->dev, &dev_attr_hid); 653 device_remove_file(&dev->dev, &dev_attr_hid);
@@ -512,17 +710,6 @@ int acpi_match_device_ids(struct acpi_device *device,
512} 710}
513EXPORT_SYMBOL(acpi_match_device_ids); 711EXPORT_SYMBOL(acpi_match_device_ids);
514 712
515void acpi_free_ids(struct acpi_device *device)
516{
517 struct acpi_hardware_id *id, *tmp;
518
519 list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
520 kfree(id->id);
521 kfree(id);
522 }
523 kfree(device->pnp.unique_id);
524}
525
526static void acpi_free_power_resources_lists(struct acpi_device *device) 713static void acpi_free_power_resources_lists(struct acpi_device *device)
527{ 714{
528 int i; 715 int i;
@@ -543,7 +730,7 @@ static void acpi_device_release(struct device *dev)
543{ 730{
544 struct acpi_device *acpi_dev = to_acpi_device(dev); 731 struct acpi_device *acpi_dev = to_acpi_device(dev);
545 732
546 acpi_free_ids(acpi_dev); 733 acpi_free_pnp_ids(&acpi_dev->pnp);
547 acpi_free_power_resources_lists(acpi_dev); 734 acpi_free_power_resources_lists(acpi_dev);
548 kfree(acpi_dev); 735 kfree(acpi_dev);
549} 736}
@@ -1256,19 +1443,17 @@ static void acpi_device_get_busid(struct acpi_device *device)
1256} 1443}
1257 1444
1258/* 1445/*
1259 * acpi_bay_match - see if a device is an ejectable driver bay 1446 * acpi_bay_match - see if an acpi object is an ejectable driver bay
1260 * 1447 *
1261 * If an acpi object is ejectable and has one of the ACPI ATA methods defined, 1448 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
1262 * then we can safely call it an ejectable drive bay 1449 * then we can safely call it an ejectable drive bay
1263 */ 1450 */
1264static int acpi_bay_match(struct acpi_device *device){ 1451static int acpi_bay_match(acpi_handle handle)
1452{
1265 acpi_status status; 1453 acpi_status status;
1266 acpi_handle handle;
1267 acpi_handle tmp; 1454 acpi_handle tmp;
1268 acpi_handle phandle; 1455 acpi_handle phandle;
1269 1456
1270 handle = device->handle;
1271
1272 status = acpi_get_handle(handle, "_EJ0", &tmp); 1457 status = acpi_get_handle(handle, "_EJ0", &tmp);
1273 if (ACPI_FAILURE(status)) 1458 if (ACPI_FAILURE(status))
1274 return -ENODEV; 1459 return -ENODEV;
@@ -1292,12 +1477,12 @@ static int acpi_bay_match(struct acpi_device *device){
1292} 1477}
1293 1478
1294/* 1479/*
1295 * acpi_dock_match - see if a device has a _DCK method 1480 * acpi_dock_match - see if an acpi object has a _DCK method
1296 */ 1481 */
1297static int acpi_dock_match(struct acpi_device *device) 1482static int acpi_dock_match(acpi_handle handle)
1298{ 1483{
1299 acpi_handle tmp; 1484 acpi_handle tmp;
1300 return acpi_get_handle(device->handle, "_DCK", &tmp); 1485 return acpi_get_handle(handle, "_DCK", &tmp);
1301} 1486}
1302 1487
1303const char *acpi_device_hid(struct acpi_device *device) 1488const char *acpi_device_hid(struct acpi_device *device)
@@ -1312,7 +1497,7 @@ const char *acpi_device_hid(struct acpi_device *device)
1312} 1497}
1313EXPORT_SYMBOL(acpi_device_hid); 1498EXPORT_SYMBOL(acpi_device_hid);
1314 1499
1315static void acpi_add_id(struct acpi_device *device, const char *dev_id) 1500static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
1316{ 1501{
1317 struct acpi_hardware_id *id; 1502 struct acpi_hardware_id *id;
1318 1503
@@ -1326,7 +1511,8 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1326 return; 1511 return;
1327 } 1512 }
1328 1513
1329 list_add_tail(&id->list, &device->pnp.ids); 1514 list_add_tail(&id->list, &pnp->ids);
1515 pnp->type.hardware_id = 1;
1330} 1516}
1331 1517
1332/* 1518/*
@@ -1334,7 +1520,7 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1334 * lacks the SMBUS01 HID and the methods do not have the necessary "_" 1520 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
1335 * prefix. Work around this. 1521 * prefix. Work around this.
1336 */ 1522 */
1337static int acpi_ibm_smbus_match(struct acpi_device *device) 1523static int acpi_ibm_smbus_match(acpi_handle handle)
1338{ 1524{
1339 acpi_handle h_dummy; 1525 acpi_handle h_dummy;
1340 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; 1526 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -1344,7 +1530,7 @@ static int acpi_ibm_smbus_match(struct acpi_device *device)
1344 return -ENODEV; 1530 return -ENODEV;
1345 1531
1346 /* Look for SMBS object */ 1532 /* Look for SMBS object */
1347 result = acpi_get_name(device->handle, ACPI_SINGLE_NAME, &path); 1533 result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
1348 if (result) 1534 if (result)
1349 return result; 1535 return result;
1350 1536
@@ -1355,48 +1541,50 @@ static int acpi_ibm_smbus_match(struct acpi_device *device)
1355 1541
1356 /* Does it have the necessary (but misnamed) methods? */ 1542 /* Does it have the necessary (but misnamed) methods? */
1357 result = -ENODEV; 1543 result = -ENODEV;
1358 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "SBI", &h_dummy)) && 1544 if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
1359 ACPI_SUCCESS(acpi_get_handle(device->handle, "SBR", &h_dummy)) && 1545 ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
1360 ACPI_SUCCESS(acpi_get_handle(device->handle, "SBW", &h_dummy))) 1546 ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
1361 result = 0; 1547 result = 0;
1362out: 1548out:
1363 kfree(path.pointer); 1549 kfree(path.pointer);
1364 return result; 1550 return result;
1365} 1551}
1366 1552
1367static void acpi_device_set_id(struct acpi_device *device) 1553static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
1554 int device_type)
1368{ 1555{
1369 acpi_status status; 1556 acpi_status status;
1370 struct acpi_device_info *info; 1557 struct acpi_device_info *info;
1371 struct acpi_pnp_device_id_list *cid_list; 1558 struct acpi_pnp_device_id_list *cid_list;
1372 int i; 1559 int i;
1373 1560
1374 switch (device->device_type) { 1561 switch (device_type) {
1375 case ACPI_BUS_TYPE_DEVICE: 1562 case ACPI_BUS_TYPE_DEVICE:
1376 if (ACPI_IS_ROOT_DEVICE(device)) { 1563 if (handle == ACPI_ROOT_OBJECT) {
1377 acpi_add_id(device, ACPI_SYSTEM_HID); 1564 acpi_add_id(pnp, ACPI_SYSTEM_HID);
1378 break; 1565 break;
1379 } 1566 }
1380 1567
1381 status = acpi_get_object_info(device->handle, &info); 1568 status = acpi_get_object_info(handle, &info);
1382 if (ACPI_FAILURE(status)) { 1569 if (ACPI_FAILURE(status)) {
1383 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); 1570 pr_err(PREFIX "%s: Error reading device info\n",
1571 __func__);
1384 return; 1572 return;
1385 } 1573 }
1386 1574
1387 if (info->valid & ACPI_VALID_HID) 1575 if (info->valid & ACPI_VALID_HID)
1388 acpi_add_id(device, info->hardware_id.string); 1576 acpi_add_id(pnp, info->hardware_id.string);
1389 if (info->valid & ACPI_VALID_CID) { 1577 if (info->valid & ACPI_VALID_CID) {
1390 cid_list = &info->compatible_id_list; 1578 cid_list = &info->compatible_id_list;
1391 for (i = 0; i < cid_list->count; i++) 1579 for (i = 0; i < cid_list->count; i++)
1392 acpi_add_id(device, cid_list->ids[i].string); 1580 acpi_add_id(pnp, cid_list->ids[i].string);
1393 } 1581 }
1394 if (info->valid & ACPI_VALID_ADR) { 1582 if (info->valid & ACPI_VALID_ADR) {
1395 device->pnp.bus_address = info->address; 1583 pnp->bus_address = info->address;
1396 device->flags.bus_address = 1; 1584 pnp->type.bus_address = 1;
1397 } 1585 }
1398 if (info->valid & ACPI_VALID_UID) 1586 if (info->valid & ACPI_VALID_UID)
1399 device->pnp.unique_id = kstrdup(info->unique_id.string, 1587 pnp->unique_id = kstrdup(info->unique_id.string,
1400 GFP_KERNEL); 1588 GFP_KERNEL);
1401 1589
1402 kfree(info); 1590 kfree(info);
@@ -1405,40 +1593,50 @@ static void acpi_device_set_id(struct acpi_device *device)
1405 * Some devices don't reliably have _HIDs & _CIDs, so add 1593 * Some devices don't reliably have _HIDs & _CIDs, so add
1406 * synthetic HIDs to make sure drivers can find them. 1594 * synthetic HIDs to make sure drivers can find them.
1407 */ 1595 */
1408 if (acpi_is_video_device(device)) 1596 if (acpi_is_video_device(handle))
1409 acpi_add_id(device, ACPI_VIDEO_HID); 1597 acpi_add_id(pnp, ACPI_VIDEO_HID);
1410 else if (ACPI_SUCCESS(acpi_bay_match(device))) 1598 else if (ACPI_SUCCESS(acpi_bay_match(handle)))
1411 acpi_add_id(device, ACPI_BAY_HID); 1599 acpi_add_id(pnp, ACPI_BAY_HID);
1412 else if (ACPI_SUCCESS(acpi_dock_match(device))) 1600 else if (ACPI_SUCCESS(acpi_dock_match(handle)))
1413 acpi_add_id(device, ACPI_DOCK_HID); 1601 acpi_add_id(pnp, ACPI_DOCK_HID);
1414 else if (!acpi_ibm_smbus_match(device)) 1602 else if (!acpi_ibm_smbus_match(handle))
1415 acpi_add_id(device, ACPI_SMBUS_IBM_HID); 1603 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
1416 else if (list_empty(&device->pnp.ids) && 1604 else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
1417 ACPI_IS_ROOT_DEVICE(device->parent)) { 1605 acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
1418 acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ 1606 strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
1419 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); 1607 strcpy(pnp->device_class, ACPI_BUS_CLASS);
1420 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1421 } 1608 }
1422 1609
1423 break; 1610 break;
1424 case ACPI_BUS_TYPE_POWER: 1611 case ACPI_BUS_TYPE_POWER:
1425 acpi_add_id(device, ACPI_POWER_HID); 1612 acpi_add_id(pnp, ACPI_POWER_HID);
1426 break; 1613 break;
1427 case ACPI_BUS_TYPE_PROCESSOR: 1614 case ACPI_BUS_TYPE_PROCESSOR:
1428 acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID); 1615 acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
1429 break; 1616 break;
1430 case ACPI_BUS_TYPE_THERMAL: 1617 case ACPI_BUS_TYPE_THERMAL:
1431 acpi_add_id(device, ACPI_THERMAL_HID); 1618 acpi_add_id(pnp, ACPI_THERMAL_HID);
1432 break; 1619 break;
1433 case ACPI_BUS_TYPE_POWER_BUTTON: 1620 case ACPI_BUS_TYPE_POWER_BUTTON:
1434 acpi_add_id(device, ACPI_BUTTON_HID_POWERF); 1621 acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
1435 break; 1622 break;
1436 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1623 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1437 acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF); 1624 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
1438 break; 1625 break;
1439 } 1626 }
1440} 1627}
1441 1628
1629void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
1630{
1631 struct acpi_hardware_id *id, *tmp;
1632
1633 list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
1634 kfree(id->id);
1635 kfree(id);
1636 }
1637 kfree(pnp->unique_id);
1638}
1639
1442void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1640void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1443 int type, unsigned long long sta) 1641 int type, unsigned long long sta)
1444{ 1642{
@@ -1448,7 +1646,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1448 device->parent = acpi_bus_get_parent(handle); 1646 device->parent = acpi_bus_get_parent(handle);
1449 STRUCT_TO_INT(device->status) = sta; 1647 STRUCT_TO_INT(device->status) = sta;
1450 acpi_device_get_busid(device); 1648 acpi_device_get_busid(device);
1451 acpi_device_set_id(device); 1649 acpi_set_pnp_ids(handle, &device->pnp, type);
1452 acpi_bus_get_flags(device); 1650 acpi_bus_get_flags(device);
1453 device->flags.match_driver = false; 1651 device->flags.match_driver = false;
1454 device_initialize(&device->dev); 1652 device_initialize(&device->dev);
@@ -1536,6 +1734,75 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1536 return 0; 1734 return 0;
1537} 1735}
1538 1736
1737static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
1738 char *idstr,
1739 const struct acpi_device_id **matchid)
1740{
1741 const struct acpi_device_id *devid;
1742
1743 for (devid = handler->ids; devid->id[0]; devid++)
1744 if (!strcmp((char *)devid->id, idstr)) {
1745 if (matchid)
1746 *matchid = devid;
1747
1748 return true;
1749 }
1750
1751 return false;
1752}
1753
1754static struct acpi_scan_handler *acpi_scan_match_handler(char *idstr,
1755 const struct acpi_device_id **matchid)
1756{
1757 struct acpi_scan_handler *handler;
1758
1759 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
1760 if (acpi_scan_handler_matching(handler, idstr, matchid))
1761 return handler;
1762
1763 return NULL;
1764}
1765
1766void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
1767{
1768 if (!!hotplug->enabled == !!val)
1769 return;
1770
1771 mutex_lock(&acpi_scan_lock);
1772
1773 hotplug->enabled = val;
1774
1775 mutex_unlock(&acpi_scan_lock);
1776}
1777
1778static void acpi_scan_init_hotplug(acpi_handle handle, int type)
1779{
1780 struct acpi_device_pnp pnp = {};
1781 struct acpi_hardware_id *hwid;
1782 struct acpi_scan_handler *handler;
1783
1784 INIT_LIST_HEAD(&pnp.ids);
1785 acpi_set_pnp_ids(handle, &pnp, type);
1786
1787 if (!pnp.type.hardware_id)
1788 return;
1789
1790 /*
1791 * This relies on the fact that acpi_install_notify_handler() will not
1792 * install the same notify handler routine twice for the same handle.
1793 */
1794 list_for_each_entry(hwid, &pnp.ids, list) {
1795 handler = acpi_scan_match_handler(hwid->id, NULL);
1796 if (handler) {
1797 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1798 acpi_hotplug_notify_cb, handler);
1799 break;
1800 }
1801 }
1802
1803 acpi_free_pnp_ids(&pnp);
1804}
1805
1539static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, 1806static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1540 void *not_used, void **return_value) 1807 void *not_used, void **return_value)
1541{ 1808{
@@ -1558,6 +1825,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1558 return AE_OK; 1825 return AE_OK;
1559 } 1826 }
1560 1827
1828 acpi_scan_init_hotplug(handle, type);
1829
1561 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1830 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1562 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1831 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
1563 struct acpi_device_wakeup wakeup; 1832 struct acpi_device_wakeup wakeup;
@@ -1583,42 +1852,26 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1583 return AE_OK; 1852 return AE_OK;
1584} 1853}
1585 1854
1586static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id) 1855static int acpi_scan_attach_handler(struct acpi_device *device)
1587{ 1856{
1588 struct acpi_scan_handler *handler; 1857 struct acpi_hardware_id *hwid;
1858 int ret = 0;
1589 1859
1590 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) { 1860 list_for_each_entry(hwid, &device->pnp.ids, list) {
1591 const struct acpi_device_id *devid; 1861 const struct acpi_device_id *devid;
1862 struct acpi_scan_handler *handler;
1592 1863
1593 for (devid = handler->ids; devid->id[0]; devid++) { 1864 handler = acpi_scan_match_handler(hwid->id, &devid);
1594 int ret; 1865 if (handler) {
1595
1596 if (strcmp((char *)devid->id, id))
1597 continue;
1598
1599 ret = handler->attach(device, devid); 1866 ret = handler->attach(device, devid);
1600 if (ret > 0) { 1867 if (ret > 0) {
1601 device->handler = handler; 1868 device->handler = handler;
1602 return ret; 1869 break;
1603 } else if (ret < 0) { 1870 } else if (ret < 0) {
1604 return ret; 1871 break;
1605 } 1872 }
1606 } 1873 }
1607 } 1874 }
1608 return 0;
1609}
1610
1611static int acpi_scan_attach_handler(struct acpi_device *device)
1612{
1613 struct acpi_hardware_id *hwid;
1614 int ret = 0;
1615
1616 list_for_each_entry(hwid, &device->pnp.ids, list) {
1617 ret = acpi_scan_do_attach_handler(device, hwid->id);
1618 if (ret)
1619 break;
1620
1621 }
1622 return ret; 1875 return ret;
1623} 1876}
1624 1877
@@ -1788,8 +2041,10 @@ int __init acpi_scan_init(void)
1788 acpi_pci_root_init(); 2041 acpi_pci_root_init();
1789 acpi_pci_link_init(); 2042 acpi_pci_link_init();
1790 acpi_platform_init(); 2043 acpi_platform_init();
2044 acpi_lpss_init();
1791 acpi_csrt_init(); 2045 acpi_csrt_init();
1792 acpi_container_init(); 2046 acpi_container_init();
2047 acpi_memory_hotplug_init();
1793 2048
1794 mutex_lock(&acpi_scan_lock); 2049 mutex_lock(&acpi_scan_lock);
1795 /* 2050 /*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41c0504470db..fcae5fa2e1b3 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -7,6 +7,8 @@
7#include <linux/moduleparam.h> 7#include <linux/moduleparam.h>
8#include <acpi/acpi_drivers.h> 8#include <acpi/acpi_drivers.h>
9 9
10#include "internal.h"
11
10#define _COMPONENT ACPI_SYSTEM_COMPONENT 12#define _COMPONENT ACPI_SYSTEM_COMPONENT
11ACPI_MODULE_NAME("sysfs"); 13ACPI_MODULE_NAME("sysfs");
12 14
@@ -249,6 +251,7 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
249static LIST_HEAD(acpi_table_attr_list); 251static LIST_HEAD(acpi_table_attr_list);
250static struct kobject *tables_kobj; 252static struct kobject *tables_kobj;
251static struct kobject *dynamic_tables_kobj; 253static struct kobject *dynamic_tables_kobj;
254static struct kobject *hotplug_kobj;
252 255
253struct acpi_table_attr { 256struct acpi_table_attr {
254 struct bin_attribute attr; 257 struct bin_attribute attr;
@@ -716,6 +719,67 @@ acpi_show_profile(struct device *dev, struct device_attribute *attr,
716static const struct device_attribute pm_profile_attr = 719static const struct device_attribute pm_profile_attr =
717 __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); 720 __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
718 721
722static ssize_t hotplug_enabled_show(struct kobject *kobj,
723 struct kobj_attribute *attr, char *buf)
724{
725 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
726
727 return sprintf(buf, "%d\n", hotplug->enabled);
728}
729
730static ssize_t hotplug_enabled_store(struct kobject *kobj,
731 struct kobj_attribute *attr,
732 const char *buf, size_t size)
733{
734 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
735 unsigned int val;
736
737 if (kstrtouint(buf, 10, &val) || val > 1)
738 return -EINVAL;
739
740 acpi_scan_hotplug_enabled(hotplug, val);
741 return size;
742}
743
744static struct kobj_attribute hotplug_enabled_attr =
745 __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
746 hotplug_enabled_store);
747
748static struct attribute *hotplug_profile_attrs[] = {
749 &hotplug_enabled_attr.attr,
750 NULL
751};
752
753static struct kobj_type acpi_hotplug_profile_ktype = {
754 .sysfs_ops = &kobj_sysfs_ops,
755 .default_attrs = hotplug_profile_attrs,
756};
757
758void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
759 const char *name)
760{
761 int error;
762
763 if (!hotplug_kobj)
764 goto err_out;
765
766 kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
767 error = kobject_set_name(&hotplug->kobj, "%s", name);
768 if (error)
769 goto err_out;
770
771 hotplug->kobj.parent = hotplug_kobj;
772 error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
773 if (error)
774 goto err_out;
775
776 kobject_uevent(&hotplug->kobj, KOBJ_ADD);
777 return;
778
779 err_out:
780 pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
781}
782
719int __init acpi_sysfs_init(void) 783int __init acpi_sysfs_init(void)
720{ 784{
721 int result; 785 int result;
@@ -723,6 +787,8 @@ int __init acpi_sysfs_init(void)
723 result = acpi_tables_sysfs_init(); 787 result = acpi_tables_sysfs_init();
724 if (result) 788 if (result)
725 return result; 789 return result;
790
791 hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
726 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr); 792 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
727 return result; 793 return result;
728} 794}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8470771e5eae..a33821ca3895 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -723,9 +723,19 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
723 return -EINVAL; 723 return -EINVAL;
724 724
725 if (type == THERMAL_TRIP_ACTIVE) { 725 if (type == THERMAL_TRIP_ACTIVE) {
726 /* aggressive active cooling */ 726 unsigned long trip_temp;
727 *trend = THERMAL_TREND_RAISING; 727 unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature,
728 return 0; 728 tz->kelvin_offset);
729 if (thermal_get_trip_temp(thermal, trip, &trip_temp))
730 return -EINVAL;
731
732 if (temp > trip_temp) {
733 *trend = THERMAL_TREND_RAISING;
734 return 0;
735 } else {
736 /* Fall back on default trend */
737 return -EINVAL;
738 }
729 } 739 }
730 740
731 /* 741 /*
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 313f959413dc..c3932d0876e0 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -167,7 +167,8 @@ struct acpi_video_device_flags {
167 u8 dvi:1; 167 u8 dvi:1;
168 u8 bios:1; 168 u8 bios:1;
169 u8 unknown:1; 169 u8 unknown:1;
170 u8 reserved:2; 170 u8 notify:1;
171 u8 reserved:1;
171}; 172};
172 173
173struct acpi_video_device_cap { 174struct acpi_video_device_cap {
@@ -222,7 +223,7 @@ static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
222 int level); 223 int level);
223static int acpi_video_device_lcd_get_level_current( 224static int acpi_video_device_lcd_get_level_current(
224 struct acpi_video_device *device, 225 struct acpi_video_device *device,
225 unsigned long long *level, int init); 226 unsigned long long *level, bool raw);
226static int acpi_video_get_next_level(struct acpi_video_device *device, 227static int acpi_video_get_next_level(struct acpi_video_device *device,
227 u32 level_current, u32 event); 228 u32 level_current, u32 event);
228static int acpi_video_switch_brightness(struct acpi_video_device *device, 229static int acpi_video_switch_brightness(struct acpi_video_device *device,
@@ -236,7 +237,7 @@ static int acpi_video_get_brightness(struct backlight_device *bd)
236 struct acpi_video_device *vd = 237 struct acpi_video_device *vd =
237 (struct acpi_video_device *)bl_get_data(bd); 238 (struct acpi_video_device *)bl_get_data(bd);
238 239
239 if (acpi_video_device_lcd_get_level_current(vd, &cur_level, 0)) 240 if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
240 return -EINVAL; 241 return -EINVAL;
241 for (i = 2; i < vd->brightness->count; i++) { 242 for (i = 2; i < vd->brightness->count; i++) {
242 if (vd->brightness->levels[i] == cur_level) 243 if (vd->brightness->levels[i] == cur_level)
@@ -281,7 +282,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, unsig
281 unsigned long long level; 282 unsigned long long level;
282 int offset; 283 int offset;
283 284
284 if (acpi_video_device_lcd_get_level_current(video, &level, 0)) 285 if (acpi_video_device_lcd_get_level_current(video, &level, false))
285 return -EINVAL; 286 return -EINVAL;
286 for (offset = 2; offset < video->brightness->count; offset++) 287 for (offset = 2; offset < video->brightness->count; offset++)
287 if (level == video->brightness->levels[offset]) { 288 if (level == video->brightness->levels[offset]) {
@@ -447,12 +448,45 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
447 DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), 448 DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
448 }, 449 },
449 }, 450 },
451 {
452 .callback = video_ignore_initial_backlight,
453 .ident = "HP Pavilion dm4",
454 .matches = {
455 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
456 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
457 },
458 },
450 {} 459 {}
451}; 460};
452 461
462static unsigned long long
463acpi_video_bqc_value_to_level(struct acpi_video_device *device,
464 unsigned long long bqc_value)
465{
466 unsigned long long level;
467
468 if (device->brightness->flags._BQC_use_index) {
469 /*
470 * _BQC returns an index that doesn't account for
471 * the first 2 items with special meaning, so we need
472 * to compensate for that by offsetting ourselves
473 */
474 if (device->brightness->flags._BCL_reversed)
475 bqc_value = device->brightness->count - 3 - bqc_value;
476
477 level = device->brightness->levels[bqc_value + 2];
478 } else {
479 level = bqc_value;
480 }
481
482 level += bqc_offset_aml_bug_workaround;
483
484 return level;
485}
486
453static int 487static int
454acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, 488acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
455 unsigned long long *level, int init) 489 unsigned long long *level, bool raw)
456{ 490{
457 acpi_status status = AE_OK; 491 acpi_status status = AE_OK;
458 int i; 492 int i;
@@ -463,29 +497,30 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
463 status = acpi_evaluate_integer(device->dev->handle, buf, 497 status = acpi_evaluate_integer(device->dev->handle, buf,
464 NULL, level); 498 NULL, level);
465 if (ACPI_SUCCESS(status)) { 499 if (ACPI_SUCCESS(status)) {
466 if (device->brightness->flags._BQC_use_index) { 500 if (raw) {
467 if (device->brightness->flags._BCL_reversed) 501 /*
468 *level = device->brightness->count 502 * Caller has indicated he wants the raw
469 - 3 - (*level); 503 * value returned by _BQC, so don't furtherly
470 *level = device->brightness->levels[*level + 2]; 504 * mess with the value.
471 505 */
506 return 0;
472 } 507 }
473 *level += bqc_offset_aml_bug_workaround; 508
509 *level = acpi_video_bqc_value_to_level(device, *level);
510
474 for (i = 2; i < device->brightness->count; i++) 511 for (i = 2; i < device->brightness->count; i++)
475 if (device->brightness->levels[i] == *level) { 512 if (device->brightness->levels[i] == *level) {
476 device->brightness->curr = *level; 513 device->brightness->curr = *level;
477 return 0; 514 return 0;
478 } 515 }
479 if (!init) { 516 /*
480 /* 517 * BQC returned an invalid level.
481 * BQC returned an invalid level. 518 * Stop using it.
482 * Stop using it. 519 */
483 */ 520 ACPI_WARNING((AE_INFO,
484 ACPI_WARNING((AE_INFO, 521 "%s returned an invalid level",
485 "%s returned an invalid level", 522 buf));
486 buf)); 523 device->cap._BQC = device->cap._BCQ = 0;
487 device->cap._BQC = device->cap._BCQ = 0;
488 }
489 } else { 524 } else {
490 /* Fixme: 525 /* Fixme:
491 * should we return an error or ignore this failure? 526 * should we return an error or ignore this failure?
@@ -598,6 +633,56 @@ acpi_video_cmp_level(const void *a, const void *b)
598} 633}
599 634
600/* 635/*
636 * Decides if _BQC/_BCQ for this system is usable
637 *
638	 * We do this by changing the level first and then reading back the current
639	 * brightness level; if the value does not match, find out whether it is using
640	 * an index. If not, clear the _BQC/_BCQ capability.
641 */
642static int acpi_video_bqc_quirk(struct acpi_video_device *device,
643 int max_level, int current_level)
644{
645 struct acpi_video_device_brightness *br = device->brightness;
646 int result;
647 unsigned long long level;
648 int test_level;
649
650 /* don't mess with existing known broken systems */
651 if (bqc_offset_aml_bug_workaround)
652 return 0;
653
654 /*
655 * Some systems always report current brightness level as maximum
656	 * through _BQC, so we need to test another value for them.
657 */
658 test_level = current_level == max_level ? br->levels[2] : max_level;
659
660 result = acpi_video_device_lcd_set_level(device, test_level);
661 if (result)
662 return result;
663
664 result = acpi_video_device_lcd_get_level_current(device, &level, true);
665 if (result)
666 return result;
667
668 if (level != test_level) {
669 /* buggy _BQC found, need to find out if it uses index */
670 if (level < br->count) {
671 if (br->flags._BCL_reversed)
672 level = br->count - 3 - level;
673 if (br->levels[level + 2] == test_level)
674 br->flags._BQC_use_index = 1;
675 }
676
677 if (!br->flags._BQC_use_index)
678 device->cap._BQC = device->cap._BCQ = 0;
679 }
680
681 return 0;
682}
683
684
685/*
601 * Arg: 686 * Arg:
602 * device : video output device (LCD, CRT, ..) 687 * device : video output device (LCD, CRT, ..)
603 * 688 *
@@ -703,42 +788,36 @@ acpi_video_init_brightness(struct acpi_video_device *device)
703 if (!device->cap._BQC) 788 if (!device->cap._BQC)
704 goto set_level; 789 goto set_level;
705 790
706 result = acpi_video_device_lcd_get_level_current(device, &level_old, 1); 791 result = acpi_video_device_lcd_get_level_current(device,
707 if (result) 792 &level_old, true);
708 goto out_free_levels;
709
710 /*
711 * Set the level to maximum and check if _BQC uses indexed value
712 */
713 result = acpi_video_device_lcd_set_level(device, max_level);
714 if (result) 793 if (result)
715 goto out_free_levels; 794 goto out_free_levels;
716 795
717 result = acpi_video_device_lcd_get_level_current(device, &level, 0); 796 result = acpi_video_bqc_quirk(device, max_level, level_old);
718 if (result) 797 if (result)
719 goto out_free_levels; 798 goto out_free_levels;
799 /*
800 * cap._BQC may get cleared due to _BQC is found to be broken
801 * in acpi_video_bqc_quirk, so check again here.
802 */
803 if (!device->cap._BQC)
804 goto set_level;
720 805
721 br->flags._BQC_use_index = (level == max_level ? 0 : 1); 806 if (use_bios_initial_backlight) {
722 807 level = acpi_video_bqc_value_to_level(device, level_old);
723 if (!br->flags._BQC_use_index) {
724 /* 808 /*
725 * Set the backlight to the initial state. 809 * On some buggy laptops, _BQC returns an uninitialized
726 * On some buggy laptops, _BQC returns an uninitialized value 810 * value when invoked for the first time, i.e.
727 * when invoked for the first time, i.e. level_old is invalid. 811 * level_old is invalid (no matter whether it's a level
728 * set the backlight to max_level in this case 812 * or an index). Set the backlight to max_level in this case.
729 */ 813 */
730 if (use_bios_initial_backlight) { 814 for (i = 2; i < br->count; i++)
731 for (i = 2; i < br->count; i++) 815 if (level_old == br->levels[i])
732 if (level_old == br->levels[i]) 816 break;
733 level = level_old; 817 if (i == br->count)
734 } 818 level = max_level;
735 goto set_level;
736 } 819 }
737 820
738 if (br->flags._BCL_reversed)
739 level_old = (br->count - 1) - level_old;
740 level = br->levels[level_old];
741
742set_level: 821set_level:
743 result = acpi_video_device_lcd_set_level(device, level); 822 result = acpi_video_device_lcd_set_level(device, level);
744 if (result) 823 if (result)
@@ -996,53 +1075,51 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
996 struct acpi_video_device *data; 1075 struct acpi_video_device *data;
997 struct acpi_video_device_attrib* attribute; 1076 struct acpi_video_device_attrib* attribute;
998 1077
999 if (!device || !video)
1000 return -EINVAL;
1001
1002 status = 1078 status =
1003 acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); 1079 acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
1004	 	if (ACPI_SUCCESS(status)) {	1080	 	/* Some devices omit _ADR; skip them instead of failing */
1005 1081 if (ACPI_FAILURE(status))
1006 data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL); 1082 return 0;
1007 if (!data)
1008 return -ENOMEM;
1009
1010 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
1011 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1012 device->driver_data = data;
1013
1014 data->device_id = device_id;
1015 data->video = video;
1016 data->dev = device;
1017 1083
1018 attribute = acpi_video_get_device_attr(video, device_id); 1084 data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
1085 if (!data)
1086 return -ENOMEM;
1019 1087
1020 if((attribute != NULL) && attribute->device_id_scheme) { 1088 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
1021 switch (attribute->display_type) { 1089 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1022 case ACPI_VIDEO_DISPLAY_CRT: 1090 device->driver_data = data;
1023 data->flags.crt = 1; 1091
1024 break; 1092 data->device_id = device_id;
1025 case ACPI_VIDEO_DISPLAY_TV: 1093 data->video = video;
1026 data->flags.tvout = 1; 1094 data->dev = device;
1027 break; 1095
1028 case ACPI_VIDEO_DISPLAY_DVI: 1096 attribute = acpi_video_get_device_attr(video, device_id);
1029 data->flags.dvi = 1; 1097
1030 break; 1098 if((attribute != NULL) && attribute->device_id_scheme) {
1031 case ACPI_VIDEO_DISPLAY_LCD: 1099 switch (attribute->display_type) {
1032 data->flags.lcd = 1; 1100 case ACPI_VIDEO_DISPLAY_CRT:
1033 break; 1101 data->flags.crt = 1;
1034 default: 1102 break;
1035 data->flags.unknown = 1; 1103 case ACPI_VIDEO_DISPLAY_TV:
1036 break; 1104 data->flags.tvout = 1;
1037 } 1105 break;
1038 if(attribute->bios_can_detect) 1106 case ACPI_VIDEO_DISPLAY_DVI:
1039 data->flags.bios = 1; 1107 data->flags.dvi = 1;
1040 } else { 1108 break;
1041 /* Check for legacy IDs */ 1109 case ACPI_VIDEO_DISPLAY_LCD:
1042 device_type = acpi_video_get_device_type(video, 1110 data->flags.lcd = 1;
1043 device_id); 1111 break;
1044 /* Ignore bits 16 and 18-20 */ 1112 default:
1045 switch (device_type & 0xffe2ffff) { 1113 data->flags.unknown = 1;
1114 break;
1115 }
1116 if(attribute->bios_can_detect)
1117 data->flags.bios = 1;
1118 } else {
1119 /* Check for legacy IDs */
1120 device_type = acpi_video_get_device_type(video, device_id);
1121 /* Ignore bits 16 and 18-20 */
1122 switch (device_type & 0xffe2ffff) {
1046 case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR: 1123 case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
1047 data->flags.crt = 1; 1124 data->flags.crt = 1;
1048 break; 1125 break;
@@ -1054,34 +1131,24 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1054 break;
1055 default:
1056 data->flags.unknown = 1;
1057 }
1058 }
1059
1060 acpi_video_device_bind(video, data);
1061 acpi_video_device_find_cap(data);
1062
1063 status = acpi_install_notify_handler(device->handle,
1064 ACPI_DEVICE_NOTIFY,
1065 acpi_video_device_notify,
1066 data);
1067 if (ACPI_FAILURE(status)) {
1068 printk(KERN_ERR PREFIX
1069 "Error installing notify handler\n");
1070 if(data->brightness)
1071 kfree(data->brightness->levels);
1072 kfree(data->brightness);
1073 kfree(data);
1074 return -ENODEV;
1075 }
1076
1077 mutex_lock(&video->device_list_lock);
1078 list_add_tail(&data->entry, &video->video_device_list);
1079 mutex_unlock(&video->device_list_lock);
1080
1081 return 0;
1082 }
1083
1084 return -ENOENT;
1085 }
1131 break;
1132 default:
1133 data->flags.unknown = 1;
1134 }
1135 }
1136
1137 acpi_video_device_bind(video, data);
1138 acpi_video_device_find_cap(data);
1139
1140 status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
1141 acpi_video_device_notify, data);
1142 if (ACPI_FAILURE(status))
1143 dev_err(&device->dev, "Error installing notify handler\n");
1144 else
1145 data->flags.notify = 1;
1146
1147 mutex_lock(&video->device_list_lock);
1148 list_add_tail(&data->entry, &video->video_device_list);
1149 mutex_unlock(&video->device_list_lock);
1150
1151 return status;
1152 }
1086 1153
1087/* 1154/*
@@ -1268,7 +1335,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1268 goto out; 1335 goto out;
1269 1336
1270 result = acpi_video_device_lcd_get_level_current(device, 1337 result = acpi_video_device_lcd_get_level_current(device,
1271 &level_current, 0); 1338 &level_current,
1339 false);
1272 if (result) 1340 if (result)
1273 goto out; 1341 goto out;
1274 1342
@@ -1373,9 +1441,8 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
1373 1441
1374 status = acpi_video_bus_get_one_device(dev, video); 1442 status = acpi_video_bus_get_one_device(dev, video);
1375 if (status) { 1443 if (status) {
1376 printk(KERN_WARNING PREFIX 1444 dev_err(&dev->dev, "Can't attach device\n");
1377 "Can't attach device\n"); 1445 break;
1378 continue;
1379 } 1446 }
1380 } 1447 }
1381 return status; 1448 return status;
@@ -1388,13 +1455,14 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1388 if (!device || !device->video) 1455 if (!device || !device->video)
1389 return -ENOENT; 1456 return -ENOENT;
1390 1457
1391 status = acpi_remove_notify_handler(device->dev->handle, 1458 if (device->flags.notify) {
1392 ACPI_DEVICE_NOTIFY, 1459 status = acpi_remove_notify_handler(device->dev->handle,
1393 acpi_video_device_notify); 1460 ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
1394 if (ACPI_FAILURE(status)) { 1461 if (ACPI_FAILURE(status))
1395 printk(KERN_WARNING PREFIX 1462 dev_err(&device->dev->dev,
1396 "Can't remove video notify handler\n"); 1463 "Can't remove video notify handler\n");
1397 } 1464 }
1465
1398 if (device->backlight) { 1466 if (device->backlight) {
1399 backlight_device_unregister(device->backlight); 1467 backlight_device_unregister(device->backlight);
1400 device->backlight = NULL; 1468 device->backlight = NULL;
@@ -1676,7 +1744,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1676 1744
1677 error = acpi_video_bus_get_devices(video, device); 1745 error = acpi_video_bus_get_devices(video, device);
1678 if (error) 1746 if (error)
1679 goto err_free_video; 1747 goto err_put_video;
1680 1748
1681 video->input = input = input_allocate_device(); 1749 video->input = input = input_allocate_device();
1682 if (!input) { 1750 if (!input) {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4ac2593234e7..66f67626f02e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -67,40 +67,37 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
67 return 0; 67 return 0;
68} 68}
69 69
70/* Returns true if the device is a video device which can be handled by 70/* Returns true if the ACPI object is a video device which can be
71 * video.ko. 71 * handled by video.ko.
72 * The device will get a Linux specific CID added in scan.c to 72 * The device will get a Linux specific CID added in scan.c to
73 * identify the device as an ACPI graphics device 73 * identify the device as an ACPI graphics device
74 * Be aware that the graphics device may not be physically present 74 * Be aware that the graphics device may not be physically present
75 * Use acpi_video_get_capabilities() to detect general ACPI video 75 * Use acpi_video_get_capabilities() to detect general ACPI video
76 * capabilities of present cards 76 * capabilities of present cards
77 */ 77 */
78long acpi_is_video_device(struct acpi_device *device) 78long acpi_is_video_device(acpi_handle handle)
79{ 79{
80 acpi_handle h_dummy; 80 acpi_handle h_dummy;
81 long video_caps = 0; 81 long video_caps = 0;
82 82
83 if (!device)
84 return 0;
85
86 /* Is this device able to support video switching ? */ 83 /* Is this device able to support video switching ? */
87 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || 84 if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) ||
88 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) 85 ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
89 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 86 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
90 87
91 /* Is this device able to retrieve a video ROM ? */ 88 /* Is this device able to retrieve a video ROM ? */
92 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) 89 if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy)))
93 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 90 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
94 91
95 /* Is this device able to configure which video head to be POSTed ? */ 92 /* Is this device able to configure which video head to be POSTed ? */
96 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) && 93 if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) &&
97 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) && 94 ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) &&
98 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy))) 95 ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy)))
99 video_caps |= ACPI_VIDEO_DEVICE_POSTING; 96 video_caps |= ACPI_VIDEO_DEVICE_POSTING;
100 97
101 /* Only check for backlight functionality if one of the above hit. */ 98 /* Only check for backlight functionality if one of the above hit. */
102 if (video_caps) 99 if (video_caps)
103 acpi_walk_namespace(ACPI_TYPE_DEVICE, device->handle, 100 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
104 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, 101 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
105 &video_caps, NULL); 102 &video_caps, NULL);
106 103
@@ -127,7 +124,7 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
127 if (!dev) 124 if (!dev)
128 return AE_OK; 125 return AE_OK;
129 pci_dev_put(dev); 126 pci_dev_put(dev);
130 *cap |= acpi_is_video_device(acpi_dev); 127 *cap |= acpi_is_video_device(handle);
131 } 128 }
132 return AE_OK; 129 return AE_OK;
133} 130}
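
The hunk above changes acpi_is_video_device() to take a raw acpi_handle instead of a struct acpi_device and to return the ORed ACPI_VIDEO_* capability bits it probed. A minimal caller sketch under that new signature might look as follows; the example function name and the assumption that <acpi/video.h> carries the declaration and the capability flags are illustrative, not part of this patch.

#include <linux/acpi.h>
#include <acpi/video.h>	/* assumed home of acpi_is_video_device() and ACPI_VIDEO_* */

static bool example_handle_is_video(acpi_handle handle)
{
	long caps = acpi_is_video_device(handle);

	/* Any of the capability bits probed above marks a video.ko candidate. */
	return caps & (ACPI_VIDEO_OUTPUT_SWITCHING |
		       ACPI_VIDEO_ROM_AVAILABLE |
		       ACPI_VIDEO_DEVICE_POSTING);
}
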
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a6b05a35603..7072404c8b6d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -920,7 +920,7 @@ static int pm_genpd_prepare(struct device *dev)
920 pm_wakeup_event(dev, 0); 920 pm_wakeup_event(dev, 0);
921 921
922 if (pm_wakeup_pending()) { 922 if (pm_wakeup_pending()) {
923 pm_runtime_put_sync(dev); 923 pm_runtime_put(dev);
924 return -EBUSY; 924 return -EBUSY;
925 } 925 }
926 926
@@ -961,7 +961,7 @@ static int pm_genpd_prepare(struct device *dev)
961 pm_runtime_enable(dev); 961 pm_runtime_enable(dev);
962 } 962 }
963 963
964 pm_runtime_put_sync(dev); 964 pm_runtime_put(dev);
965 return ret; 965 return ret;
966} 966}
967 967
@@ -1327,7 +1327,7 @@ static void pm_genpd_complete(struct device *dev)
1327 pm_generic_complete(dev); 1327 pm_generic_complete(dev);
1328 pm_runtime_set_active(dev); 1328 pm_runtime_set_active(dev);
1329 pm_runtime_enable(dev); 1329 pm_runtime_enable(dev);
1330 pm_runtime_idle(dev); 1330 pm_request_idle(dev);
1331 } 1331 }
1332} 1332}
1333 1333
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index d03d290f31c2..bfd898b8988e 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -324,6 +324,6 @@ void pm_generic_complete(struct device *dev)
324 * Let runtime PM try to suspend devices that haven't been in use before 324 * Let runtime PM try to suspend devices that haven't been in use before
325 * going into the system-wide sleep state we're resuming from. 325 * going into the system-wide sleep state we're resuming from.
326 */ 326 */
327 pm_runtime_idle(dev); 327 pm_request_idle(dev);
328} 328}
329#endif /* CONFIG_PM_SLEEP */ 329#endif /* CONFIG_PM_SLEEP */
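
The replacements above swap synchronous runtime-PM calls for their asynchronous counterparts in the system sleep paths: pm_runtime_put_sync() becomes pm_runtime_put(), and pm_runtime_idle() becomes pm_request_idle(). A rough sketch of the same pattern in a driver's own .complete() callback (hypothetical driver name, not code from this patch):

#include <linux/pm_runtime.h>

static void example_complete(struct device *dev)
{
	/*
	 * pm_request_idle() only queues an idle notification on the PM
	 * workqueue, so the resume-complete path does not block while the
	 * device decides whether it can runtime-suspend again.
	 */
	pm_request_idle(dev);
}
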
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 15beb500a4e4..5a9b6569dd74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -756,7 +756,7 @@ static void device_complete(struct device *dev, pm_message_t state)
756 756
757 device_unlock(dev); 757 device_unlock(dev);
758 758
759 pm_runtime_put_sync(dev); 759 pm_runtime_put(dev);
760} 760}
761 761
762/** 762/**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 32ee0fc7ea54..f0077cb8e249 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -55,6 +55,7 @@
55 * @rate: Frequency in hertz 55 * @rate: Frequency in hertz
56 * @u_volt: Nominal voltage in microvolts corresponding to this OPP 56 * @u_volt: Nominal voltage in microvolts corresponding to this OPP
57 * @dev_opp: points back to the device_opp struct this opp belongs to 57 * @dev_opp: points back to the device_opp struct this opp belongs to
58 * @head: RCU callback head used for deferred freeing
58 * 59 *
59 * This structure stores the OPP information for a given device. 60 * This structure stores the OPP information for a given device.
60 */ 61 */
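
The new @head kerneldoc line documents an rcu_head embedded in the OPP entry so the entry can be freed only after an RCU grace period. A generic illustration of that pattern (hypothetical structure, not the opp code itself):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_entry {
	int value;
	struct rcu_head head;	/* only used to defer the free */
};

static void example_drop(struct example_entry *e)
{
	/* Readers traversing the data under rcu_read_lock() remain safe;
	 * the memory is released once a grace period has elapsed. */
	kfree_rcu(e, head);
}
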
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1244930e3d7a..ef13ad08afb2 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1400,5 +1400,5 @@ void pm_runtime_remove(struct device *dev)
1400 if (dev->power.runtime_status == RPM_ACTIVE) 1400 if (dev->power.runtime_status == RPM_ACTIVE)
1401 pm_runtime_set_suspended(dev); 1401 pm_runtime_set_suspended(dev);
1402 if (dev->power.irq_safe && dev->parent) 1402 if (dev->power.irq_safe && dev->parent)
1403 pm_runtime_put_sync(dev->parent); 1403 pm_runtime_put(dev->parent);
1404} 1404}
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index f9ba4fab0ddc..04781389d0fb 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,2 +1,2 @@
1clk-x86-lpss-objs := clk-lpss.o clk-lpt.o 1clk-x86-lpss-objs := clk-lpt.o
2obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o 2obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
deleted file mode 100644
index b5e229f3c3d9..000000000000
--- a/drivers/clk/x86/clk-lpss.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Intel Low Power Subsystem clocks.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/clk.h>
15#include <linux/clk-provider.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/module.h>
19
20static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
21{
22 struct resource r;
23 return !acpi_dev_resource_memory(res, &r);
24}
25
26static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
27 void *data, void **retval)
28{
29 struct resource_list_entry *rentry;
30 struct list_head resource_list;
31 struct acpi_device *adev;
32 const char *uid = data;
33 int ret;
34
35 if (acpi_bus_get_device(handle, &adev))
36 return AE_OK;
37
38 if (uid) {
39 if (!adev->pnp.unique_id)
40 return AE_OK;
41 if (strcmp(uid, adev->pnp.unique_id))
42 return AE_OK;
43 }
44
45 INIT_LIST_HEAD(&resource_list);
46 ret = acpi_dev_get_resources(adev, &resource_list,
47 clk_lpss_is_mmio_resource, NULL);
48 if (ret < 0)
49 return AE_NO_MEMORY;
50
51 list_for_each_entry(rentry, &resource_list, node)
52 if (resource_type(&rentry->res) == IORESOURCE_MEM) {
53 *(struct resource *)retval = rentry->res;
54 break;
55 }
56
57 acpi_dev_free_resource_list(&resource_list);
58 return AE_OK;
59}
60
61/**
62 * clk_register_lpss_gate - register LPSS clock gate
63 * @name: name of this clock gate
64 * @parent_name: parent clock name
65 * @hid: ACPI _HID of the device
66 * @uid: ACPI _UID of the device (optional)
67 * @offset: LPSS PRV_CLOCK_PARAMS offset
68 *
69 * Creates and registers LPSS clock gate.
70 */
71struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
72 const char *hid, const char *uid,
73 unsigned offset)
74{
75 struct resource res = { };
76 void __iomem *mmio_base;
77 acpi_status status;
78 struct clk *clk;
79
80 /*
81 * First try to look the device and its mmio resource from the
82 * ACPI namespace.
83 */
84 status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
85 (void **)&res);
86 if (ACPI_FAILURE(status) || !res.start)
87 return ERR_PTR(-ENODEV);
88
89 mmio_base = ioremap(res.start, resource_size(&res));
90 if (!mmio_base)
91 return ERR_PTR(-ENOMEM);
92
93 clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
94 0, 0, NULL);
95 if (IS_ERR(clk))
96 iounmap(mmio_base);
97
98 return clk;
99}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
deleted file mode 100644
index e9460f442297..000000000000
--- a/drivers/clk/x86/clk-lpss.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Intel Low Power Subsystem clock.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __CLK_LPSS_H
14#define __CLK_LPSS_H
15
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/clk.h>
19
20#ifdef CONFIG_ACPI
21extern struct clk *clk_register_lpss_gate(const char *name,
22 const char *parent_name,
23 const char *hid, const char *uid,
24 unsigned offset);
25#else
26static inline struct clk *clk_register_lpss_gate(const char *name,
27 const char *parent_name,
28 const char *hid,
29 const char *uid,
30 unsigned offset)
31{
32 return ERR_PTR(-ENODEV);
33}
34#endif
35
36#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 81298aeef7e3..5cf4f4686406 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -10,7 +10,6 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/acpi.h>
14#include <linux/clk.h> 13#include <linux/clk.h>
15#include <linux/clkdev.h> 14#include <linux/clkdev.h>
16#include <linux/clk-provider.h> 15#include <linux/clk-provider.h>
@@ -18,8 +17,6 @@
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
20 19
21#include "clk-lpss.h"
22
23#define PRV_CLOCK_PARAMS 0x800 20#define PRV_CLOCK_PARAMS 0x800
24 21
25static int lpt_clk_probe(struct platform_device *pdev) 22static int lpt_clk_probe(struct platform_device *pdev)
@@ -34,40 +31,6 @@ static int lpt_clk_probe(struct platform_device *pdev)
34 31
35 /* Shared DMA clock */ 32 /* Shared DMA clock */
36 clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto"); 33 clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
37
38 /* SPI clocks */
39 clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
40 PRV_CLOCK_PARAMS);
41 if (!IS_ERR(clk))
42 clk_register_clkdev(clk, NULL, "INT33C0:00");
43
44 clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
45 PRV_CLOCK_PARAMS);
46 if (!IS_ERR(clk))
47 clk_register_clkdev(clk, NULL, "INT33C1:00");
48
49 /* I2C clocks */
50 clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
51 PRV_CLOCK_PARAMS);
52 if (!IS_ERR(clk))
53 clk_register_clkdev(clk, NULL, "INT33C2:00");
54
55 clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
56 PRV_CLOCK_PARAMS);
57 if (!IS_ERR(clk))
58 clk_register_clkdev(clk, NULL, "INT33C3:00");
59
60 /* UART clocks */
61 clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
62 PRV_CLOCK_PARAMS);
63 if (!IS_ERR(clk))
64 clk_register_clkdev(clk, NULL, "INT33C4:00");
65
66 clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
67 PRV_CLOCK_PARAMS);
68 if (!IS_ERR(clk))
69 clk_register_clkdev(clk, NULL, "INT33C5:00");
70
71 return 0; 34 return 0;
72} 35}
73 36
@@ -79,8 +42,7 @@ static struct platform_driver lpt_clk_driver = {
79 .probe = lpt_clk_probe, 42 .probe = lpt_clk_probe,
80}; 43};
81 44
82static int __init lpt_clk_init(void) 45int __init lpt_clk_init(void)
83{ 46{
84 return platform_driver_register(&lpt_clk_driver); 47 return platform_driver_register(&lpt_clk_driver);
85} 48}
86arch_initcall(lpt_clk_init);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index cbcb21e32771..a1488f58f6ca 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -205,10 +205,99 @@ depends on ARM
205source "drivers/cpufreq/Kconfig.arm" 205source "drivers/cpufreq/Kconfig.arm"
206endmenu 206endmenu
207 207
208menu "AVR32 CPU frequency scaling drivers"
209depends on AVR32
210
211config AVR32_AT32AP_CPUFREQ
212 bool "CPU frequency driver for AT32AP"
213 depends on PLATFORM_AT32AP
214 default n
215 help
216 This enables the CPU frequency driver for AT32AP processors.
217 If in doubt, say N.
218
219endmenu
220
221menu "CPUFreq processor drivers"
222depends on IA64
223
224config IA64_ACPI_CPUFREQ
225 tristate "ACPI Processor P-States driver"
226 select CPU_FREQ_TABLE
227 depends on ACPI_PROCESSOR
228 help
229 This driver adds a CPUFreq driver which utilizes the ACPI
230 Processor Performance States.
231
232 For details, take a look at <file:Documentation/cpu-freq/>.
233
234 If in doubt, say N.
235
236endmenu
237
238menu "MIPS CPUFreq processor drivers"
239depends on MIPS
240
241config LOONGSON2_CPUFREQ
242 tristate "Loongson2 CPUFreq Driver"
243 select CPU_FREQ_TABLE
244 help
245 This option adds a CPUFreq driver for loongson processors which
246 support software configurable cpu frequency.
247
248 Loongson2F and its successors support this feature.
249
250 For details, take a look at <file:Documentation/cpu-freq/>.
251
252 If in doubt, say N.
253
254endmenu
255
208menu "PowerPC CPU frequency scaling drivers" 256menu "PowerPC CPU frequency scaling drivers"
209depends on PPC32 || PPC64 257depends on PPC32 || PPC64
210source "drivers/cpufreq/Kconfig.powerpc" 258source "drivers/cpufreq/Kconfig.powerpc"
211endmenu 259endmenu
212 260
261menu "SPARC CPU frequency scaling drivers"
262depends on SPARC64
263config SPARC_US3_CPUFREQ
264 tristate "UltraSPARC-III CPU Frequency driver"
265 select CPU_FREQ_TABLE
266 help
267 This adds the CPUFreq driver for UltraSPARC-III processors.
268
269 For details, take a look at <file:Documentation/cpu-freq>.
270
271 If in doubt, say N.
272
273config SPARC_US2E_CPUFREQ
274 tristate "UltraSPARC-IIe CPU Frequency driver"
275 select CPU_FREQ_TABLE
276 help
277 This adds the CPUFreq driver for UltraSPARC-IIe processors.
278
279 For details, take a look at <file:Documentation/cpu-freq>.
280
281 If in doubt, say N.
282endmenu
283
284menu "SH CPU Frequency scaling"
285depends on SUPERH
286config SH_CPU_FREQ
287 tristate "SuperH CPU Frequency driver"
288 select CPU_FREQ_TABLE
289 help
290 This adds the cpufreq driver for SuperH. Any CPU that supports
291 clock rate rounding through the clock framework can use this
292 driver. While it will make the kernel slightly larger, this is
293 harmless for CPUs that don't support rate rounding. The driver
294 will also generate a notice in the boot log before disabling
295 itself if the CPU in question is not capable of rate rounding.
296
297 For details, take a look at <file:Documentation/cpu-freq>.
298
299 If unsure, say N.
300endmenu
301
213endif 302endif
214endmenu 303endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 030ddf6dd3f1..f3af18b9acc5 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,93 @@
2# ARM CPU Frequency scaling drivers 2# ARM CPU Frequency scaling drivers
3# 3#
4 4
5config ARM_BIG_LITTLE_CPUFREQ
6 tristate
7 depends on ARM_CPU_TOPOLOGY
8
9config ARM_DT_BL_CPUFREQ
10 tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
11 select ARM_BIG_LITTLE_CPUFREQ
12 depends on OF && HAVE_CLK
13 help
14 This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
15 This gets frequency tables from DT.
16
17config ARM_EXYNOS_CPUFREQ
18 bool "SAMSUNG EXYNOS SoCs"
19 depends on ARCH_EXYNOS
20 default y
21 help
22 This adds the CPUFreq driver common part for Samsung
23 EXYNOS SoCs.
24
25 If in doubt, say N.
26
27config ARM_EXYNOS4210_CPUFREQ
28 def_bool CPU_EXYNOS4210
29 help
30 This adds the CPUFreq driver for Samsung EXYNOS4210
31 SoC (S5PV310 or S5PC210).
32
33config ARM_EXYNOS4X12_CPUFREQ
34 def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
35 help
36 This adds the CPUFreq driver for Samsung EXYNOS4X12
37 SoC (EXYNOS4212 or EXYNOS4412).
38
39config ARM_EXYNOS5250_CPUFREQ
40 def_bool SOC_EXYNOS5250
41 help
42 This adds the CPUFreq driver for Samsung EXYNOS5250
43 SoC.
44
45config ARM_EXYNOS5440_CPUFREQ
46 def_bool SOC_EXYNOS5440
47 depends on HAVE_CLK && PM_OPP && OF
48 help
49 This adds the CPUFreq driver for Samsung EXYNOS5440
50 SoC. The nature of the exynos5440 clock controller is
51 different from previous exynos controllers, so it does not use
52 the common exynos framework.
53
54config ARM_HIGHBANK_CPUFREQ
55 tristate "Calxeda Highbank-based"
56 depends on ARCH_HIGHBANK
57 select CPU_FREQ_TABLE
58 select GENERIC_CPUFREQ_CPU0
59 select PM_OPP
60 select REGULATOR
61
62 default m
63 help
64 This adds the CPUFreq driver for Calxeda Highbank SoC
65 based boards.
66
67 If in doubt, say N.
68
69config ARM_IMX6Q_CPUFREQ
70 tristate "Freescale i.MX6Q cpufreq support"
71 depends on SOC_IMX6Q
72 depends on REGULATOR_ANATOP
73 help
74 This adds cpufreq driver support for Freescale i.MX6Q SOC.
75
76 If in doubt, say N.
77
78config ARM_INTEGRATOR
79 tristate "CPUfreq driver for ARM Integrator CPUs"
80 depends on ARCH_INTEGRATOR
81 default y
82 help
83 This enables the CPUfreq driver for ARM Integrator CPUs.
84 If in doubt, say Y.
85
86config ARM_KIRKWOOD_CPUFREQ
87 def_bool ARCH_KIRKWOOD && OF
88 help
89 This adds the CPUFreq driver for Marvell Kirkwood
90 SoCs.
91
5config ARM_OMAP2PLUS_CPUFREQ 92config ARM_OMAP2PLUS_CPUFREQ
6 bool "TI OMAP2+" 93 bool "TI OMAP2+"
7 depends on ARCH_OMAP2PLUS 94 depends on ARCH_OMAP2PLUS
@@ -42,6 +129,7 @@ config ARM_S3C64XX_CPUFREQ
42config ARM_S5PV210_CPUFREQ 129config ARM_S5PV210_CPUFREQ
43 bool "Samsung S5PV210 and S5PC110" 130 bool "Samsung S5PV210 and S5PC110"
44 depends on CPU_S5PV210 131 depends on CPU_S5PV210
132 select CPU_FREQ_TABLE
45 default y 133 default y
46 help 134 help
47 This adds the CPUFreq driver for Samsung S5PV210 and 135 This adds the CPUFreq driver for Samsung S5PV210 and
@@ -49,48 +137,11 @@ config ARM_S5PV210_CPUFREQ
49 137
50 If in doubt, say N. 138 If in doubt, say N.
51 139
52config ARM_EXYNOS_CPUFREQ 140config ARM_SA1100_CPUFREQ
53 bool "SAMSUNG EXYNOS SoCs" 141 bool
54 depends on ARCH_EXYNOS
55 default y
56 help
57 This adds the CPUFreq driver common part for Samsung
58 EXYNOS SoCs.
59
60 If in doubt, say N.
61 142
62config ARM_EXYNOS4210_CPUFREQ 143config ARM_SA1110_CPUFREQ
63 def_bool CPU_EXYNOS4210 144 bool
64 help
65 This adds the CPUFreq driver for Samsung EXYNOS4210
66 SoC (S5PV310 or S5PC210).
67
68config ARM_EXYNOS4X12_CPUFREQ
69 def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
70 help
71 This adds the CPUFreq driver for Samsung EXYNOS4X12
72 SoC (EXYNOS4212 or EXYNOS4412).
73
74config ARM_EXYNOS5250_CPUFREQ
75 def_bool SOC_EXYNOS5250
76 help
77 This adds the CPUFreq driver for Samsung EXYNOS5250
78 SoC.
79
80config ARM_KIRKWOOD_CPUFREQ
81 def_bool ARCH_KIRKWOOD && OF
82 help
83 This adds the CPUFreq driver for Marvell Kirkwood
84 SoCs.
85
86config ARM_IMX6Q_CPUFREQ
87 tristate "Freescale i.MX6Q cpufreq support"
88 depends on SOC_IMX6Q
89 depends on REGULATOR_ANATOP
90 help
91 This adds cpufreq driver support for Freescale i.MX6Q SOC.
92
93 If in doubt, say N.
94 145
95config ARM_SPEAR_CPUFREQ 146config ARM_SPEAR_CPUFREQ
96 bool "SPEAr CPUFreq support" 147 bool "SPEAr CPUFreq support"
@@ -98,18 +149,3 @@ config ARM_SPEAR_CPUFREQ
98 default y 149 default y
99 help 150 help
100 This adds the CPUFreq driver support for SPEAr SOCs. 151 This adds the CPUFreq driver support for SPEAr SOCs.
101
102config ARM_HIGHBANK_CPUFREQ
103 tristate "Calxeda Highbank-based"
104 depends on ARCH_HIGHBANK
105 select CPU_FREQ_TABLE
106 select GENERIC_CPUFREQ_CPU0
107 select PM_OPP
108 select REGULATOR
109
110 default m
111 help
112 This adds the CPUFreq driver for Calxeda Highbank SoC
113 based boards.
114
115 If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index e76992f79683..9c926ca0d718 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,3 +1,21 @@
1config CPU_FREQ_CBE
2 tristate "CBE frequency scaling"
3 depends on CBE_RAS && PPC_CELL
4 default m
5 help
6 This adds the cpufreq driver for Cell BE processors.
7 For details, take a look at <file:Documentation/cpu-freq/>.
8 If you don't have such a processor, say N.
9
10config CPU_FREQ_CBE_PMI
11 bool "CBE frequency scaling using PMI interface"
12 depends on CPU_FREQ_CBE
13 default n
14 help
15 Select this, if you want to use the PMI interface to switch
16 frequencies. Using PMI, the processor will not only be able to run at
17 lower speed, but also at lower core voltage.
18
1config CPU_FREQ_MAPLE 19config CPU_FREQ_MAPLE
2 bool "Support for Maple 970FX Evaluation Board" 20 bool "Support for Maple 970FX Evaluation Board"
3 depends on PPC_MAPLE 21 depends on PPC_MAPLE
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index d7dc0ed6adb0..2b8a8c374548 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -129,6 +129,23 @@ config X86_POWERNOW_K8
129 129
130 For details, take a look at <file:Documentation/cpu-freq/>. 130 For details, take a look at <file:Documentation/cpu-freq/>.
131 131
132config X86_AMD_FREQ_SENSITIVITY
133 tristate "AMD frequency sensitivity feedback powersave bias"
134 depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
135 help
136 This adds an AMD-specific powersave bias function to the ondemand
137 governor, which allows it to make more power-conscious frequency
138 change decisions based on feedback from hardware (available on AMD
139 Family 16h and above).
140
141 Hardware feedback tells software how "sensitive" to frequency changes
142 the CPUs' workloads are. CPU-bound workloads will be more sensitive
143 -- they will perform better as frequency increases. Memory/IO-bound
144 workloads will be less sensitive -- they will not necessarily perform
145 better as frequency increases.
146
147 If in doubt, say N.
148
132config X86_GX_SUSPMOD 149config X86_GX_SUSPMOD
133 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" 150 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
134 depends on X86_32 && PCI 151 depends on X86_32 && PCI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 863fd1865d45..315b9231feb1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,23 +41,54 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
41obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 41obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
42obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 42obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
43obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o 43obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
44obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
44 45
45################################################################################## 46##################################################################################
46# ARM SoC drivers 47# ARM SoC drivers
48obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
49# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
50# LITTLE drivers, so that it is probed last.
51obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
52
53obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
47obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 54obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
48obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
49obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
50obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
51obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o 55obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
52obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 56obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
53obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o 57obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
54obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o 58obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
59obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
60obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
61obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
62obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
55obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o 63obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
56obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 64obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
65obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
66obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
67obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
68obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
69obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
70obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
71obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
72obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
57obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o 73obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 74obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o
59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
60 75
61################################################################################## 76##################################################################################
62# PowerPC platform drivers 77# PowerPC platform drivers
78obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
79ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
80obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
63obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o 81obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
82
83##################################################################################
84# Other platform drivers
85obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
86obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o
87obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
88obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
89obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
90obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
91obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
92obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
93obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
94obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 57a8774f0b4e..11b8b4b54ceb 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -423,7 +423,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
423 struct drv_cmd cmd; 423 struct drv_cmd cmd;
424 unsigned int next_state = 0; /* Index into freq_table */ 424 unsigned int next_state = 0; /* Index into freq_table */
425 unsigned int next_perf_state = 0; /* Index into perf table */ 425 unsigned int next_perf_state = 0; /* Index into perf table */
426 unsigned int i;
427 int result = 0; 426 int result = 0;
428 427
429 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); 428 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
@@ -486,10 +485,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
486 485
487 freqs.old = perf->states[perf->state].core_frequency * 1000; 486 freqs.old = perf->states[perf->state].core_frequency * 1000;
488 freqs.new = data->freq_table[next_state].frequency; 487 freqs.new = data->freq_table[next_state].frequency;
489 for_each_cpu(i, policy->cpus) { 488 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
490 freqs.cpu = i;
491 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
492 }
493 489
494 drv_write(&cmd); 490 drv_write(&cmd);
495 491
@@ -502,10 +498,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
502 } 498 }
503 } 499 }
504 500
505 for_each_cpu(i, policy->cpus) { 501 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
506 freqs.cpu = i;
507 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
508 }
509 perf->state = next_perf_state; 502 perf->state = next_perf_state;
510 503
511out: 504out:
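
The acpi-cpufreq hunks above drop the per-CPU freqs.cpu loop because cpufreq_notify_transition() now takes the policy and fans out to policy->cpus itself; the new drivers later in this diff use the same convention. A bare skeleton of that calling convention (generic names, not code from this patch):

#include <linux/cpufreq.h>

static int example_target(struct cpufreq_policy *policy, unsigned int new_khz)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = new_khz,
	};

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... reprogram the clock hardware here ... */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
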
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 000000000000..f6b79ab0070b
--- /dev/null
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,148 @@
1/*
2 * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
3 * for the ondemand governor.
4 *
5 * Copyright (C) 2013 Advanced Micro Devices, Inc.
6 *
7 * Author: Jacob Shin <jacob.shin@amd.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/percpu-defs.h>
18#include <linux/init.h>
19#include <linux/mod_devicetable.h>
20
21#include <asm/msr.h>
22#include <asm/cpufeature.h>
23
24#include "cpufreq_governor.h"
25
26#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
27#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
28#define CLASS_CODE_SHIFT 56
29#define POWERSAVE_BIAS_MAX 1000
30#define POWERSAVE_BIAS_DEF 400
31
32struct cpu_data_t {
33 u64 actual;
34 u64 reference;
35 unsigned int freq_prev;
36};
37
38static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
39
40static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
41 unsigned int freq_next,
42 unsigned int relation)
43{
44 int sensitivity;
45 long d_actual, d_reference;
46 struct msr actual, reference;
47 struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
48 struct dbs_data *od_data = policy->governor_data;
49 struct od_dbs_tuners *od_tuners = od_data->tuners;
50 struct od_cpu_dbs_info_s *od_info =
51 od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
52
53 if (!od_info->freq_table)
54 return freq_next;
55
56 rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
57 &actual.l, &actual.h);
58 rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
59 &reference.l, &reference.h);
60 actual.h &= 0x00ffffff;
61 reference.h &= 0x00ffffff;
62
63 /* counter wrapped around, so stay on current frequency */
64 if (actual.q < data->actual || reference.q < data->reference) {
65 freq_next = policy->cur;
66 goto out;
67 }
68
69 d_actual = actual.q - data->actual;
70 d_reference = reference.q - data->reference;
71
72 /* divide by 0, so stay on current frequency as well */
73 if (d_reference == 0) {
74 freq_next = policy->cur;
75 goto out;
76 }
77
78 sensitivity = POWERSAVE_BIAS_MAX -
79 (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
80
81 clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
82
83 /* this workload is not CPU bound, so choose a lower freq */
84 if (sensitivity < od_tuners->powersave_bias) {
85 if (data->freq_prev == policy->cur)
86 freq_next = policy->cur;
87
88 if (freq_next > policy->cur)
89 freq_next = policy->cur;
90 else if (freq_next < policy->cur)
91 freq_next = policy->min;
92 else {
93 unsigned int index;
94
95 cpufreq_frequency_table_target(policy,
96 od_info->freq_table, policy->cur - 1,
97 CPUFREQ_RELATION_H, &index);
98 freq_next = od_info->freq_table[index].frequency;
99 }
100
101 data->freq_prev = freq_next;
102 } else
103 data->freq_prev = 0;
104
105out:
106 data->actual = actual.q;
107 data->reference = reference.q;
108 return freq_next;
109}
110
111static int __init amd_freq_sensitivity_init(void)
112{
113 u64 val;
114
115 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
116 return -ENODEV;
117
118 if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
119 return -ENODEV;
120
121 if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
122 return -ENODEV;
123
124 if (!(val >> CLASS_CODE_SHIFT))
125 return -ENODEV;
126
127 od_register_powersave_bias_handler(amd_powersave_bias_target,
128 POWERSAVE_BIAS_DEF);
129 return 0;
130}
131late_initcall(amd_freq_sensitivity_init);
132
133static void __exit amd_freq_sensitivity_exit(void)
134{
135 od_unregister_powersave_bias_handler();
136}
137module_exit(amd_freq_sensitivity_exit);
138
139static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
140 X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
141 {}
142};
143MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
144
145MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
146MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
147 "the ondemand governor.");
148MODULE_LICENSE("GPL");
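
To make the classification above concrete, here is a standalone arithmetic illustration of the sensitivity formula with made-up counter deltas; POWERSAVE_BIAS_DEF (400) is the threshold this driver registers with the ondemand governor.

#include <stdio.h>

#define POWERSAVE_BIAS_MAX	1000

int main(void)
{
	/* hypothetical per-sample MSR deltas */
	long d_actual = 900, d_reference = 1000;

	int sensitivity = POWERSAVE_BIAS_MAX -
		(POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);

	/* 1000 - (1000 * 100 / 1000) = 900: a mostly CPU-bound interval,
	 * so with the default bias of 400 the requested frequency is kept. */
	printf("sensitivity = %d\n", sensitivity);
	return 0;
}
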
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 000000000000..dbdf677d2f36
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,278 @@
1/*
2 * ARM big.LITTLE Platforms CPUFreq support
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/clk.h>
23#include <linux/cpu.h>
24#include <linux/cpufreq.h>
25#include <linux/cpumask.h>
26#include <linux/export.h>
27#include <linux/of_platform.h>
28#include <linux/opp.h>
29#include <linux/slab.h>
30#include <linux/topology.h>
31#include <linux/types.h>
32
33#include "arm_big_little.h"
34
35/* Currently we support only two clusters */
36#define MAX_CLUSTERS 2
37
38static struct cpufreq_arm_bL_ops *arm_bL_ops;
39static struct clk *clk[MAX_CLUSTERS];
40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
42
43static int cpu_to_cluster(int cpu)
44{
45 return topology_physical_package_id(cpu);
46}
47
48static unsigned int bL_cpufreq_get(unsigned int cpu)
49{
50 u32 cur_cluster = cpu_to_cluster(cpu);
51
52 return clk_get_rate(clk[cur_cluster]) / 1000;
53}
54
55/* Validate policy frequency range */
56static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
57{
58 u32 cur_cluster = cpu_to_cluster(policy->cpu);
59
60 return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
61}
62
63/* Set clock frequency */
64static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
65 unsigned int target_freq, unsigned int relation)
66{
67 struct cpufreq_freqs freqs;
68 u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
69 int ret = 0;
70
71 cur_cluster = cpu_to_cluster(policy->cpu);
72
73 freqs.old = bL_cpufreq_get(policy->cpu);
74
75 /* Determine valid target frequency using freq_table */
76 cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
77 target_freq, relation, &freq_tab_idx);
78 freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
79
80 pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
81 __func__, cpu, cur_cluster, freqs.old, target_freq,
82 freqs.new);
83
84 if (freqs.old == freqs.new)
85 return 0;
86
87 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
88
89 ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
90 if (ret) {
91 pr_err("clk_set_rate failed: %d\n", ret);
92 return ret;
93 }
94
95 policy->cur = freqs.new;
96
97 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
98
99 return ret;
100}
101
102static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
103{
104 u32 cluster = cpu_to_cluster(cpu_dev->id);
105
106 if (!atomic_dec_return(&cluster_usage[cluster])) {
107 clk_put(clk[cluster]);
108 opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
109 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
110 }
111}
112
113static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
114{
115 u32 cluster = cpu_to_cluster(cpu_dev->id);
116 char name[14] = "cpu-cluster.";
117 int ret;
118
119 if (atomic_inc_return(&cluster_usage[cluster]) != 1)
120 return 0;
121
122 ret = arm_bL_ops->init_opp_table(cpu_dev);
123 if (ret) {
124 dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
125 __func__, cpu_dev->id, ret);
126 goto atomic_dec;
127 }
128
129 ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
130 if (ret) {
131 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
132 __func__, cpu_dev->id, ret);
133 goto atomic_dec;
134 }
135
136 name[12] = cluster + '0';
137 clk[cluster] = clk_get_sys(name, NULL);
138 if (!IS_ERR(clk[cluster])) {
139 dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
140 __func__, clk[cluster], freq_table[cluster],
141 cluster);
142 return 0;
143 }
144
145 dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
146 __func__, cpu_dev->id, cluster);
147 ret = PTR_ERR(clk[cluster]);
148 opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
149
150atomic_dec:
151 atomic_dec(&cluster_usage[cluster]);
152 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
153 cluster);
154 return ret;
155}
156
157/* Per-CPU initialization */
158static int bL_cpufreq_init(struct cpufreq_policy *policy)
159{
160 u32 cur_cluster = cpu_to_cluster(policy->cpu);
161 struct device *cpu_dev;
162 int ret;
163
164 cpu_dev = get_cpu_device(policy->cpu);
165 if (!cpu_dev) {
166 pr_err("%s: failed to get cpu%d device\n", __func__,
167 policy->cpu);
168 return -ENODEV;
169 }
170
171 ret = get_cluster_clk_and_freq_table(cpu_dev);
172 if (ret)
173 return ret;
174
175 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
176 if (ret) {
177 dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
178 policy->cpu, cur_cluster);
179 put_cluster_clk_and_freq_table(cpu_dev);
180 return ret;
181 }
182
183 cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
184
185 if (arm_bL_ops->get_transition_latency)
186 policy->cpuinfo.transition_latency =
187 arm_bL_ops->get_transition_latency(cpu_dev);
188 else
189 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
190
191 policy->cur = bL_cpufreq_get(policy->cpu);
192
193 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
194
195 dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
196 return 0;
197}
198
199static int bL_cpufreq_exit(struct cpufreq_policy *policy)
200{
201 struct device *cpu_dev;
202
203 cpu_dev = get_cpu_device(policy->cpu);
204 if (!cpu_dev) {
205 pr_err("%s: failed to get cpu%d device\n", __func__,
206 policy->cpu);
207 return -ENODEV;
208 }
209
210 put_cluster_clk_and_freq_table(cpu_dev);
211 dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
212
213 return 0;
214}
215
216/* Export freq_table to sysfs */
217static struct freq_attr *bL_cpufreq_attr[] = {
218 &cpufreq_freq_attr_scaling_available_freqs,
219 NULL,
220};
221
222static struct cpufreq_driver bL_cpufreq_driver = {
223 .name = "arm-big-little",
224 .flags = CPUFREQ_STICKY,
225 .verify = bL_cpufreq_verify_policy,
226 .target = bL_cpufreq_set_target,
227 .get = bL_cpufreq_get,
228 .init = bL_cpufreq_init,
229 .exit = bL_cpufreq_exit,
230 .have_governor_per_policy = true,
231 .attr = bL_cpufreq_attr,
232};
233
234int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
235{
236 int ret;
237
238 if (arm_bL_ops) {
239 pr_debug("%s: Already registered: %s, exiting\n", __func__,
240 arm_bL_ops->name);
241 return -EBUSY;
242 }
243
244 if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
245 pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
246 return -ENODEV;
247 }
248
249 arm_bL_ops = ops;
250
251 ret = cpufreq_register_driver(&bL_cpufreq_driver);
252 if (ret) {
253 pr_info("%s: Failed registering platform driver: %s, err: %d\n",
254 __func__, ops->name, ret);
255 arm_bL_ops = NULL;
256 } else {
257 pr_info("%s: Registered platform driver: %s\n", __func__,
258 ops->name);
259 }
260
261 return ret;
262}
263EXPORT_SYMBOL_GPL(bL_cpufreq_register);
264
265void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
266{
267 if (arm_bL_ops != ops) {
268 pr_err("%s: Registered with: %s, can't unregister, exiting\n",
269 __func__, arm_bL_ops->name);
270 return;
271 }
272
273 cpufreq_unregister_driver(&bL_cpufreq_driver);
274 pr_info("%s: Un-registered platform driver: %s\n", __func__,
275 arm_bL_ops->name);
276 arm_bL_ops = NULL;
277}
278EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
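
get_cluster_clk_and_freq_table() above builds the clock name "cpu-cluster.<N>" and looks it up with clk_get_sys(), so a platform using this driver is expected to have registered one clock per cluster under those identifiers. A hypothetical registration sketch (the clock pointers and function name are placeholders):

#include <linux/clk.h>
#include <linux/clkdev.h>

static void example_register_cluster_clocks(struct clk *cluster0_clk,
					    struct clk *cluster1_clk)
{
	/* dev_id strings match the clk_get_sys(name, NULL) lookups above */
	clk_register_clkdev(cluster0_clk, NULL, "cpu-cluster.0");
	clk_register_clkdev(cluster1_clk, NULL, "cpu-cluster.1");
}
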
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 000000000000..70f18fc12d4a
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,40 @@
1/*
2 * ARM big.LITTLE platform's CPUFreq header file
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19#ifndef CPUFREQ_ARM_BIG_LITTLE_H
20#define CPUFREQ_ARM_BIG_LITTLE_H
21
22#include <linux/cpufreq.h>
23#include <linux/device.h>
24#include <linux/types.h>
25
26struct cpufreq_arm_bL_ops {
27 char name[CPUFREQ_NAME_LEN];
28 int (*get_transition_latency)(struct device *cpu_dev);
29
30 /*
31 * This must set opp table for cpu_dev in a similar way as done by
32 * of_init_opp_table().
33 */
34 int (*init_opp_table)(struct device *cpu_dev);
35};
36
37int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
38void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
39
40#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
new file mode 100644
index 000000000000..44be3115375c
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -0,0 +1,107 @@
1/*
2 * Generic big.LITTLE CPUFreq Interface driver
3 *
4 * It provides necessary ops to arm_big_little cpufreq driver and gets
5 * Frequency information from Device Tree. Freq table in DT must be in KHz.
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/cpufreq.h>
23#include <linux/device.h>
24#include <linux/export.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/opp.h>
28#include <linux/slab.h>
29#include <linux/types.h>
30#include "arm_big_little.h"
31
32static int dt_init_opp_table(struct device *cpu_dev)
33{
34 struct device_node *np, *parent;
35 int count = 0, ret;
36
37 parent = of_find_node_by_path("/cpus");
38 if (!parent) {
39 pr_err("failed to find OF /cpus\n");
40 return -ENOENT;
41 }
42
43 for_each_child_of_node(parent, np) {
44 if (count++ != cpu_dev->id)
45 continue;
46 if (!of_get_property(np, "operating-points", NULL)) {
47 ret = -ENODATA;
48 } else {
49 cpu_dev->of_node = np;
50 ret = of_init_opp_table(cpu_dev);
51 }
52 of_node_put(np);
53 of_node_put(parent);
54
55 return ret;
56 }
57
58 return -ENODEV;
59}
60
61static int dt_get_transition_latency(struct device *cpu_dev)
62{
63 struct device_node *np, *parent;
64 u32 transition_latency = CPUFREQ_ETERNAL;
65 int count = 0;
66
67 parent = of_find_node_by_path("/cpus");
68 if (!parent) {
69 pr_err("failed to find OF /cpus\n");
70 return -ENOENT;
71 }
72
73 for_each_child_of_node(parent, np) {
74 if (count++ != cpu_dev->id)
75 continue;
76
77 of_property_read_u32(np, "clock-latency", &transition_latency);
78 of_node_put(np);
79 of_node_put(parent);
80
81 return 0;
82 }
83
84 return -ENODEV;
85}
86
87static struct cpufreq_arm_bL_ops dt_bL_ops = {
88 .name = "dt-bl",
89 .get_transition_latency = dt_get_transition_latency,
90 .init_opp_table = dt_init_opp_table,
91};
92
93static int generic_bL_init(void)
94{
95 return bL_cpufreq_register(&dt_bL_ops);
96}
97module_init(generic_bL_init);
98
99static void generic_bL_exit(void)
100{
101 return bL_cpufreq_unregister(&dt_bL_ops);
102}
103module_exit(generic_bL_exit);
104
105MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
106MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
107MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
new file mode 100644
index 000000000000..654488723cb5
--- /dev/null
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2004-2007 Atmel Corporation
3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12/*#define DEBUG*/
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/cpufreq.h>
18#include <linux/io.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/export.h>
22
23static struct clk *cpuclk;
24
25static int at32_verify_speed(struct cpufreq_policy *policy)
26{
27 if (policy->cpu != 0)
28 return -EINVAL;
29
30 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
31 policy->cpuinfo.max_freq);
32 return 0;
33}
34
35static unsigned int at32_get_speed(unsigned int cpu)
36{
37 /* No SMP support */
38 if (cpu)
39 return 0;
40 return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
41}
42
43static unsigned int ref_freq;
44static unsigned long loops_per_jiffy_ref;
45
46static int at32_set_target(struct cpufreq_policy *policy,
47 unsigned int target_freq,
48 unsigned int relation)
49{
50 struct cpufreq_freqs freqs;
51 long freq;
52
53 /* Convert target_freq from kHz to Hz */
54 freq = clk_round_rate(cpuclk, target_freq * 1000);
55
56 /* Check if policy->min <= new_freq <= policy->max */
57 if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
58 return -EINVAL;
59
60 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
61
62 freqs.old = at32_get_speed(0);
63 freqs.new = (freq + 500) / 1000;
64 freqs.flags = 0;
65
66 if (!ref_freq) {
67 ref_freq = freqs.old;
68 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
69 }
70
71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
72 if (freqs.old < freqs.new)
73 boot_cpu_data.loops_per_jiffy = cpufreq_scale(
74 loops_per_jiffy_ref, ref_freq, freqs.new);
75 clk_set_rate(cpuclk, freq);
76 if (freqs.new < freqs.old)
77 boot_cpu_data.loops_per_jiffy = cpufreq_scale(
78 loops_per_jiffy_ref, ref_freq, freqs.new);
79 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
80
81 pr_debug("cpufreq: set frequency %lu Hz\n", freq);
82
83 return 0;
84}
85
86static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
87{
88 if (policy->cpu != 0)
89 return -EINVAL;
90
91 cpuclk = clk_get(NULL, "cpu");
92 if (IS_ERR(cpuclk)) {
93 pr_debug("cpufreq: could not get CPU clk\n");
94 return PTR_ERR(cpuclk);
95 }
96
97 policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
98 policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
99 policy->cpuinfo.transition_latency = 0;
100 policy->cur = at32_get_speed(0);
101 policy->min = policy->cpuinfo.min_freq;
102 policy->max = policy->cpuinfo.max_freq;
103
104 printk("cpufreq: AT32AP CPU frequency driver\n");
105
106 return 0;
107}
108
109static struct cpufreq_driver at32_driver = {
110 .name = "at32ap",
111 .owner = THIS_MODULE,
112 .init = at32_cpufreq_driver_init,
113 .verify = at32_verify_speed,
114 .target = at32_set_target,
115 .get = at32_get_speed,
116 .flags = CPUFREQ_STICKY,
117};
118
119static int __init at32_cpufreq_init(void)
120{
121 return cpufreq_register_driver(&at32_driver);
122}
123late_initcall(at32_cpufreq_init);
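
at32_set_target() above rescales loops_per_jiffy with cpufreq_scale() so software delay loops track the new clock rate. A standalone illustration of that proportional scaling with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long lpj_ref = 500000;	/* calibrated while running at ref_freq */
	unsigned int ref_freq = 100000;	/* kHz */
	unsigned int new_freq = 140000;	/* kHz */

	/* same proportion the kernel helper applies: old * mult / div */
	unsigned long lpj_new = (unsigned long)((unsigned long long)lpj_ref *
						new_freq / ref_freq);

	printf("loops_per_jiffy at %u kHz: %lu\n", new_freq, lpj_new);	/* 700000 */
	return 0;
}
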
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
new file mode 100644
index 000000000000..995511e80bef
--- /dev/null
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -0,0 +1,247 @@
1/*
2 * Blackfin core clock scaling
3 *
4 * Copyright 2008-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
15#include <linux/fs.h>
16#include <linux/delay.h>
17#include <asm/blackfin.h>
18#include <asm/time.h>
19#include <asm/dpmc.h>
20
21
22/* this is the table of CCLK frequencies, in Hz */
23/* .index is the entry in the auxiliary dpm_state_table[] */
24static struct cpufreq_frequency_table bfin_freq_table[] = {
25 {
26 .frequency = CPUFREQ_TABLE_END,
27 .index = 0,
28 },
29 {
30 .frequency = CPUFREQ_TABLE_END,
31 .index = 1,
32 },
33 {
34 .frequency = CPUFREQ_TABLE_END,
35 .index = 2,
36 },
37 {
38 .frequency = CPUFREQ_TABLE_END,
39 .index = 0,
40 },
41};
42
43static struct bfin_dpm_state {
44 unsigned int csel; /* system clock divider */
45 unsigned int tscale; /* change the divider on the core timer interrupt */
46} dpm_state_table[3];
47
48#if defined(CONFIG_CYCLES_CLOCKSOURCE)
49/*
50 * normalized to maximum frequency offset for CYCLES,
51 * used in time-ts cycles clock source, but could be used
52 * somewhere also.
53 */
54unsigned long long __bfin_cycles_off;
55unsigned int __bfin_cycles_mod;
56#endif
57
58/**************************************************************************/
59static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
60{
61
62 unsigned long csel, min_cclk;
63 int index;
64
65 /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
66#if ANOMALY_05000273 || ANOMALY_05000274 || \
67 (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
68 && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
69 min_cclk = sclk * 2;
70#else
71 min_cclk = sclk;
72#endif
73
74#ifndef CONFIG_BF60x
75 csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
76#else
77 csel = bfin_read32(CGU0_DIV) & 0x1F;
78#endif
79
80 for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) {
81 bfin_freq_table[index].frequency = cclk >> index;
82#ifndef CONFIG_BF60x
83 dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
84#else
85 dpm_state_table[index].csel = csel;
86#endif
87 dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1;
88
89 pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
90 bfin_freq_table[index].frequency,
91 dpm_state_table[index].csel,
92 dpm_state_table[index].tscale);
93 }
94 return;
95}
96
97static void bfin_adjust_core_timer(void *info)
98{
99 unsigned int tscale;
100 unsigned int index = *(unsigned int *)info;
101
102 /* we have to adjust the core timer, because it is using cclk */
103 tscale = dpm_state_table[index].tscale;
104 bfin_write_TSCALE(tscale);
105 return;
106}
107
108static unsigned int bfin_getfreq_khz(unsigned int cpu)
109{
110 /* Both CoreA/B have the same core clock */
111 return get_cclk() / 1000;
112}
113
114#ifdef CONFIG_BF60x
115unsigned long cpu_set_cclk(int cpu, unsigned long new)
116{
117 struct clk *clk;
118 int ret;
119
120 clk = clk_get(NULL, "CCLK");
121 if (IS_ERR(clk))
122 return -ENODEV;
123
124 ret = clk_set_rate(clk, new);
125 clk_put(clk);
126 return ret;
127}
128#endif
129
130static int bfin_target(struct cpufreq_policy *policy,
131 unsigned int target_freq, unsigned int relation)
132{
133#ifndef CONFIG_BF60x
134 unsigned int plldiv;
135#endif
136 unsigned int index;
137 unsigned long cclk_hz;
138 struct cpufreq_freqs freqs;
139 static unsigned long lpj_ref;
140 static unsigned int lpj_ref_freq;
141 int ret = 0;
142
143#if defined(CONFIG_CYCLES_CLOCKSOURCE)
144 cycles_t cycles;
145#endif
146
147 if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
148 relation, &index))
149 return -EINVAL;
150
151 cclk_hz = bfin_freq_table[index].frequency;
152
153 freqs.old = bfin_getfreq_khz(0);
154 freqs.new = cclk_hz;
155
156 pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
157 cclk_hz, target_freq, freqs.old);
158
159 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
160#ifndef CONFIG_BF60x
161 plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
162 bfin_write_PLL_DIV(plldiv);
163#else
164 ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
165 if (ret != 0) {
166 WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
167 return ret;
168 }
169#endif
170 on_each_cpu(bfin_adjust_core_timer, &index, 1);
171#if defined(CONFIG_CYCLES_CLOCKSOURCE)
172 cycles = get_cycles();
173 SSYNC();
174 cycles += 10; /* ~10 cycles we lose after get_cycles() */
175 __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
176 __bfin_cycles_mod = index;
177#endif
178 if (!lpj_ref_freq) {
179 lpj_ref = loops_per_jiffy;
180 lpj_ref_freq = freqs.old;
181 }
182 if (freqs.new != freqs.old) {
183 loops_per_jiffy = cpufreq_scale(lpj_ref,
184 lpj_ref_freq, freqs.new);
185 }
186
187 /* TODO: just a test case for the cycles clock source, remove later */
188 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
189
190 pr_debug("cpufreq: done\n");
191 return ret;
192}
193
194static int bfin_verify_speed(struct cpufreq_policy *policy)
195{
196 return cpufreq_frequency_table_verify(policy, bfin_freq_table);
197}
198
199static int __bfin_cpu_init(struct cpufreq_policy *policy)
200{
201
202 unsigned long cclk, sclk;
203
204 cclk = get_cclk() / 1000;
205 sclk = get_sclk() / 1000;
206
207 if (policy->cpu == CPUFREQ_CPU)
208 bfin_init_tables(cclk, sclk);
209
210 policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
211
212 policy->cur = cclk;
213 cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
214 return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
215}
216
217static struct freq_attr *bfin_freq_attr[] = {
218 &cpufreq_freq_attr_scaling_available_freqs,
219 NULL,
220};
221
222static struct cpufreq_driver bfin_driver = {
223 .verify = bfin_verify_speed,
224 .target = bfin_target,
225 .get = bfin_getfreq_khz,
226 .init = __bfin_cpu_init,
227 .name = "bfin cpufreq",
228 .owner = THIS_MODULE,
229 .attr = bfin_freq_attr,
230};
231
232static int __init bfin_cpu_init(void)
233{
234 return cpufreq_register_driver(&bfin_driver);
235}
236
237static void __exit bfin_cpu_exit(void)
238{
239 cpufreq_unregister_driver(&bfin_driver);
240}
241
242MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
243MODULE_DESCRIPTION("cpufreq driver for Blackfin");
244MODULE_LICENSE("GPL");
245
246module_init(bfin_cpu_init);
247module_exit(bfin_cpu_exit);
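
The table above is filled at probe time: each slot runs the core at cclk >> index and gets a matching core-timer prescaler, and bfin_target() later rescales loops_per_jiffy in proportion to the frequency change. Below is a minimal user-space sketch of that arithmetic only, not kernel code; the TIME_SCALE value, the clock numbers and the scale_lpj() helper (standing in for cpufreq_scale()) are assumptions made for illustration.

#include <stdio.h>

#define TIME_SCALE 4    /* assumed core-timer prescaler; the real value comes from asm/time.h */

/* same proportional rescaling that the kernel's cpufreq_scale() performs */
static unsigned long scale_lpj(unsigned long lpj, unsigned long old_khz,
                               unsigned long new_khz)
{
        return (unsigned long)(((unsigned long long)lpj * new_khz) / old_khz);
}

int main(void)
{
        unsigned long cclk = 500000;    /* 500 MHz core clock, in kHz */
        unsigned long sclk = 100000;    /* 100 MHz system clock, in kHz */
        unsigned long min_cclk = sclk;  /* no anomaly workaround in this model */
        unsigned long lpj = 2500000;    /* pretend loops_per_jiffy at 500 MHz */
        int index;

        /* each table slot runs the core at cclk >> index, as bfin_init_tables() does */
        for (index = 0; (cclk >> index) >= min_cclk && index < 3; index++) {
                unsigned long freq = cclk >> index;
                unsigned int tscale = (TIME_SCALE >> index) - 1;

                printf("slot %d: %lu kHz, tscale %u, lpj %lu\n",
                       index, freq, tscale, scale_lpj(lpj, cclk, freq));
        }
        return 0;
}

One line is printed per usable slot, which is roughly the information the driver's pr_debug() emits while building dpm_state_table[].
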
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 37d23a0f8c56..3ab8294eab04 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -44,8 +44,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
44{ 44{
45 struct cpufreq_freqs freqs; 45 struct cpufreq_freqs freqs;
46 struct opp *opp; 46 struct opp *opp;
47 unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0; 47 unsigned long volt = 0, volt_old = 0, tol = 0;
48 unsigned int index, cpu; 48 long freq_Hz;
49 unsigned int index;
49 int ret; 50 int ret;
50 51
51 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, 52 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -65,10 +66,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
65 if (freqs.old == freqs.new) 66 if (freqs.old == freqs.new)
66 return 0; 67 return 0;
67 68
68 for_each_online_cpu(cpu) { 69 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
69 freqs.cpu = cpu;
70 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
71 }
72 70
73 if (cpu_reg) { 71 if (cpu_reg) {
74 rcu_read_lock(); 72 rcu_read_lock();
@@ -76,7 +74,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
76 if (IS_ERR(opp)) { 74 if (IS_ERR(opp)) {
77 rcu_read_unlock(); 75 rcu_read_unlock();
78 pr_err("failed to find OPP for %ld\n", freq_Hz); 76 pr_err("failed to find OPP for %ld\n", freq_Hz);
79 return PTR_ERR(opp); 77 freqs.new = freqs.old;
78 ret = PTR_ERR(opp);
79 goto post_notify;
80 } 80 }
81 volt = opp_get_voltage(opp); 81 volt = opp_get_voltage(opp);
82 rcu_read_unlock(); 82 rcu_read_unlock();
@@ -94,7 +94,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
94 if (ret) { 94 if (ret) {
95 pr_err("failed to scale voltage up: %d\n", ret); 95 pr_err("failed to scale voltage up: %d\n", ret);
96 freqs.new = freqs.old; 96 freqs.new = freqs.old;
97 return ret; 97 goto post_notify;
98 } 98 }
99 } 99 }
100 100
@@ -103,7 +103,8 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
103 pr_err("failed to set clock rate: %d\n", ret); 103 pr_err("failed to set clock rate: %d\n", ret);
104 if (cpu_reg) 104 if (cpu_reg)
105 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 105 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
106 return ret; 106 freqs.new = freqs.old;
107 goto post_notify;
107 } 108 }
108 109
109 /* scaling down? scale voltage after frequency */ 110 /* scaling down? scale voltage after frequency */
@@ -113,25 +114,19 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
113 pr_err("failed to scale voltage down: %d\n", ret); 114 pr_err("failed to scale voltage down: %d\n", ret);
114 clk_set_rate(cpu_clk, freqs.old * 1000); 115 clk_set_rate(cpu_clk, freqs.old * 1000);
115 freqs.new = freqs.old; 116 freqs.new = freqs.old;
116 return ret;
117 } 117 }
118 } 118 }
119 119
120 for_each_online_cpu(cpu) { 120post_notify:
121 freqs.cpu = cpu; 121 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
122 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
123 }
124 122
125 return 0; 123 return ret;
126} 124}
127 125
128static int cpu0_cpufreq_init(struct cpufreq_policy *policy) 126static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
129{ 127{
130 int ret; 128 int ret;
131 129
132 if (policy->cpu != 0)
133 return -EINVAL;
134
135 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); 130 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
136 if (ret) { 131 if (ret) {
137 pr_err("invalid frequency table: %d\n", ret); 132 pr_err("invalid frequency table: %d\n", ret);
@@ -262,6 +257,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
262 } 257 }
263 258
264 of_node_put(np); 259 of_node_put(np);
260 of_node_put(parent);
265 return 0; 261 return 0;
266 262
267out_free_table: 263out_free_table:
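
The reworked error paths above always reach a single POSTCHANGE notification, with freqs.new reset to freqs.old on failure, instead of returning between the two notifier calls. A standalone C sketch of that control flow follows; the set_clock() stub and the sample frequencies are invented for illustration and do not correspond to any kernel API.

#include <stdio.h>

struct freqs {
        unsigned int old;
        unsigned int new;
};

/* stand-in for the real clock call; made to fail for odd targets so the
 * error path can be exercised */
static int set_clock(unsigned int khz)
{
        return (khz % 2) ? -1 : 0;
}

static void notify(const char *stage, const struct freqs *f)
{
        printf("%s: %u -> %u kHz\n", stage, f->old, f->new);
}

static int set_target(unsigned int cur_khz, unsigned int target_khz)
{
        struct freqs freqs = { .old = cur_khz, .new = target_khz };
        int ret;

        if (freqs.old == freqs.new)
                return 0;

        notify("PRECHANGE", &freqs);

        ret = set_clock(freqs.new);
        if (ret)
                freqs.new = freqs.old;  /* report the unchanged frequency */

        /* POSTCHANGE is always sent, success or failure */
        notify("POSTCHANGE", &freqs);
        return ret;
}

int main(void)
{
        set_target(800000, 1000000);    /* succeeds */
        set_target(800000, 999999);     /* fails, notifiers stay balanced */
        return 0;
}
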
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 13d311ee08b3..af1542d41440 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -263,7 +263,6 @@ static int nforce2_target(struct cpufreq_policy *policy,
263 263
264 freqs.old = nforce2_get(policy->cpu); 264 freqs.old = nforce2_get(policy->cpu);
265 freqs.new = target_fsb * fid * 100; 265 freqs.new = target_fsb * fid * 100;
266 freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
267 266
268 if (freqs.old == freqs.new) 267 if (freqs.old == freqs.new)
269 return 0; 268 return 0;
@@ -271,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
271 pr_debug("Old CPU frequency %d kHz, new %d kHz\n", 270 pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
272 freqs.old, freqs.new); 271 freqs.old, freqs.new);
273 272
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 273 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
275 274
276 /* Disable IRQs */ 275 /* Disable IRQs */
277 /* local_irq_save(flags); */ 276 /* local_irq_save(flags); */
@@ -286,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
286 /* Enable IRQs */ 285 /* Enable IRQs */
287 /* local_irq_restore(flags); */ 286 /* local_irq_restore(flags); */
288 287
289 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 288 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
290 289
291 return 0; 290 return 0;
292} 291}
@@ -360,12 +359,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
360 min_fsb = NFORCE2_MIN_FSB; 359 min_fsb = NFORCE2_MIN_FSB;
361 360
362 /* cpuinfo and default policy values */ 361 /* cpuinfo and default policy values */
363 policy->cpuinfo.min_freq = min_fsb * fid * 100; 362 policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
364 policy->cpuinfo.max_freq = max_fsb * fid * 100; 363 policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
365 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 364 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
366 policy->cur = nforce2_get(policy->cpu); 365 policy->cur = nforce2_get(policy->cpu);
367 policy->min = policy->cpuinfo.min_freq;
368 policy->max = policy->cpuinfo.max_freq;
369 366
370 return 0; 367 return 0;
371} 368}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b02824d092e7..1b8a48eaf90f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
45/* This one keeps track of the previously set governor of a removed CPU */ 45/* This one keeps track of the previously set governor of a removed CPU */
46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
47#endif 47#endif
48static DEFINE_SPINLOCK(cpufreq_driver_lock); 48static DEFINE_RWLOCK(cpufreq_driver_lock);
49 49
50/* 50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure 51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -128,6 +128,11 @@ void disable_cpufreq(void)
128static LIST_HEAD(cpufreq_governor_list); 128static LIST_HEAD(cpufreq_governor_list);
129static DEFINE_MUTEX(cpufreq_governor_mutex); 129static DEFINE_MUTEX(cpufreq_governor_mutex);
130 130
131bool have_governor_per_policy(void)
132{
133 return cpufreq_driver->have_governor_per_policy;
134}
135
131static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) 136static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
132{ 137{
133 struct cpufreq_policy *data; 138 struct cpufreq_policy *data;
@@ -137,7 +142,7 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
137 goto err_out; 142 goto err_out;
138 143
139 /* get the cpufreq driver */ 144 /* get the cpufreq driver */
140 spin_lock_irqsave(&cpufreq_driver_lock, flags); 145 read_lock_irqsave(&cpufreq_driver_lock, flags);
141 146
142 if (!cpufreq_driver) 147 if (!cpufreq_driver)
143 goto err_out_unlock; 148 goto err_out_unlock;
@@ -155,13 +160,13 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
155 if (!sysfs && !kobject_get(&data->kobj)) 160 if (!sysfs && !kobject_get(&data->kobj))
156 goto err_out_put_module; 161 goto err_out_put_module;
157 162
158 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
159 return data; 164 return data;
160 165
161err_out_put_module: 166err_out_put_module:
162 module_put(cpufreq_driver->owner); 167 module_put(cpufreq_driver->owner);
163err_out_unlock: 168err_out_unlock:
164 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 169 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
165err_out: 170err_out:
166 return NULL; 171 return NULL;
167} 172}
@@ -244,19 +249,9 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
244#endif 249#endif
245 250
246 251
247/** 252void __cpufreq_notify_transition(struct cpufreq_policy *policy,
248 * cpufreq_notify_transition - call notifier chain and adjust_jiffies 253 struct cpufreq_freqs *freqs, unsigned int state)
249 * on frequency transition.
250 *
251 * This function calls the transition notifiers and the "adjust_jiffies"
252 * function. It is called twice on all CPU frequency changes that have
253 * external effects.
254 */
255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
256{ 254{
257 struct cpufreq_policy *policy;
258 unsigned long flags;
259
260 BUG_ON(irqs_disabled()); 255 BUG_ON(irqs_disabled());
261 256
262 if (cpufreq_disabled()) 257 if (cpufreq_disabled())
@@ -266,10 +261,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
266 pr_debug("notification %u of frequency transition to %u kHz\n", 261 pr_debug("notification %u of frequency transition to %u kHz\n",
267 state, freqs->new); 262 state, freqs->new);
268 263
269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
272
273 switch (state) { 264 switch (state) {
274 265
275 case CPUFREQ_PRECHANGE: 266 case CPUFREQ_PRECHANGE:
@@ -303,6 +294,20 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
303 break; 294 break;
304 } 295 }
305} 296}
297/**
298 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
299 * on frequency transition.
300 *
301 * This function calls the transition notifiers and the "adjust_jiffies"
302 * function. It is called twice on all CPU frequency changes that have
303 * external effects.
304 */
305void cpufreq_notify_transition(struct cpufreq_policy *policy,
306 struct cpufreq_freqs *freqs, unsigned int state)
307{
308 for_each_cpu(freqs->cpu, policy->cpus)
309 __cpufreq_notify_transition(policy, freqs, state);
310}
306EXPORT_SYMBOL_GPL(cpufreq_notify_transition); 311EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
307 312
308 313
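
With this hunk the core, not each driver, fans a transition notification out across the CPUs that share the policy. A small user-space model of the wrapper follows; the fixed-size cpus[] array stands in for the kernel cpumask and NR_CPUS is just a constant chosen for the example.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4       /* example value only */

struct policy {
        bool cpus[NR_CPUS];     /* stands in for the policy->cpus cpumask */
};

struct freqs {
        int cpu;
        unsigned int old;
        unsigned int new;
};

/* per-CPU body, playing the role of __cpufreq_notify_transition() */
static void notify_one(struct freqs *f, const char *state)
{
        printf("cpu%d %s: %u -> %u kHz\n", f->cpu, state, f->old, f->new);
}

/* like the new cpufreq_notify_transition(): one call per CPU of the policy */
static void notify_transition(const struct policy *p, struct freqs *f,
                              const char *state)
{
        for (f->cpu = 0; f->cpu < NR_CPUS; f->cpu++)
                if (p->cpus[f->cpu])
                        notify_one(f, state);
}

int main(void)
{
        struct policy p = { .cpus = { true, true, false, true } };
        struct freqs f = { .old = 800000, .new = 1200000 };

        notify_transition(&p, &f, "PRECHANGE");
        notify_transition(&p, &f, "POSTCHANGE");
        return 0;
}
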
@@ -765,12 +770,12 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
765 goto err_out_kobj_put; 770 goto err_out_kobj_put;
766 } 771 }
767 772
768 spin_lock_irqsave(&cpufreq_driver_lock, flags); 773 write_lock_irqsave(&cpufreq_driver_lock, flags);
769 for_each_cpu(j, policy->cpus) { 774 for_each_cpu(j, policy->cpus) {
770 per_cpu(cpufreq_cpu_data, j) = policy; 775 per_cpu(cpufreq_cpu_data, j) = policy;
771 per_cpu(cpufreq_policy_cpu, j) = policy->cpu; 776 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
772 } 777 }
773 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 778 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
774 779
775 ret = cpufreq_add_dev_symlink(cpu, policy); 780 ret = cpufreq_add_dev_symlink(cpu, policy);
776 if (ret) 781 if (ret)
@@ -803,27 +808,30 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
803 struct device *dev) 808 struct device *dev)
804{ 809{
805 struct cpufreq_policy *policy; 810 struct cpufreq_policy *policy;
806 int ret = 0; 811 int ret = 0, has_target = !!cpufreq_driver->target;
807 unsigned long flags; 812 unsigned long flags;
808 813
809 policy = cpufreq_cpu_get(sibling); 814 policy = cpufreq_cpu_get(sibling);
810 WARN_ON(!policy); 815 WARN_ON(!policy);
811 816
812 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 817 if (has_target)
818 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
813 819
814 lock_policy_rwsem_write(sibling); 820 lock_policy_rwsem_write(sibling);
815 821
816 spin_lock_irqsave(&cpufreq_driver_lock, flags); 822 write_lock_irqsave(&cpufreq_driver_lock, flags);
817 823
818 cpumask_set_cpu(cpu, policy->cpus); 824 cpumask_set_cpu(cpu, policy->cpus);
819 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; 825 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
820 per_cpu(cpufreq_cpu_data, cpu) = policy; 826 per_cpu(cpufreq_cpu_data, cpu) = policy;
821 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 827 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
822 828
823 unlock_policy_rwsem_write(sibling); 829 unlock_policy_rwsem_write(sibling);
824 830
825 __cpufreq_governor(policy, CPUFREQ_GOV_START); 831 if (has_target) {
826 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 832 __cpufreq_governor(policy, CPUFREQ_GOV_START);
833 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
834 }
827 835
828 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 836 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
829 if (ret) { 837 if (ret) {
@@ -871,15 +879,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
871 879
872#ifdef CONFIG_HOTPLUG_CPU 880#ifdef CONFIG_HOTPLUG_CPU
873 /* Check if this cpu was hot-unplugged earlier and has siblings */ 881 /* Check if this cpu was hot-unplugged earlier and has siblings */
874 spin_lock_irqsave(&cpufreq_driver_lock, flags); 882 read_lock_irqsave(&cpufreq_driver_lock, flags);
875 for_each_online_cpu(sibling) { 883 for_each_online_cpu(sibling) {
876 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); 884 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
877 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { 885 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
878 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 886 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
879 return cpufreq_add_policy_cpu(cpu, sibling, dev); 887 return cpufreq_add_policy_cpu(cpu, sibling, dev);
880 } 888 }
881 } 889 }
882 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 890 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
883#endif 891#endif
884#endif 892#endif
885 893
@@ -952,10 +960,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
952 return 0; 960 return 0;
953 961
954err_out_unregister: 962err_out_unregister:
955 spin_lock_irqsave(&cpufreq_driver_lock, flags); 963 write_lock_irqsave(&cpufreq_driver_lock, flags);
956 for_each_cpu(j, policy->cpus) 964 for_each_cpu(j, policy->cpus)
957 per_cpu(cpufreq_cpu_data, j) = NULL; 965 per_cpu(cpufreq_cpu_data, j) = NULL;
958 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 966 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
959 967
960 kobject_put(&policy->kobj); 968 kobject_put(&policy->kobj);
961 wait_for_completion(&policy->kobj_unregister); 969 wait_for_completion(&policy->kobj_unregister);
@@ -1008,12 +1016,12 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1008 1016
1009 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1017 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1010 1018
1011 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1019 write_lock_irqsave(&cpufreq_driver_lock, flags);
1012 1020
1013 data = per_cpu(cpufreq_cpu_data, cpu); 1021 data = per_cpu(cpufreq_cpu_data, cpu);
1014 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1022 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1015 1023
1016 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1024 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1017 1025
1018 if (!data) { 1026 if (!data) {
1019 pr_debug("%s: No cpu_data found\n", __func__); 1027 pr_debug("%s: No cpu_data found\n", __func__);
@@ -1031,7 +1039,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1031 1039
1032 WARN_ON(lock_policy_rwsem_write(cpu)); 1040 WARN_ON(lock_policy_rwsem_write(cpu));
1033 cpus = cpumask_weight(data->cpus); 1041 cpus = cpumask_weight(data->cpus);
1034 cpumask_clear_cpu(cpu, data->cpus); 1042
1043 if (cpus > 1)
1044 cpumask_clear_cpu(cpu, data->cpus);
1035 unlock_policy_rwsem_write(cpu); 1045 unlock_policy_rwsem_write(cpu);
1036 1046
1037 if (cpu != data->cpu) { 1047 if (cpu != data->cpu) {
@@ -1047,9 +1057,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1047 WARN_ON(lock_policy_rwsem_write(cpu)); 1057 WARN_ON(lock_policy_rwsem_write(cpu));
1048 cpumask_set_cpu(cpu, data->cpus); 1058 cpumask_set_cpu(cpu, data->cpus);
1049 1059
1050 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1060 write_lock_irqsave(&cpufreq_driver_lock, flags);
1051 per_cpu(cpufreq_cpu_data, cpu) = data; 1061 per_cpu(cpufreq_cpu_data, cpu) = data;
1052 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1062 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1053 1063
1054 unlock_policy_rwsem_write(cpu); 1064 unlock_policy_rwsem_write(cpu);
1055 1065
@@ -1070,6 +1080,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1070 1080
1071 /* If cpu is last user of policy, free policy */ 1081 /* If cpu is last user of policy, free policy */
1072 if (cpus == 1) { 1082 if (cpus == 1) {
1083 if (cpufreq_driver->target)
1084 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1085
1073 lock_policy_rwsem_read(cpu); 1086 lock_policy_rwsem_read(cpu);
1074 kobj = &data->kobj; 1087 kobj = &data->kobj;
1075 cmp = &data->kobj_unregister; 1088 cmp = &data->kobj_unregister;
@@ -1134,16 +1147,23 @@ static void handle_update(struct work_struct *work)
1134static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1147static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1135 unsigned int new_freq) 1148 unsigned int new_freq)
1136{ 1149{
1150 struct cpufreq_policy *policy;
1137 struct cpufreq_freqs freqs; 1151 struct cpufreq_freqs freqs;
1152 unsigned long flags;
1153
1138 1154
1139 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " 1155 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1140 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 1156 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1141 1157
1142 freqs.cpu = cpu;
1143 freqs.old = old_freq; 1158 freqs.old = old_freq;
1144 freqs.new = new_freq; 1159 freqs.new = new_freq;
1145 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1160
1146 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1161 read_lock_irqsave(&cpufreq_driver_lock, flags);
1162 policy = per_cpu(cpufreq_cpu_data, cpu);
1163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1164
1165 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1147} 1167}
1148 1168
1149 1169
@@ -1544,10 +1564,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1544 policy->cpu, event); 1564 policy->cpu, event);
1545 ret = policy->governor->governor(policy, event); 1565 ret = policy->governor->governor(policy, event);
1546 1566
1547 if (event == CPUFREQ_GOV_START) 1567 if (!ret) {
1548 policy->governor->initialized++; 1568 if (event == CPUFREQ_GOV_POLICY_INIT)
1549 else if (event == CPUFREQ_GOV_STOP) 1569 policy->governor->initialized++;
1550 policy->governor->initialized--; 1570 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1571 policy->governor->initialized--;
1572 }
1551 1573
1552 /* we keep one module reference alive for 1574 /* we keep one module reference alive for
1553 each CPU governed by this CPU */ 1575 each CPU governed by this CPU */
@@ -1651,7 +1673,7 @@ EXPORT_SYMBOL(cpufreq_get_policy);
1651static int __cpufreq_set_policy(struct cpufreq_policy *data, 1673static int __cpufreq_set_policy(struct cpufreq_policy *data,
1652 struct cpufreq_policy *policy) 1674 struct cpufreq_policy *policy)
1653{ 1675{
1654 int ret = 0; 1676 int ret = 0, failed = 1;
1655 1677
1656 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1678 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1657 policy->min, policy->max); 1679 policy->min, policy->max);
@@ -1705,18 +1727,31 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1705 pr_debug("governor switch\n"); 1727 pr_debug("governor switch\n");
1706 1728
1707 /* end old governor */ 1729 /* end old governor */
1708 if (data->governor) 1730 if (data->governor) {
1709 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1731 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1732 __cpufreq_governor(data,
1733 CPUFREQ_GOV_POLICY_EXIT);
1734 }
1710 1735
1711 /* start new governor */ 1736 /* start new governor */
1712 data->governor = policy->governor; 1737 data->governor = policy->governor;
1713 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1738 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1739 if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
1740 failed = 0;
1741 else
1742 __cpufreq_governor(data,
1743 CPUFREQ_GOV_POLICY_EXIT);
1744 }
1745
1746 if (failed) {
1714 /* new governor failed, so re-start old one */ 1747 /* new governor failed, so re-start old one */
1715 pr_debug("starting governor %s failed\n", 1748 pr_debug("starting governor %s failed\n",
1716 data->governor->name); 1749 data->governor->name);
1717 if (old_gov) { 1750 if (old_gov) {
1718 data->governor = old_gov; 1751 data->governor = old_gov;
1719 __cpufreq_governor(data, 1752 __cpufreq_governor(data,
1753 CPUFREQ_GOV_POLICY_INIT);
1754 __cpufreq_governor(data,
1720 CPUFREQ_GOV_START); 1755 CPUFREQ_GOV_START);
1721 } 1756 }
1722 ret = -EINVAL; 1757 ret = -EINVAL;
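
The governor switch now wraps START/STOP in POLICY_INIT/POLICY_EXIT and only falls back to the old governor after the new one has failed either INIT or START. A plain-C model of that ordering follows; the gov_event() stub and the init_fails/start_fails flags are made up so the fallback path can be exercised.

#include <stdio.h>

struct governor {
        const char *name;
        int init_fails;
        int start_fails;
};

/* prints the event and reports success (0) or failure (-1) */
static int gov_event(struct governor *g, const char *event, int fails)
{
        printf("%s: %s%s\n", g->name, event, fails ? " (failed)" : "");
        return fails ? -1 : 0;
}

/* mirrors the reworked switch: stop and exit the old governor, init and start
 * the new one, undo the init if start fails, then fall back to the old one */
static int switch_governor(struct governor **cur, struct governor *new_gov)
{
        struct governor *old = *cur;
        int failed = 1;

        if (old) {
                gov_event(old, "STOP", 0);
                gov_event(old, "POLICY_EXIT", 0);
        }

        *cur = new_gov;
        if (!gov_event(new_gov, "POLICY_INIT", new_gov->init_fails)) {
                if (!gov_event(new_gov, "START", new_gov->start_fails))
                        failed = 0;
                else
                        gov_event(new_gov, "POLICY_EXIT", 0);
        }

        if (failed && old) {
                *cur = old;
                gov_event(old, "POLICY_INIT", 0);
                gov_event(old, "START", 0);
        }
        return failed ? -1 : 0;
}

int main(void)
{
        struct governor ondemand = { "ondemand", 0, 0 };
        struct governor broken = { "broken", 0, 1 };
        struct governor *cur = &ondemand;

        switch_governor(&cur, &broken); /* START fails, ondemand is restored */
        printf("active governor: %s\n", cur->name);
        return 0;
}
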
@@ -1848,13 +1883,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1848 if (driver_data->setpolicy) 1883 if (driver_data->setpolicy)
1849 driver_data->flags |= CPUFREQ_CONST_LOOPS; 1884 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1850 1885
1851 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1886 write_lock_irqsave(&cpufreq_driver_lock, flags);
1852 if (cpufreq_driver) { 1887 if (cpufreq_driver) {
1853 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1888 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1854 return -EBUSY; 1889 return -EBUSY;
1855 } 1890 }
1856 cpufreq_driver = driver_data; 1891 cpufreq_driver = driver_data;
1857 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1892 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1858 1893
1859 ret = subsys_interface_register(&cpufreq_interface); 1894 ret = subsys_interface_register(&cpufreq_interface);
1860 if (ret) 1895 if (ret)
@@ -1886,9 +1921,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1886err_if_unreg: 1921err_if_unreg:
1887 subsys_interface_unregister(&cpufreq_interface); 1922 subsys_interface_unregister(&cpufreq_interface);
1888err_null_driver: 1923err_null_driver:
1889 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1924 write_lock_irqsave(&cpufreq_driver_lock, flags);
1890 cpufreq_driver = NULL; 1925 cpufreq_driver = NULL;
1891 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1926 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1892 return ret; 1927 return ret;
1893} 1928}
1894EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1929EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -1914,9 +1949,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1914 subsys_interface_unregister(&cpufreq_interface); 1949 subsys_interface_unregister(&cpufreq_interface);
1915 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 1950 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1916 1951
1917 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1952 write_lock_irqsave(&cpufreq_driver_lock, flags);
1918 cpufreq_driver = NULL; 1953 cpufreq_driver = NULL;
1919 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1954 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1920 1955
1921 return 0; 1956 return 0;
1922} 1957}
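
The lock conversion above lets the hot lookup paths (reads of cpufreq_driver and the per-CPU policy pointers) run concurrently, while registration and removal still take the lock exclusively. A user-space pthread sketch of the same read-mostly pattern follows; the driver string merely stands in for the cpufreq_driver pointer and the error code is simplified.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t driver_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *driver;      /* stands in for the cpufreq_driver pointer */

/* hot path: any number of lookups may hold the read side at the same time */
static const char *lookup_driver(void)
{
        const char *d;

        pthread_rwlock_rdlock(&driver_lock);
        d = driver;
        pthread_rwlock_unlock(&driver_lock);
        return d;
}

/* cold path: registration is exclusive, like the write_lock_irqsave() sites */
static int register_driver(const char *name)
{
        int ret = 0;

        pthread_rwlock_wrlock(&driver_lock);
        if (driver)
                ret = -1;       /* the kernel version returns -EBUSY here */
        else
                driver = name;
        pthread_rwlock_unlock(&driver_lock);
        return ret;
}

int main(void)
{
        register_driver("acpi-cpufreq");
        printf("registered: %s\n", lookup_driver());
        printf("second register: %d\n", register_driver("other"));
        return 0;
}
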
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4fd0006b1291..0ceb2eff5a7e 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -20,6 +20,7 @@
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/notifier.h> 21#include <linux/notifier.h>
22#include <linux/percpu-defs.h> 22#include <linux/percpu-defs.h>
23#include <linux/slab.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
24#include <linux/types.h> 25#include <linux/types.h>
25 26
@@ -28,25 +29,29 @@
28/* Conservative governor macros */ 29/* Conservative governor macros */
29#define DEF_FREQUENCY_UP_THRESHOLD (80) 30#define DEF_FREQUENCY_UP_THRESHOLD (80)
30#define DEF_FREQUENCY_DOWN_THRESHOLD (20) 31#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
32#define DEF_FREQUENCY_STEP (5)
31#define DEF_SAMPLING_DOWN_FACTOR (1) 33#define DEF_SAMPLING_DOWN_FACTOR (1)
32#define MAX_SAMPLING_DOWN_FACTOR (10) 34#define MAX_SAMPLING_DOWN_FACTOR (10)
33 35
34static struct dbs_data cs_dbs_data;
35static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); 36static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
36 37
37static struct cs_dbs_tuners cs_tuners = { 38static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
38 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 39 struct cpufreq_policy *policy)
39 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, 40{
40 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 41 unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
41 .ignore_nice = 0, 42
42 .freq_step = 5, 43 /* max freq cannot be less than 100. But who knows... */
43}; 44 if (unlikely(freq_target == 0))
45 freq_target = DEF_FREQUENCY_STEP;
46
47 return freq_target;
48}
44 49
45/* 50/*
46 * Every sampling_rate, we check, if current idle time is less than 20% 51 * Every sampling_rate, we check, if current idle time is less than 20%
47 * (default), then we try to increase frequency Every sampling_rate * 52 * (default), then we try to increase frequency. Every sampling_rate *
48 * sampling_down_factor, we check, if current idle time is more than 80%, then 53 * sampling_down_factor, we check, if current idle time is more than 80%
49 * we try to decrease frequency 54 * (default), then we try to decrease frequency
50 * 55 *
51 * Any frequency increase takes it to the maximum frequency. Frequency reduction 56 * Any frequency increase takes it to the maximum frequency. Frequency reduction
52 * happens at minimum steps of 5% (default) of maximum frequency 57 * happens at minimum steps of 5% (default) of maximum frequency
@@ -55,30 +60,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
55{ 60{
56 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); 61 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
57 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 62 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
58 unsigned int freq_target; 63 struct dbs_data *dbs_data = policy->governor_data;
64 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
59 65
60 /* 66 /*
61 * break out if we 'cannot' reduce the speed as the user might 67 * break out if we 'cannot' reduce the speed as the user might
62 * want freq_step to be zero 68 * want freq_step to be zero
63 */ 69 */
64 if (cs_tuners.freq_step == 0) 70 if (cs_tuners->freq_step == 0)
65 return; 71 return;
66 72
67 /* Check for frequency increase */ 73 /* Check for frequency increase */
68 if (load > cs_tuners.up_threshold) { 74 if (load > cs_tuners->up_threshold) {
69 dbs_info->down_skip = 0; 75 dbs_info->down_skip = 0;
70 76
71 /* if we are already at full speed then break out early */ 77 /* if we are already at full speed then break out early */
72 if (dbs_info->requested_freq == policy->max) 78 if (dbs_info->requested_freq == policy->max)
73 return; 79 return;
74 80
75 freq_target = (cs_tuners.freq_step * policy->max) / 100; 81 dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
76
77 /* max freq cannot be less than 100. But who knows.... */
78 if (unlikely(freq_target == 0))
79 freq_target = 5;
80
81 dbs_info->requested_freq += freq_target;
82 if (dbs_info->requested_freq > policy->max) 82 if (dbs_info->requested_freq > policy->max)
83 dbs_info->requested_freq = policy->max; 83 dbs_info->requested_freq = policy->max;
84 84
@@ -87,45 +87,48 @@ static void cs_check_cpu(int cpu, unsigned int load)
87 return; 87 return;
88 } 88 }
89 89
90 /* 90 /* if sampling_down_factor is active break out early */
91 * The optimal frequency is the frequency that is the lowest that can 91 if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
92 * support the current CPU usage without triggering the up policy. To be 92 return;
93 * safe, we focus 10 points under the threshold. 93 dbs_info->down_skip = 0;
94 */
95 if (load < (cs_tuners.down_threshold - 10)) {
96 freq_target = (cs_tuners.freq_step * policy->max) / 100;
97
98 dbs_info->requested_freq -= freq_target;
99 if (dbs_info->requested_freq < policy->min)
100 dbs_info->requested_freq = policy->min;
101 94
95 /* Check for frequency decrease */
96 if (load < cs_tuners->down_threshold) {
102 /* 97 /*
103 * if we cannot reduce the frequency anymore, break out early 98 * if we cannot reduce the frequency anymore, break out early
104 */ 99 */
105 if (policy->cur == policy->min) 100 if (policy->cur == policy->min)
106 return; 101 return;
107 102
103 dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
104 if (dbs_info->requested_freq < policy->min)
105 dbs_info->requested_freq = policy->min;
106
108 __cpufreq_driver_target(policy, dbs_info->requested_freq, 107 __cpufreq_driver_target(policy, dbs_info->requested_freq,
109 CPUFREQ_RELATION_H); 108 CPUFREQ_RELATION_L);
110 return; 109 return;
111 } 110 }
112} 111}
113 112
114static void cs_dbs_timer(struct work_struct *work) 113static void cs_dbs_timer(struct work_struct *work)
115{ 114{
116 struct delayed_work *dw = to_delayed_work(work);
117 struct cs_cpu_dbs_info_s *dbs_info = container_of(work, 115 struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
118 struct cs_cpu_dbs_info_s, cdbs.work.work); 116 struct cs_cpu_dbs_info_s, cdbs.work.work);
119 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 117 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
120 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info, 118 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
121 cpu); 119 cpu);
122 int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); 120 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
121 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
122 int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
123 bool modify_all = true;
123 124
124 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 125 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
125 if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate)) 126 if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
126 dbs_check_cpu(&cs_dbs_data, cpu); 127 modify_all = false;
128 else
129 dbs_check_cpu(dbs_data, cpu);
127 130
128 schedule_delayed_work_on(smp_processor_id(), dw, delay); 131 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
129 mutex_unlock(&core_dbs_info->cdbs.timer_mutex); 132 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
130} 133}
131 134
@@ -154,16 +157,12 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
154} 157}
155 158
156/************************** sysfs interface ************************/ 159/************************** sysfs interface ************************/
157static ssize_t show_sampling_rate_min(struct kobject *kobj, 160static struct common_dbs_data cs_dbs_cdata;
158 struct attribute *attr, char *buf)
159{
160 return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
161}
162 161
163static ssize_t store_sampling_down_factor(struct kobject *a, 162static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
164 struct attribute *b, 163 const char *buf, size_t count)
165 const char *buf, size_t count)
166{ 164{
165 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
167 unsigned int input; 166 unsigned int input;
168 int ret; 167 int ret;
169 ret = sscanf(buf, "%u", &input); 168 ret = sscanf(buf, "%u", &input);
@@ -171,13 +170,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
171 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) 170 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
172 return -EINVAL; 171 return -EINVAL;
173 172
174 cs_tuners.sampling_down_factor = input; 173 cs_tuners->sampling_down_factor = input;
175 return count; 174 return count;
176} 175}
177 176
178static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, 177static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
179 const char *buf, size_t count) 178 size_t count)
180{ 179{
180 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
181 unsigned int input; 181 unsigned int input;
182 int ret; 182 int ret;
183 ret = sscanf(buf, "%u", &input); 183 ret = sscanf(buf, "%u", &input);
@@ -185,43 +185,46 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
185 if (ret != 1) 185 if (ret != 1)
186 return -EINVAL; 186 return -EINVAL;
187 187
188 cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); 188 cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
189 return count; 189 return count;
190} 190}
191 191
192static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 192static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
193 const char *buf, size_t count) 193 size_t count)
194{ 194{
195 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
195 unsigned int input; 196 unsigned int input;
196 int ret; 197 int ret;
197 ret = sscanf(buf, "%u", &input); 198 ret = sscanf(buf, "%u", &input);
198 199
199 if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) 200 if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
200 return -EINVAL; 201 return -EINVAL;
201 202
202 cs_tuners.up_threshold = input; 203 cs_tuners->up_threshold = input;
203 return count; 204 return count;
204} 205}
205 206
206static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, 207static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
207 const char *buf, size_t count) 208 size_t count)
208{ 209{
210 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
209 unsigned int input; 211 unsigned int input;
210 int ret; 212 int ret;
211 ret = sscanf(buf, "%u", &input); 213 ret = sscanf(buf, "%u", &input);
212 214
213 /* cannot be lower than 11 otherwise freq will not fall */ 215 /* cannot be lower than 11 otherwise freq will not fall */
214 if (ret != 1 || input < 11 || input > 100 || 216 if (ret != 1 || input < 11 || input > 100 ||
215 input >= cs_tuners.up_threshold) 217 input >= cs_tuners->up_threshold)
216 return -EINVAL; 218 return -EINVAL;
217 219
218 cs_tuners.down_threshold = input; 220 cs_tuners->down_threshold = input;
219 return count; 221 return count;
220} 222}
221 223
222static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, 224static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
223 const char *buf, size_t count) 225 size_t count)
224{ 226{
227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
225 unsigned int input, j; 228 unsigned int input, j;
226 int ret; 229 int ret;
227 230
@@ -232,27 +235,28 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
232 if (input > 1) 235 if (input > 1)
233 input = 1; 236 input = 1;
234 237
235 if (input == cs_tuners.ignore_nice) /* nothing to do */ 238 if (input == cs_tuners->ignore_nice) /* nothing to do */
236 return count; 239 return count;
237 240
238 cs_tuners.ignore_nice = input; 241 cs_tuners->ignore_nice = input;
239 242
240 /* we need to re-evaluate prev_cpu_idle */ 243 /* we need to re-evaluate prev_cpu_idle */
241 for_each_online_cpu(j) { 244 for_each_online_cpu(j) {
242 struct cs_cpu_dbs_info_s *dbs_info; 245 struct cs_cpu_dbs_info_s *dbs_info;
243 dbs_info = &per_cpu(cs_cpu_dbs_info, j); 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
244 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
245 &dbs_info->cdbs.prev_cpu_wall); 248 &dbs_info->cdbs.prev_cpu_wall, 0);
246 if (cs_tuners.ignore_nice) 249 if (cs_tuners->ignore_nice)
247 dbs_info->cdbs.prev_cpu_nice = 250 dbs_info->cdbs.prev_cpu_nice =
248 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 251 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
249 } 252 }
250 return count; 253 return count;
251} 254}
252 255
253static ssize_t store_freq_step(struct kobject *a, struct attribute *b, 256static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
254 const char *buf, size_t count) 257 size_t count)
255{ 258{
259 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
256 unsigned int input; 260 unsigned int input;
257 int ret; 261 int ret;
258 ret = sscanf(buf, "%u", &input); 262 ret = sscanf(buf, "%u", &input);
@@ -267,43 +271,88 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
267 * no need to test here if freq_step is zero as the user might actually 271 * no need to test here if freq_step is zero as the user might actually
268 * want this, they would be crazy though :) 272 * want this, they would be crazy though :)
269 */ 273 */
270 cs_tuners.freq_step = input; 274 cs_tuners->freq_step = input;
271 return count; 275 return count;
272} 276}
273 277
274show_one(cs, sampling_rate, sampling_rate); 278show_store_one(cs, sampling_rate);
275show_one(cs, sampling_down_factor, sampling_down_factor); 279show_store_one(cs, sampling_down_factor);
276show_one(cs, up_threshold, up_threshold); 280show_store_one(cs, up_threshold);
277show_one(cs, down_threshold, down_threshold); 281show_store_one(cs, down_threshold);
278show_one(cs, ignore_nice_load, ignore_nice); 282show_store_one(cs, ignore_nice);
279show_one(cs, freq_step, freq_step); 283show_store_one(cs, freq_step);
280 284declare_show_sampling_rate_min(cs);
281define_one_global_rw(sampling_rate); 285
282define_one_global_rw(sampling_down_factor); 286gov_sys_pol_attr_rw(sampling_rate);
283define_one_global_rw(up_threshold); 287gov_sys_pol_attr_rw(sampling_down_factor);
284define_one_global_rw(down_threshold); 288gov_sys_pol_attr_rw(up_threshold);
285define_one_global_rw(ignore_nice_load); 289gov_sys_pol_attr_rw(down_threshold);
286define_one_global_rw(freq_step); 290gov_sys_pol_attr_rw(ignore_nice);
287define_one_global_ro(sampling_rate_min); 291gov_sys_pol_attr_rw(freq_step);
288 292gov_sys_pol_attr_ro(sampling_rate_min);
289static struct attribute *dbs_attributes[] = { 293
290 &sampling_rate_min.attr, 294static struct attribute *dbs_attributes_gov_sys[] = {
291 &sampling_rate.attr, 295 &sampling_rate_min_gov_sys.attr,
292 &sampling_down_factor.attr, 296 &sampling_rate_gov_sys.attr,
293 &up_threshold.attr, 297 &sampling_down_factor_gov_sys.attr,
294 &down_threshold.attr, 298 &up_threshold_gov_sys.attr,
295 &ignore_nice_load.attr, 299 &down_threshold_gov_sys.attr,
296 &freq_step.attr, 300 &ignore_nice_gov_sys.attr,
301 &freq_step_gov_sys.attr,
297 NULL 302 NULL
298}; 303};
299 304
300static struct attribute_group cs_attr_group = { 305static struct attribute_group cs_attr_group_gov_sys = {
301 .attrs = dbs_attributes, 306 .attrs = dbs_attributes_gov_sys,
307 .name = "conservative",
308};
309
310static struct attribute *dbs_attributes_gov_pol[] = {
311 &sampling_rate_min_gov_pol.attr,
312 &sampling_rate_gov_pol.attr,
313 &sampling_down_factor_gov_pol.attr,
314 &up_threshold_gov_pol.attr,
315 &down_threshold_gov_pol.attr,
316 &ignore_nice_gov_pol.attr,
317 &freq_step_gov_pol.attr,
318 NULL
319};
320
321static struct attribute_group cs_attr_group_gov_pol = {
322 .attrs = dbs_attributes_gov_pol,
302 .name = "conservative", 323 .name = "conservative",
303}; 324};
304 325
305/************************** sysfs end ************************/ 326/************************** sysfs end ************************/
306 327
328static int cs_init(struct dbs_data *dbs_data)
329{
330 struct cs_dbs_tuners *tuners;
331
332 tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
333 if (!tuners) {
334 pr_err("%s: kzalloc failed\n", __func__);
335 return -ENOMEM;
336 }
337
338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
341 tuners->ignore_nice = 0;
342 tuners->freq_step = DEF_FREQUENCY_STEP;
343
344 dbs_data->tuners = tuners;
345 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
346 jiffies_to_usecs(10);
347 mutex_init(&dbs_data->mutex);
348 return 0;
349}
350
351static void cs_exit(struct dbs_data *dbs_data)
352{
353 kfree(dbs_data->tuners);
354}
355
307define_get_cpu_dbs_routines(cs_cpu_dbs_info); 356define_get_cpu_dbs_routines(cs_cpu_dbs_info);
308 357
309static struct notifier_block cs_cpufreq_notifier_block = { 358static struct notifier_block cs_cpufreq_notifier_block = {
@@ -314,21 +363,23 @@ static struct cs_ops cs_ops = {
314 .notifier_block = &cs_cpufreq_notifier_block, 363 .notifier_block = &cs_cpufreq_notifier_block,
315}; 364};
316 365
317static struct dbs_data cs_dbs_data = { 366static struct common_dbs_data cs_dbs_cdata = {
318 .governor = GOV_CONSERVATIVE, 367 .governor = GOV_CONSERVATIVE,
319 .attr_group = &cs_attr_group, 368 .attr_group_gov_sys = &cs_attr_group_gov_sys,
320 .tuners = &cs_tuners, 369 .attr_group_gov_pol = &cs_attr_group_gov_pol,
321 .get_cpu_cdbs = get_cpu_cdbs, 370 .get_cpu_cdbs = get_cpu_cdbs,
322 .get_cpu_dbs_info_s = get_cpu_dbs_info_s, 371 .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
323 .gov_dbs_timer = cs_dbs_timer, 372 .gov_dbs_timer = cs_dbs_timer,
324 .gov_check_cpu = cs_check_cpu, 373 .gov_check_cpu = cs_check_cpu,
325 .gov_ops = &cs_ops, 374 .gov_ops = &cs_ops,
375 .init = cs_init,
376 .exit = cs_exit,
326}; 377};
327 378
328static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, 379static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
329 unsigned int event) 380 unsigned int event)
330{ 381{
331 return cpufreq_governor_dbs(&cs_dbs_data, policy, event); 382 return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
332} 383}
333 384
334#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE 385#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -343,7 +394,6 @@ struct cpufreq_governor cpufreq_gov_conservative = {
343 394
344static int __init cpufreq_gov_dbs_init(void) 395static int __init cpufreq_gov_dbs_init(void)
345{ 396{
346 mutex_init(&cs_dbs_data.mutex);
347 return cpufreq_register_governor(&cpufreq_gov_conservative); 397 return cpufreq_register_governor(&cpufreq_gov_conservative);
348} 398}
349 399
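
With per-policy governors the conservative tuners become a per-instance allocation created in cs_init() and freed in cs_exit(), and the step size is derived from the owning policy through get_freq_target(). The standalone sketch below models only that allocation and the step-size floor; the structs are trimmed to the fields the example needs and the numbers are invented.

#include <stdio.h>
#include <stdlib.h>

#define DEF_FREQUENCY_STEP 5    /* default step in percent, also used as the floor */

struct cs_dbs_tuners {
        unsigned int freq_step;
};

struct policy {
        unsigned int max;       /* highest frequency of the policy, in kHz */
};

struct dbs_data {
        struct cs_dbs_tuners *tuners;
};

/* like cs_init(): every governor instance gets its own tuners object */
static int cs_init(struct dbs_data *dbs_data)
{
        struct cs_dbs_tuners *tuners = calloc(1, sizeof(*tuners));

        if (!tuners)
                return -1;      /* -ENOMEM in the kernel version */
        tuners->freq_step = DEF_FREQUENCY_STEP;
        dbs_data->tuners = tuners;
        return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
        free(dbs_data->tuners);
}

/* like get_freq_target(): freq_step percent of policy->max, never zero */
static unsigned int get_freq_target(const struct cs_dbs_tuners *tuners,
                                    const struct policy *policy)
{
        unsigned int target = (tuners->freq_step * policy->max) / 100;

        return target ? target : DEF_FREQUENCY_STEP;
}

int main(void)
{
        struct dbs_data dbs = { 0 };
        struct policy policy = { .max = 1200000 };

        if (cs_init(&dbs))
                return 1;
        printf("step: %u kHz\n", get_freq_target(dbs.tuners, &policy));
        cs_exit(&dbs);
        return 0;
}
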
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5a76086ff09b..443442df113b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,12 +22,29 @@
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/kernel_stat.h> 23#include <linux/kernel_stat.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h>
25#include <linux/tick.h> 26#include <linux/tick.h>
26#include <linux/types.h> 27#include <linux/types.h>
27#include <linux/workqueue.h> 28#include <linux/workqueue.h>
28 29
29#include "cpufreq_governor.h" 30#include "cpufreq_governor.h"
30 31
32static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
33{
34 if (have_governor_per_policy())
35 return &policy->kobj;
36 else
37 return cpufreq_global_kobject;
38}
39
40static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
41{
42 if (have_governor_per_policy())
43 return dbs_data->cdata->attr_group_gov_pol;
44 else
45 return dbs_data->cdata->attr_group_gov_sys;
46}
47
31static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 48static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
32{ 49{
33 u64 idle_time; 50 u64 idle_time;
@@ -50,13 +67,13 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
50 return cputime_to_usecs(idle_time); 67 return cputime_to_usecs(idle_time);
51} 68}
52 69
53u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) 70u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
54{ 71{
55 u64 idle_time = get_cpu_idle_time_us(cpu, NULL); 72 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
56 73
57 if (idle_time == -1ULL) 74 if (idle_time == -1ULL)
58 return get_cpu_idle_time_jiffy(cpu, wall); 75 return get_cpu_idle_time_jiffy(cpu, wall);
59 else 76 else if (!io_busy)
60 idle_time += get_cpu_iowait_time_us(cpu, wall); 77 idle_time += get_cpu_iowait_time_us(cpu, wall);
61 78
62 return idle_time; 79 return idle_time;
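
The io_busy flag decides whether iowait is folded into the idle sum: with io_is_busy set, time spent waiting on I/O now counts as load for ondemand. The short program below only models that arithmetic over one sampling window; the sample durations are invented.

#include <stdio.h>

/* mirrors the io_busy branch of get_cpu_idle_time(): when io_busy is set,
 * iowait is not folded into the idle sum */
static unsigned long long cpu_idle_time(unsigned long long idle_us,
                                        unsigned long long iowait_us,
                                        int io_busy)
{
        return io_busy ? idle_us : idle_us + iowait_us;
}

int main(void)
{
        unsigned long long wall = 100000;       /* 100 ms sampling window, in us */
        unsigned long long idle = 20000;        /* 20 ms truly idle */
        unsigned long long iowait = 50000;      /* 50 ms waiting on disk */
        int io_busy;

        for (io_busy = 0; io_busy <= 1; io_busy++) {
                unsigned long long idle_total = cpu_idle_time(idle, iowait, io_busy);
                unsigned int load = (unsigned int)(100 * (wall - idle_total) / wall);

                printf("io_is_busy=%d -> load %u%%\n", io_busy, load);
        }
        return 0;
}

With these sample numbers the same window reads as 30% load without io_is_busy and 80% with it.
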
@@ -65,7 +82,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time);
65 82
66void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) 83void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
67{ 84{
68 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); 85 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
69 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 86 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
70 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 87 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
71 struct cpufreq_policy *policy; 88 struct cpufreq_policy *policy;
@@ -73,7 +90,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
73 unsigned int ignore_nice; 90 unsigned int ignore_nice;
74 unsigned int j; 91 unsigned int j;
75 92
76 if (dbs_data->governor == GOV_ONDEMAND) 93 if (dbs_data->cdata->governor == GOV_ONDEMAND)
77 ignore_nice = od_tuners->ignore_nice; 94 ignore_nice = od_tuners->ignore_nice;
78 else 95 else
79 ignore_nice = cs_tuners->ignore_nice; 96 ignore_nice = cs_tuners->ignore_nice;
@@ -83,13 +100,22 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
83 /* Get Absolute Load (in terms of freq for ondemand gov) */ 100 /* Get Absolute Load (in terms of freq for ondemand gov) */
84 for_each_cpu(j, policy->cpus) { 101 for_each_cpu(j, policy->cpus) {
85 struct cpu_dbs_common_info *j_cdbs; 102 struct cpu_dbs_common_info *j_cdbs;
86 u64 cur_wall_time, cur_idle_time, cur_iowait_time; 103 u64 cur_wall_time, cur_idle_time;
87 unsigned int idle_time, wall_time, iowait_time; 104 unsigned int idle_time, wall_time;
88 unsigned int load; 105 unsigned int load;
106 int io_busy = 0;
89 107
90 j_cdbs = dbs_data->get_cpu_cdbs(j); 108 j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
91 109
92 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); 110 /*
111 * For the purpose of ondemand, waiting for disk IO is
112 * an indication that you're performance critical, and
113 * not that the system is actually idle. So do not add
114 * the iowait time to the cpu idle time.
115 */
116 if (dbs_data->cdata->governor == GOV_ONDEMAND)
117 io_busy = od_tuners->io_is_busy;
118 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
93 119
94 wall_time = (unsigned int) 120 wall_time = (unsigned int)
95 (cur_wall_time - j_cdbs->prev_cpu_wall); 121 (cur_wall_time - j_cdbs->prev_cpu_wall);
@@ -117,35 +143,12 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
117 idle_time += jiffies_to_usecs(cur_nice_jiffies); 143 idle_time += jiffies_to_usecs(cur_nice_jiffies);
118 } 144 }
119 145
120 if (dbs_data->governor == GOV_ONDEMAND) {
121 struct od_cpu_dbs_info_s *od_j_dbs_info =
122 dbs_data->get_cpu_dbs_info_s(cpu);
123
124 cur_iowait_time = get_cpu_iowait_time_us(j,
125 &cur_wall_time);
126 if (cur_iowait_time == -1ULL)
127 cur_iowait_time = 0;
128
129 iowait_time = (unsigned int) (cur_iowait_time -
130 od_j_dbs_info->prev_cpu_iowait);
131 od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
132
133 /*
134 * For the purpose of ondemand, waiting for disk IO is
135 * an indication that you're performance critical, and
136 * not that the system is actually idle. So subtract the
137 * iowait time from the cpu idle time.
138 */
139 if (od_tuners->io_is_busy && idle_time >= iowait_time)
140 idle_time -= iowait_time;
141 }
142
143 if (unlikely(!wall_time || wall_time < idle_time)) 146 if (unlikely(!wall_time || wall_time < idle_time))
144 continue; 147 continue;
145 148
146 load = 100 * (wall_time - idle_time) / wall_time; 149 load = 100 * (wall_time - idle_time) / wall_time;
147 150
148 if (dbs_data->governor == GOV_ONDEMAND) { 151 if (dbs_data->cdata->governor == GOV_ONDEMAND) {
149 int freq_avg = __cpufreq_driver_getavg(policy, j); 152 int freq_avg = __cpufreq_driver_getavg(policy, j);
150 if (freq_avg <= 0) 153 if (freq_avg <= 0)
151 freq_avg = policy->cur; 154 freq_avg = policy->cur;
@@ -157,24 +160,42 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
157 max_load = load; 160 max_load = load;
158 } 161 }
159 162
160 dbs_data->gov_check_cpu(cpu, max_load); 163 dbs_data->cdata->gov_check_cpu(cpu, max_load);
161} 164}
162EXPORT_SYMBOL_GPL(dbs_check_cpu); 165EXPORT_SYMBOL_GPL(dbs_check_cpu);
163 166
164static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu, 167static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
165 unsigned int sampling_rate) 168 unsigned int delay)
166{ 169{
167 int delay = delay_for_sampling_rate(sampling_rate); 170 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
168 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
169 171
170 schedule_delayed_work_on(cpu, &cdbs->work, delay); 172 mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
171} 173}
172 174
173static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu) 175void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
176 unsigned int delay, bool all_cpus)
174{ 177{
175 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); 178 int i;
176 179
177 cancel_delayed_work_sync(&cdbs->work); 180 if (!all_cpus) {
181 __gov_queue_work(smp_processor_id(), dbs_data, delay);
182 } else {
183 for_each_cpu(i, policy->cpus)
184 __gov_queue_work(i, dbs_data, delay);
185 }
186}
187EXPORT_SYMBOL_GPL(gov_queue_work);
188
189static inline void gov_cancel_work(struct dbs_data *dbs_data,
190 struct cpufreq_policy *policy)
191{
192 struct cpu_dbs_common_info *cdbs;
193 int i;
194
195 for_each_cpu(i, policy->cpus) {
196 cdbs = dbs_data->cdata->get_cpu_cdbs(i);
197 cancel_delayed_work_sync(&cdbs->work);
198 }
178} 199}
179 200
180/* Will return if we need to evaluate cpu load again or not */ 201/* Will return if we need to evaluate cpu load again or not */
@@ -196,31 +217,130 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
196} 217}
197EXPORT_SYMBOL_GPL(need_load_eval); 218EXPORT_SYMBOL_GPL(need_load_eval);
198 219
199int cpufreq_governor_dbs(struct dbs_data *dbs_data, 220static void set_sampling_rate(struct dbs_data *dbs_data,
200 struct cpufreq_policy *policy, unsigned int event) 221 unsigned int sampling_rate)
201{ 222{
223 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
224 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
225 cs_tuners->sampling_rate = sampling_rate;
226 } else {
227 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
228 od_tuners->sampling_rate = sampling_rate;
229 }
230}
231
232int cpufreq_governor_dbs(struct cpufreq_policy *policy,
233 struct common_dbs_data *cdata, unsigned int event)
234{
235 struct dbs_data *dbs_data;
202 struct od_cpu_dbs_info_s *od_dbs_info = NULL; 236 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
203 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; 237 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
204 struct cs_ops *cs_ops = NULL;
205 struct od_ops *od_ops = NULL; 238 struct od_ops *od_ops = NULL;
206 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 239 struct od_dbs_tuners *od_tuners = NULL;
207 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 240 struct cs_dbs_tuners *cs_tuners = NULL;
208 struct cpu_dbs_common_info *cpu_cdbs; 241 struct cpu_dbs_common_info *cpu_cdbs;
209 unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; 242 unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
243 int io_busy = 0;
210 int rc; 244 int rc;
211 245
212 cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); 246 if (have_governor_per_policy())
247 dbs_data = policy->governor_data;
248 else
249 dbs_data = cdata->gdbs_data;
250
251 WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
252
253 switch (event) {
254 case CPUFREQ_GOV_POLICY_INIT:
255 if (have_governor_per_policy()) {
256 WARN_ON(dbs_data);
257 } else if (dbs_data) {
258 policy->governor_data = dbs_data;
259 return 0;
260 }
261
262 dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
263 if (!dbs_data) {
264 pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
265 return -ENOMEM;
266 }
267
268 dbs_data->cdata = cdata;
269 rc = cdata->init(dbs_data);
270 if (rc) {
271 pr_err("%s: POLICY_INIT: init() failed\n", __func__);
272 kfree(dbs_data);
273 return rc;
274 }
275
276 rc = sysfs_create_group(get_governor_parent_kobj(policy),
277 get_sysfs_attr(dbs_data));
278 if (rc) {
279 cdata->exit(dbs_data);
280 kfree(dbs_data);
281 return rc;
282 }
283
284 policy->governor_data = dbs_data;
213 285
214 if (dbs_data->governor == GOV_CONSERVATIVE) { 286 /* policy latency is in nS. Convert it to uS first */
215 cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 287 latency = policy->cpuinfo.transition_latency / 1000;
216 sampling_rate = &cs_tuners->sampling_rate; 288 if (latency == 0)
289 latency = 1;
290
291 /* Bring kernel and HW constraints together */
292 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
293 MIN_LATENCY_MULTIPLIER * latency);
294 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
295 latency * LATENCY_MULTIPLIER));
296
297 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
298 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
299
300 cpufreq_register_notifier(cs_ops->notifier_block,
301 CPUFREQ_TRANSITION_NOTIFIER);
302 }
303
304 if (!have_governor_per_policy())
305 cdata->gdbs_data = dbs_data;
306
307 return 0;
308 case CPUFREQ_GOV_POLICY_EXIT:
309 if ((policy->governor->initialized == 1) ||
310 have_governor_per_policy()) {
311 sysfs_remove_group(get_governor_parent_kobj(policy),
312 get_sysfs_attr(dbs_data));
313
314 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
315 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
316
317 cpufreq_unregister_notifier(cs_ops->notifier_block,
318 CPUFREQ_TRANSITION_NOTIFIER);
319 }
320
321 cdata->exit(dbs_data);
322 kfree(dbs_data);
323 cdata->gdbs_data = NULL;
324 }
325
326 policy->governor_data = NULL;
327 return 0;
328 }
329
330 cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
331
332 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
333 cs_tuners = dbs_data->tuners;
334 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
335 sampling_rate = cs_tuners->sampling_rate;
217 ignore_nice = cs_tuners->ignore_nice; 336 ignore_nice = cs_tuners->ignore_nice;
218 cs_ops = dbs_data->gov_ops;
219 } else { 337 } else {
220 od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 338 od_tuners = dbs_data->tuners;
221 sampling_rate = &od_tuners->sampling_rate; 339 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
340 sampling_rate = od_tuners->sampling_rate;
222 ignore_nice = od_tuners->ignore_nice; 341 ignore_nice = od_tuners->ignore_nice;
223 od_ops = dbs_data->gov_ops; 342 od_ops = dbs_data->cdata->gov_ops;
343 io_busy = od_tuners->io_is_busy;
224 } 344 }
225 345
226 switch (event) { 346 switch (event) {
@@ -232,96 +352,53 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
232 352
233 for_each_cpu(j, policy->cpus) { 353 for_each_cpu(j, policy->cpus) {
234 struct cpu_dbs_common_info *j_cdbs = 354 struct cpu_dbs_common_info *j_cdbs =
235 dbs_data->get_cpu_cdbs(j); 355 dbs_data->cdata->get_cpu_cdbs(j);
236 356
237 j_cdbs->cpu = j; 357 j_cdbs->cpu = j;
238 j_cdbs->cur_policy = policy; 358 j_cdbs->cur_policy = policy;
239 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, 359 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
240 &j_cdbs->prev_cpu_wall); 360 &j_cdbs->prev_cpu_wall, io_busy);
241 if (ignore_nice) 361 if (ignore_nice)
242 j_cdbs->prev_cpu_nice = 362 j_cdbs->prev_cpu_nice =
243 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 363 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
244 364
245 mutex_init(&j_cdbs->timer_mutex); 365 mutex_init(&j_cdbs->timer_mutex);
246 INIT_DEFERRABLE_WORK(&j_cdbs->work, 366 INIT_DEFERRABLE_WORK(&j_cdbs->work,
247 dbs_data->gov_dbs_timer); 367 dbs_data->cdata->gov_dbs_timer);
248 }
249
250 if (!policy->governor->initialized) {
251 rc = sysfs_create_group(cpufreq_global_kobject,
252 dbs_data->attr_group);
253 if (rc) {
254 mutex_unlock(&dbs_data->mutex);
255 return rc;
256 }
257 } 368 }
258 369
259 /* 370 /*
260 * conservative does not implement micro like ondemand 371 * conservative does not implement micro like ondemand
261	 * governor, thus we are bound to jiffies/HZ 373	 * governor, thus we are bound to jiffies/HZ
262 */ 373 */
263 if (dbs_data->governor == GOV_CONSERVATIVE) { 374 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
264 cs_dbs_info->down_skip = 0; 375 cs_dbs_info->down_skip = 0;
265 cs_dbs_info->enable = 1; 376 cs_dbs_info->enable = 1;
266 cs_dbs_info->requested_freq = policy->cur; 377 cs_dbs_info->requested_freq = policy->cur;
267
268 if (!policy->governor->initialized) {
269 cpufreq_register_notifier(cs_ops->notifier_block,
270 CPUFREQ_TRANSITION_NOTIFIER);
271
272 dbs_data->min_sampling_rate =
273 MIN_SAMPLING_RATE_RATIO *
274 jiffies_to_usecs(10);
275 }
276 } else { 378 } else {
277 od_dbs_info->rate_mult = 1; 379 od_dbs_info->rate_mult = 1;
278 od_dbs_info->sample_type = OD_NORMAL_SAMPLE; 380 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
279 od_ops->powersave_bias_init_cpu(cpu); 381 od_ops->powersave_bias_init_cpu(cpu);
280
281 if (!policy->governor->initialized)
282 od_tuners->io_is_busy = od_ops->io_busy();
283 } 382 }
284 383
285 if (policy->governor->initialized)
286 goto unlock;
287
288 /* policy latency is in nS. Convert it to uS first */
289 latency = policy->cpuinfo.transition_latency / 1000;
290 if (latency == 0)
291 latency = 1;
292
293 /* Bring kernel and HW constraints together */
294 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
295 MIN_LATENCY_MULTIPLIER * latency);
296 *sampling_rate = max(dbs_data->min_sampling_rate, latency *
297 LATENCY_MULTIPLIER);
298unlock:
299 mutex_unlock(&dbs_data->mutex); 384 mutex_unlock(&dbs_data->mutex);
300 385
301 /* Initiate timer time stamp */ 386 /* Initiate timer time stamp */
302 cpu_cdbs->time_stamp = ktime_get(); 387 cpu_cdbs->time_stamp = ktime_get();
303 388
304 for_each_cpu(j, policy->cpus) 389 gov_queue_work(dbs_data, policy,
305 dbs_timer_init(dbs_data, j, *sampling_rate); 390 delay_for_sampling_rate(sampling_rate), true);
306 break; 391 break;
307 392
308 case CPUFREQ_GOV_STOP: 393 case CPUFREQ_GOV_STOP:
309 if (dbs_data->governor == GOV_CONSERVATIVE) 394 if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
310 cs_dbs_info->enable = 0; 395 cs_dbs_info->enable = 0;
311 396
312 for_each_cpu(j, policy->cpus) 397 gov_cancel_work(dbs_data, policy);
313 dbs_timer_exit(dbs_data, j);
314 398
315 mutex_lock(&dbs_data->mutex); 399 mutex_lock(&dbs_data->mutex);
316 mutex_destroy(&cpu_cdbs->timer_mutex); 400 mutex_destroy(&cpu_cdbs->timer_mutex);
317 401
318 if (policy->governor->initialized == 1) {
319 sysfs_remove_group(cpufreq_global_kobject,
320 dbs_data->attr_group);
321 if (dbs_data->governor == GOV_CONSERVATIVE)
322 cpufreq_unregister_notifier(cs_ops->notifier_block,
323 CPUFREQ_TRANSITION_NOTIFIER);
324 }
325 mutex_unlock(&dbs_data->mutex); 402 mutex_unlock(&dbs_data->mutex);
326 403
327 break; 404 break;
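
The hunk above carries the core of this series: cpufreq_governor_dbs() now resolves its struct dbs_data either from the policy (per-policy tunables) or from the shared common_dbs_data instance, and the per-CPU sampling timers are queued and cancelled through gov_queue_work()/gov_cancel_work() instead of open-coded loops. The fragment below is a small standalone model of that lookup, not kernel code: the structs are reduced to the fields the lookup touches and the per_policy flag stands in for have_governor_per_policy().

#include <stdio.h>

struct dbs_data { unsigned int min_sampling_rate; };

struct common_dbs_data {
	struct dbs_data *gdbs_data;	/* single shared instance, if any */
};

struct policy {
	struct dbs_data *governor_data;	/* per-policy instance, if any */
};

static int per_policy;	/* stands in for have_governor_per_policy() */

static struct dbs_data *resolve(struct policy *p, struct common_dbs_data *c)
{
	/* Per-policy tunables when the driver asks for them, else the shared set. */
	return per_policy ? p->governor_data : c->gdbs_data;
}

int main(void)
{
	struct dbs_data shared = { .min_sampling_rate = 10000 };
	struct dbs_data mine = { .min_sampling_rate = 20000 };
	struct common_dbs_data cdata = { .gdbs_data = &shared };
	struct policy pol = { .governor_data = &mine };

	per_policy = 0;
	printf("shared tuners:     %u\n", resolve(&pol, &cdata)->min_sampling_rate);
	per_policy = 1;
	printf("per-policy tuners: %u\n", resolve(&pol, &cdata)->min_sampling_rate);
	return 0;
}
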
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc4bd2f6838a..8ac33538d0bd 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -34,20 +34,81 @@
34 */ 34 */
35#define MIN_SAMPLING_RATE_RATIO (2) 35#define MIN_SAMPLING_RATE_RATIO (2)
36#define LATENCY_MULTIPLIER (1000) 36#define LATENCY_MULTIPLIER (1000)
37#define MIN_LATENCY_MULTIPLIER (100) 37#define MIN_LATENCY_MULTIPLIER (20)
38#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 38#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
39 39
40/* Ondemand Sampling types */ 40/* Ondemand Sampling types */
41enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; 41enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
42 42
43/* Macro creating sysfs show routines */ 43/*
44#define show_one(_gov, file_name, object) \ 44 * Macro for creating governors sysfs routines
45static ssize_t show_##file_name \ 45 *
46 * - gov_sys: One governor instance per whole system
47 * - gov_pol: One governor instance per policy
48 */
49
50/* Create attributes */
51#define gov_sys_attr_ro(_name) \
52static struct global_attr _name##_gov_sys = \
53__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
54
55#define gov_sys_attr_rw(_name) \
56static struct global_attr _name##_gov_sys = \
57__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
58
59#define gov_pol_attr_ro(_name) \
60static struct freq_attr _name##_gov_pol = \
61__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
62
63#define gov_pol_attr_rw(_name) \
64static struct freq_attr _name##_gov_pol = \
65__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
66
67#define gov_sys_pol_attr_rw(_name) \
68 gov_sys_attr_rw(_name); \
69 gov_pol_attr_rw(_name)
70
71#define gov_sys_pol_attr_ro(_name) \
72 gov_sys_attr_ro(_name); \
73 gov_pol_attr_ro(_name)
74
75/* Create show/store routines */
76#define show_one(_gov, file_name) \
77static ssize_t show_##file_name##_gov_sys \
46(struct kobject *kobj, struct attribute *attr, char *buf) \ 78(struct kobject *kobj, struct attribute *attr, char *buf) \
47{ \ 79{ \
48 return sprintf(buf, "%u\n", _gov##_tuners.object); \ 80 struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
81 return sprintf(buf, "%u\n", tuners->file_name); \
82} \
83 \
84static ssize_t show_##file_name##_gov_pol \
85(struct cpufreq_policy *policy, char *buf) \
86{ \
87 struct dbs_data *dbs_data = policy->governor_data; \
88 struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
89 return sprintf(buf, "%u\n", tuners->file_name); \
90}
91
92#define store_one(_gov, file_name) \
93static ssize_t store_##file_name##_gov_sys \
94(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
95{ \
96 struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
97 return store_##file_name(dbs_data, buf, count); \
98} \
99 \
100static ssize_t store_##file_name##_gov_pol \
101(struct cpufreq_policy *policy, const char *buf, size_t count) \
102{ \
103 struct dbs_data *dbs_data = policy->governor_data; \
104 return store_##file_name(dbs_data, buf, count); \
49} 105}
50 106
107#define show_store_one(_gov, file_name) \
108show_one(_gov, file_name); \
109store_one(_gov, file_name)
110
111/* create helper routines */
51#define define_get_cpu_dbs_routines(_dbs_info) \ 112#define define_get_cpu_dbs_routines(_dbs_info) \
52static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ 113static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
53{ \ 114{ \
@@ -87,7 +148,6 @@ struct cpu_dbs_common_info {
87 148
88struct od_cpu_dbs_info_s { 149struct od_cpu_dbs_info_s {
89 struct cpu_dbs_common_info cdbs; 150 struct cpu_dbs_common_info cdbs;
90 u64 prev_cpu_iowait;
91 struct cpufreq_frequency_table *freq_table; 151 struct cpufreq_frequency_table *freq_table;
92 unsigned int freq_lo; 152 unsigned int freq_lo;
93 unsigned int freq_lo_jiffies; 153 unsigned int freq_lo_jiffies;
@@ -103,7 +163,7 @@ struct cs_cpu_dbs_info_s {
103 unsigned int enable:1; 163 unsigned int enable:1;
104}; 164};
105 165
106/* Governors sysfs tunables */ 166/* Per policy Governors sysfs tunables */
107struct od_dbs_tuners { 167struct od_dbs_tuners {
108 unsigned int ignore_nice; 168 unsigned int ignore_nice;
109 unsigned int sampling_rate; 169 unsigned int sampling_rate;
@@ -123,31 +183,42 @@ struct cs_dbs_tuners {
123 unsigned int freq_step; 183 unsigned int freq_step;
124}; 184};
125 185
126/* Per Governor data */ 186/* Common Governor data across policies */
127struct dbs_data { 187struct dbs_data;
188struct common_dbs_data {
128 /* Common across governors */ 189 /* Common across governors */
129 #define GOV_ONDEMAND 0 190 #define GOV_ONDEMAND 0
130 #define GOV_CONSERVATIVE 1 191 #define GOV_CONSERVATIVE 1
131 int governor; 192 int governor;
132 unsigned int min_sampling_rate; 193 struct attribute_group *attr_group_gov_sys; /* one governor - system */
133 struct attribute_group *attr_group; 194 struct attribute_group *attr_group_gov_pol; /* one governor - policy */
134 void *tuners;
135 195
136 /* dbs_mutex protects dbs_enable in governor start/stop */ 196 /* Common data for platforms that don't set have_governor_per_policy */
137 struct mutex mutex; 197 struct dbs_data *gdbs_data;
138 198
139 struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); 199 struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
140 void *(*get_cpu_dbs_info_s)(int cpu); 200 void *(*get_cpu_dbs_info_s)(int cpu);
141 void (*gov_dbs_timer)(struct work_struct *work); 201 void (*gov_dbs_timer)(struct work_struct *work);
142 void (*gov_check_cpu)(int cpu, unsigned int load); 202 void (*gov_check_cpu)(int cpu, unsigned int load);
203 int (*init)(struct dbs_data *dbs_data);
204 void (*exit)(struct dbs_data *dbs_data);
143 205
144 /* Governor specific ops, see below */ 206 /* Governor specific ops, see below */
145 void *gov_ops; 207 void *gov_ops;
146}; 208};
147 209
210/* Governor Per policy data */
211struct dbs_data {
212 struct common_dbs_data *cdata;
213 unsigned int min_sampling_rate;
214 void *tuners;
215
216 /* dbs_mutex protects dbs_enable in governor start/stop */
217 struct mutex mutex;
218};
219
148/* Governor specific ops, will be passed to dbs_data->gov_ops */ 220/* Governor specific ops, will be passed to dbs_data->gov_ops */
149struct od_ops { 221struct od_ops {
150 int (*io_busy)(void);
151 void (*powersave_bias_init_cpu)(int cpu); 222 void (*powersave_bias_init_cpu)(int cpu);
152 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, 223 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
153 unsigned int freq_next, unsigned int relation); 224 unsigned int freq_next, unsigned int relation);
@@ -169,10 +240,31 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
169 return delay; 240 return delay;
170} 241}
171 242
172u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); 243#define declare_show_sampling_rate_min(_gov) \
244static ssize_t show_sampling_rate_min_gov_sys \
245(struct kobject *kobj, struct attribute *attr, char *buf) \
246{ \
247 struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
248 return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
249} \
250 \
251static ssize_t show_sampling_rate_min_gov_pol \
252(struct cpufreq_policy *policy, char *buf) \
253{ \
254 struct dbs_data *dbs_data = policy->governor_data; \
255 return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
256}
257
258u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
173void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); 259void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
174bool need_load_eval(struct cpu_dbs_common_info *cdbs, 260bool need_load_eval(struct cpu_dbs_common_info *cdbs,
175 unsigned int sampling_rate); 261 unsigned int sampling_rate);
176int cpufreq_governor_dbs(struct dbs_data *dbs_data, 262int cpufreq_governor_dbs(struct cpufreq_policy *policy,
177 struct cpufreq_policy *policy, unsigned int event); 263 struct common_dbs_data *cdata, unsigned int event);
264void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
265 unsigned int delay, bool all_cpus);
266void od_register_powersave_bias_handler(unsigned int (*f)
267 (struct cpufreq_policy *, unsigned int, unsigned int),
268 unsigned int powersave_bias);
269void od_unregister_powersave_bias_handler(void);
178#endif /* _CPUFREQ_GOVERNOR_H */ 270#endif /* _CPUFREQ_GOVERNOR_H */
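
The header now generates each tunable's sysfs handlers twice by token pasting: a *_gov_sys variant that reads the governor-wide gdbs_data and a *_gov_pol variant that reads policy->governor_data. The fragment below is a standalone illustration of that expansion pattern, not the kernel macros themselves; the two static tuner structs merely stand in for the shared and per-policy dbs_data, and the show routines return plain integers instead of formatting into a sysfs buffer.

#include <stdio.h>

struct tuners { unsigned int sampling_rate; };

static struct tuners sys_tuners = { 10000 };	/* stands in for gdbs_data->tuners */
static struct tuners pol_tuners = { 20000 };	/* stands in for policy->governor_data->tuners */

#define show_one(file_name)					\
static unsigned int show_##file_name##_gov_sys(void)		\
{								\
	return sys_tuners.file_name;				\
}								\
static unsigned int show_##file_name##_gov_pol(void)		\
{								\
	return pol_tuners.file_name;				\
}

show_one(sampling_rate)	/* emits show_sampling_rate_gov_sys() and _gov_pol() */

int main(void)
{
	printf("%u %u\n", show_sampling_rate_gov_sys(),
	       show_sampling_rate_gov_pol());
	return 0;
}
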
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f3eb26cd848f..b0ffef96bf77 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -20,9 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/percpu-defs.h> 22#include <linux/percpu-defs.h>
23#include <linux/slab.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
24#include <linux/tick.h> 25#include <linux/tick.h>
25#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/cpu.h>
26 28
27#include "cpufreq_governor.h" 29#include "cpufreq_governor.h"
28 30
@@ -37,22 +39,14 @@
37#define MIN_FREQUENCY_UP_THRESHOLD (11) 39#define MIN_FREQUENCY_UP_THRESHOLD (11)
38#define MAX_FREQUENCY_UP_THRESHOLD (100) 40#define MAX_FREQUENCY_UP_THRESHOLD (100)
39 41
40static struct dbs_data od_dbs_data;
41static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); 42static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
42 43
44static struct od_ops od_ops;
45
43#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND 46#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
44static struct cpufreq_governor cpufreq_gov_ondemand; 47static struct cpufreq_governor cpufreq_gov_ondemand;
45#endif 48#endif
46 49
47static struct od_dbs_tuners od_tuners = {
48 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
49 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
50 .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
51 DEF_FREQUENCY_DOWN_DIFFERENTIAL,
52 .ignore_nice = 0,
53 .powersave_bias = 0,
54};
55
56static void ondemand_powersave_bias_init_cpu(int cpu) 50static void ondemand_powersave_bias_init_cpu(int cpu)
57{ 51{
58 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 52 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -89,7 +83,7 @@ static int should_io_be_busy(void)
89 * Returns the freq_hi to be used right now and will set freq_hi_jiffies, 83 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
90 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 84 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
91 */ 85 */
92static unsigned int powersave_bias_target(struct cpufreq_policy *policy, 86static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
93 unsigned int freq_next, unsigned int relation) 87 unsigned int freq_next, unsigned int relation)
94{ 88{
95 unsigned int freq_req, freq_reduc, freq_avg; 89 unsigned int freq_req, freq_reduc, freq_avg;
@@ -98,6 +92,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
98 unsigned int jiffies_total, jiffies_hi, jiffies_lo; 92 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
99 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 93 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
100 policy->cpu); 94 policy->cpu);
95 struct dbs_data *dbs_data = policy->governor_data;
96 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
101 97
102 if (!dbs_info->freq_table) { 98 if (!dbs_info->freq_table) {
103 dbs_info->freq_lo = 0; 99 dbs_info->freq_lo = 0;
@@ -108,7 +104,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
108 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, 104 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
109 relation, &index); 105 relation, &index);
110 freq_req = dbs_info->freq_table[index].frequency; 106 freq_req = dbs_info->freq_table[index].frequency;
111 freq_reduc = freq_req * od_tuners.powersave_bias / 1000; 107 freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
112 freq_avg = freq_req - freq_reduc; 108 freq_avg = freq_req - freq_reduc;
113 109
114 /* Find freq bounds for freq_avg in freq_table */ 110 /* Find freq bounds for freq_avg in freq_table */
@@ -127,7 +123,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
127 dbs_info->freq_lo_jiffies = 0; 123 dbs_info->freq_lo_jiffies = 0;
128 return freq_lo; 124 return freq_lo;
129 } 125 }
130 jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); 126 jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
131 jiffies_hi = (freq_avg - freq_lo) * jiffies_total; 127 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
132 jiffies_hi += ((freq_hi - freq_lo) / 2); 128 jiffies_hi += ((freq_hi - freq_lo) / 2);
133 jiffies_hi /= (freq_hi - freq_lo); 129 jiffies_hi /= (freq_hi - freq_lo);
@@ -148,12 +144,16 @@ static void ondemand_powersave_bias_init(void)
148 144
149static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) 145static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
150{ 146{
151 if (od_tuners.powersave_bias) 147 struct dbs_data *dbs_data = p->governor_data;
152 freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); 148 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
149
150 if (od_tuners->powersave_bias)
151 freq = od_ops.powersave_bias_target(p, freq,
152 CPUFREQ_RELATION_H);
153 else if (p->cur == p->max) 153 else if (p->cur == p->max)
154 return; 154 return;
155 155
156 __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? 156 __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
157 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); 157 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
158} 158}
159 159
@@ -170,15 +170,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
170{ 170{
171 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 171 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
172 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 172 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
173 struct dbs_data *dbs_data = policy->governor_data;
174 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
173 175
174 dbs_info->freq_lo = 0; 176 dbs_info->freq_lo = 0;
175 177
176 /* Check for frequency increase */ 178 /* Check for frequency increase */
177 if (load_freq > od_tuners.up_threshold * policy->cur) { 179 if (load_freq > od_tuners->up_threshold * policy->cur) {
178 /* If switching to max speed, apply sampling_down_factor */ 180 /* If switching to max speed, apply sampling_down_factor */
179 if (policy->cur < policy->max) 181 if (policy->cur < policy->max)
180 dbs_info->rate_mult = 182 dbs_info->rate_mult =
181 od_tuners.sampling_down_factor; 183 od_tuners->sampling_down_factor;
182 dbs_freq_increase(policy, policy->max); 184 dbs_freq_increase(policy, policy->max);
183 return; 185 return;
184 } 186 }
@@ -193,9 +195,10 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
193 * support the current CPU usage without triggering the up policy. To be 195 * support the current CPU usage without triggering the up policy. To be
194 * safe, we focus 10 points under the threshold. 196 * safe, we focus 10 points under the threshold.
195 */ 197 */
196 if (load_freq < od_tuners.adj_up_threshold * policy->cur) { 198 if (load_freq < od_tuners->adj_up_threshold
199 * policy->cur) {
197 unsigned int freq_next; 200 unsigned int freq_next;
198 freq_next = load_freq / od_tuners.adj_up_threshold; 201 freq_next = load_freq / od_tuners->adj_up_threshold;
199 202
200 /* No longer fully busy, reset rate_mult */ 203 /* No longer fully busy, reset rate_mult */
201 dbs_info->rate_mult = 1; 204 dbs_info->rate_mult = 1;
@@ -203,65 +206,62 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
203 if (freq_next < policy->min) 206 if (freq_next < policy->min)
204 freq_next = policy->min; 207 freq_next = policy->min;
205 208
206 if (!od_tuners.powersave_bias) { 209 if (!od_tuners->powersave_bias) {
207 __cpufreq_driver_target(policy, freq_next, 210 __cpufreq_driver_target(policy, freq_next,
208 CPUFREQ_RELATION_L); 211 CPUFREQ_RELATION_L);
209 } else { 212 return;
210 int freq = powersave_bias_target(policy, freq_next,
211 CPUFREQ_RELATION_L);
212 __cpufreq_driver_target(policy, freq,
213 CPUFREQ_RELATION_L);
214 } 213 }
214
215 freq_next = od_ops.powersave_bias_target(policy, freq_next,
216 CPUFREQ_RELATION_L);
217 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
215 } 218 }
216} 219}
217 220
218static void od_dbs_timer(struct work_struct *work) 221static void od_dbs_timer(struct work_struct *work)
219{ 222{
220 struct delayed_work *dw = to_delayed_work(work);
221 struct od_cpu_dbs_info_s *dbs_info = 223 struct od_cpu_dbs_info_s *dbs_info =
222 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); 224 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
223 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 225 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
224 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, 226 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
225 cpu); 227 cpu);
226 int delay, sample_type = core_dbs_info->sample_type; 228 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
227 bool eval_load; 229 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
230 int delay = 0, sample_type = core_dbs_info->sample_type;
231 bool modify_all = true;
228 232
229 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 233 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
230 eval_load = need_load_eval(&core_dbs_info->cdbs, 234 if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
231 od_tuners.sampling_rate); 235 modify_all = false;
236 goto max_delay;
237 }
232 238
233 /* Common NORMAL_SAMPLE setup */ 239 /* Common NORMAL_SAMPLE setup */
234 core_dbs_info->sample_type = OD_NORMAL_SAMPLE; 240 core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
235 if (sample_type == OD_SUB_SAMPLE) { 241 if (sample_type == OD_SUB_SAMPLE) {
236 delay = core_dbs_info->freq_lo_jiffies; 242 delay = core_dbs_info->freq_lo_jiffies;
237 if (eval_load) 243 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
238 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, 244 core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
239 core_dbs_info->freq_lo,
240 CPUFREQ_RELATION_H);
241 } else { 245 } else {
242 if (eval_load) 246 dbs_check_cpu(dbs_data, cpu);
243 dbs_check_cpu(&od_dbs_data, cpu);
244 if (core_dbs_info->freq_lo) { 247 if (core_dbs_info->freq_lo) {
245 /* Setup timer for SUB_SAMPLE */ 248 /* Setup timer for SUB_SAMPLE */
246 core_dbs_info->sample_type = OD_SUB_SAMPLE; 249 core_dbs_info->sample_type = OD_SUB_SAMPLE;
247 delay = core_dbs_info->freq_hi_jiffies; 250 delay = core_dbs_info->freq_hi_jiffies;
248 } else {
249 delay = delay_for_sampling_rate(od_tuners.sampling_rate
250 * core_dbs_info->rate_mult);
251 } 251 }
252 } 252 }
253 253
254 schedule_delayed_work_on(smp_processor_id(), dw, delay); 254max_delay:
255 if (!delay)
256 delay = delay_for_sampling_rate(od_tuners->sampling_rate
257 * core_dbs_info->rate_mult);
258
259 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
255 mutex_unlock(&core_dbs_info->cdbs.timer_mutex); 260 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
256} 261}
257 262
258/************************** sysfs interface ************************/ 263/************************** sysfs interface ************************/
259 264static struct common_dbs_data od_dbs_cdata;
260static ssize_t show_sampling_rate_min(struct kobject *kobj,
261 struct attribute *attr, char *buf)
262{
263 return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
264}
265 265
266/** 266/**
267 * update_sampling_rate - update sampling rate effective immediately if needed. 267 * update_sampling_rate - update sampling rate effective immediately if needed.
@@ -276,12 +276,14 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
276 * reducing the sampling rate, we need to make the new value effective 276 * reducing the sampling rate, we need to make the new value effective
277 * immediately. 277 * immediately.
278 */ 278 */
279static void update_sampling_rate(unsigned int new_rate) 279static void update_sampling_rate(struct dbs_data *dbs_data,
280 unsigned int new_rate)
280{ 281{
282 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
281 int cpu; 283 int cpu;
282 284
283 od_tuners.sampling_rate = new_rate = max(new_rate, 285 od_tuners->sampling_rate = new_rate = max(new_rate,
284 od_dbs_data.min_sampling_rate); 286 dbs_data->min_sampling_rate);
285 287
286 for_each_online_cpu(cpu) { 288 for_each_online_cpu(cpu) {
287 struct cpufreq_policy *policy; 289 struct cpufreq_policy *policy;
@@ -314,42 +316,54 @@ static void update_sampling_rate(unsigned int new_rate)
314 cancel_delayed_work_sync(&dbs_info->cdbs.work); 316 cancel_delayed_work_sync(&dbs_info->cdbs.work);
315 mutex_lock(&dbs_info->cdbs.timer_mutex); 317 mutex_lock(&dbs_info->cdbs.timer_mutex);
316 318
317 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, 319 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
318 usecs_to_jiffies(new_rate)); 320 usecs_to_jiffies(new_rate), true);
319 321
320 } 322 }
321 mutex_unlock(&dbs_info->cdbs.timer_mutex); 323 mutex_unlock(&dbs_info->cdbs.timer_mutex);
322 } 324 }
323} 325}
324 326
325static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, 327static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
326 const char *buf, size_t count) 328 size_t count)
327{ 329{
328 unsigned int input; 330 unsigned int input;
329 int ret; 331 int ret;
330 ret = sscanf(buf, "%u", &input); 332 ret = sscanf(buf, "%u", &input);
331 if (ret != 1) 333 if (ret != 1)
332 return -EINVAL; 334 return -EINVAL;
333 update_sampling_rate(input); 335
336 update_sampling_rate(dbs_data, input);
334 return count; 337 return count;
335} 338}
336 339
337static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, 340static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
338 const char *buf, size_t count) 341 size_t count)
339{ 342{
343 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
340 unsigned int input; 344 unsigned int input;
341 int ret; 345 int ret;
346 unsigned int j;
342 347
343 ret = sscanf(buf, "%u", &input); 348 ret = sscanf(buf, "%u", &input);
344 if (ret != 1) 349 if (ret != 1)
345 return -EINVAL; 350 return -EINVAL;
346 od_tuners.io_is_busy = !!input; 351 od_tuners->io_is_busy = !!input;
352
353 /* we need to re-evaluate prev_cpu_idle */
354 for_each_online_cpu(j) {
355 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
356 j);
357 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
358 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
359 }
347 return count; 360 return count;
348} 361}
349 362
350static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 363static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
351 const char *buf, size_t count) 364 size_t count)
352{ 365{
366 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
353 unsigned int input; 367 unsigned int input;
354 int ret; 368 int ret;
355 ret = sscanf(buf, "%u", &input); 369 ret = sscanf(buf, "%u", &input);
@@ -359,23 +373,24 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
359 return -EINVAL; 373 return -EINVAL;
360 } 374 }
361 /* Calculate the new adj_up_threshold */ 375 /* Calculate the new adj_up_threshold */
362 od_tuners.adj_up_threshold += input; 376 od_tuners->adj_up_threshold += input;
363 od_tuners.adj_up_threshold -= od_tuners.up_threshold; 377 od_tuners->adj_up_threshold -= od_tuners->up_threshold;
364 378
365 od_tuners.up_threshold = input; 379 od_tuners->up_threshold = input;
366 return count; 380 return count;
367} 381}
368 382
369static ssize_t store_sampling_down_factor(struct kobject *a, 383static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
370 struct attribute *b, const char *buf, size_t count) 384 const char *buf, size_t count)
371{ 385{
386 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
372 unsigned int input, j; 387 unsigned int input, j;
373 int ret; 388 int ret;
374 ret = sscanf(buf, "%u", &input); 389 ret = sscanf(buf, "%u", &input);
375 390
376 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) 391 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
377 return -EINVAL; 392 return -EINVAL;
378 od_tuners.sampling_down_factor = input; 393 od_tuners->sampling_down_factor = input;
379 394
380 /* Reset down sampling multiplier in case it was active */ 395 /* Reset down sampling multiplier in case it was active */
381 for_each_online_cpu(j) { 396 for_each_online_cpu(j) {
@@ -386,9 +401,10 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
386 return count; 401 return count;
387} 402}
388 403
389static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, 404static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
390 const char *buf, size_t count) 405 size_t count)
391{ 406{
407 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
392 unsigned int input; 408 unsigned int input;
393 int ret; 409 int ret;
394 410
@@ -401,18 +417,18 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
401 if (input > 1) 417 if (input > 1)
402 input = 1; 418 input = 1;
403 419
404 if (input == od_tuners.ignore_nice) { /* nothing to do */ 420 if (input == od_tuners->ignore_nice) { /* nothing to do */
405 return count; 421 return count;
406 } 422 }
407 od_tuners.ignore_nice = input; 423 od_tuners->ignore_nice = input;
408 424
409 /* we need to re-evaluate prev_cpu_idle */ 425 /* we need to re-evaluate prev_cpu_idle */
410 for_each_online_cpu(j) { 426 for_each_online_cpu(j) {
411 struct od_cpu_dbs_info_s *dbs_info; 427 struct od_cpu_dbs_info_s *dbs_info;
412 dbs_info = &per_cpu(od_cpu_dbs_info, j); 428 dbs_info = &per_cpu(od_cpu_dbs_info, j);
413 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 429 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
414 &dbs_info->cdbs.prev_cpu_wall); 430 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
415 if (od_tuners.ignore_nice) 431 if (od_tuners->ignore_nice)
416 dbs_info->cdbs.prev_cpu_nice = 432 dbs_info->cdbs.prev_cpu_nice =
417 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 433 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
418 434
@@ -420,9 +436,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
420 return count; 436 return count;
421} 437}
422 438
423static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, 439static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
424 const char *buf, size_t count) 440 size_t count)
425{ 441{
442 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
426 unsigned int input; 443 unsigned int input;
427 int ret; 444 int ret;
428 ret = sscanf(buf, "%u", &input); 445 ret = sscanf(buf, "%u", &input);
@@ -433,68 +450,179 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
433 if (input > 1000) 450 if (input > 1000)
434 input = 1000; 451 input = 1000;
435 452
436 od_tuners.powersave_bias = input; 453 od_tuners->powersave_bias = input;
437 ondemand_powersave_bias_init(); 454 ondemand_powersave_bias_init();
438 return count; 455 return count;
439} 456}
440 457
441show_one(od, sampling_rate, sampling_rate); 458show_store_one(od, sampling_rate);
442show_one(od, io_is_busy, io_is_busy); 459show_store_one(od, io_is_busy);
443show_one(od, up_threshold, up_threshold); 460show_store_one(od, up_threshold);
444show_one(od, sampling_down_factor, sampling_down_factor); 461show_store_one(od, sampling_down_factor);
445show_one(od, ignore_nice_load, ignore_nice); 462show_store_one(od, ignore_nice);
446show_one(od, powersave_bias, powersave_bias); 463show_store_one(od, powersave_bias);
447 464declare_show_sampling_rate_min(od);
448define_one_global_rw(sampling_rate); 465
449define_one_global_rw(io_is_busy); 466gov_sys_pol_attr_rw(sampling_rate);
450define_one_global_rw(up_threshold); 467gov_sys_pol_attr_rw(io_is_busy);
451define_one_global_rw(sampling_down_factor); 468gov_sys_pol_attr_rw(up_threshold);
452define_one_global_rw(ignore_nice_load); 469gov_sys_pol_attr_rw(sampling_down_factor);
453define_one_global_rw(powersave_bias); 470gov_sys_pol_attr_rw(ignore_nice);
454define_one_global_ro(sampling_rate_min); 471gov_sys_pol_attr_rw(powersave_bias);
455 472gov_sys_pol_attr_ro(sampling_rate_min);
456static struct attribute *dbs_attributes[] = { 473
457 &sampling_rate_min.attr, 474static struct attribute *dbs_attributes_gov_sys[] = {
458 &sampling_rate.attr, 475 &sampling_rate_min_gov_sys.attr,
459 &up_threshold.attr, 476 &sampling_rate_gov_sys.attr,
460 &sampling_down_factor.attr, 477 &up_threshold_gov_sys.attr,
461 &ignore_nice_load.attr, 478 &sampling_down_factor_gov_sys.attr,
462 &powersave_bias.attr, 479 &ignore_nice_gov_sys.attr,
463 &io_is_busy.attr, 480 &powersave_bias_gov_sys.attr,
481 &io_is_busy_gov_sys.attr,
482 NULL
483};
484
485static struct attribute_group od_attr_group_gov_sys = {
486 .attrs = dbs_attributes_gov_sys,
487 .name = "ondemand",
488};
489
490static struct attribute *dbs_attributes_gov_pol[] = {
491 &sampling_rate_min_gov_pol.attr,
492 &sampling_rate_gov_pol.attr,
493 &up_threshold_gov_pol.attr,
494 &sampling_down_factor_gov_pol.attr,
495 &ignore_nice_gov_pol.attr,
496 &powersave_bias_gov_pol.attr,
497 &io_is_busy_gov_pol.attr,
464 NULL 498 NULL
465}; 499};
466 500
467static struct attribute_group od_attr_group = { 501static struct attribute_group od_attr_group_gov_pol = {
468 .attrs = dbs_attributes, 502 .attrs = dbs_attributes_gov_pol,
469 .name = "ondemand", 503 .name = "ondemand",
470}; 504};
471 505
472/************************** sysfs end ************************/ 506/************************** sysfs end ************************/
473 507
508static int od_init(struct dbs_data *dbs_data)
509{
510 struct od_dbs_tuners *tuners;
511 u64 idle_time;
512 int cpu;
513
514 tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
515 if (!tuners) {
516 pr_err("%s: kzalloc failed\n", __func__);
517 return -ENOMEM;
518 }
519
520 cpu = get_cpu();
521 idle_time = get_cpu_idle_time_us(cpu, NULL);
522 put_cpu();
523 if (idle_time != -1ULL) {
524 /* Idle micro accounting is supported. Use finer thresholds */
525 tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
526 tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
527 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
528 /*
529 * In nohz/micro accounting case we set the minimum frequency
530 * not depending on HZ, but fixed (very low). The deferred
531 * timer might skip some samples if idle/sleeping as needed.
532 */
533 dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
534 } else {
535 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
536 tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
537 DEF_FREQUENCY_DOWN_DIFFERENTIAL;
538
539 /* For correct statistics, we need 10 ticks for each measure */
540 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
541 jiffies_to_usecs(10);
542 }
543
544 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
545 tuners->ignore_nice = 0;
546 tuners->powersave_bias = 0;
547 tuners->io_is_busy = should_io_be_busy();
548
549 dbs_data->tuners = tuners;
550 pr_info("%s: tuners %p\n", __func__, tuners);
551 mutex_init(&dbs_data->mutex);
552 return 0;
553}
554
555static void od_exit(struct dbs_data *dbs_data)
556{
557 kfree(dbs_data->tuners);
558}
559
474define_get_cpu_dbs_routines(od_cpu_dbs_info); 560define_get_cpu_dbs_routines(od_cpu_dbs_info);
475 561
476static struct od_ops od_ops = { 562static struct od_ops od_ops = {
477 .io_busy = should_io_be_busy,
478 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, 563 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
479 .powersave_bias_target = powersave_bias_target, 564 .powersave_bias_target = generic_powersave_bias_target,
480 .freq_increase = dbs_freq_increase, 565 .freq_increase = dbs_freq_increase,
481}; 566};
482 567
483static struct dbs_data od_dbs_data = { 568static struct common_dbs_data od_dbs_cdata = {
484 .governor = GOV_ONDEMAND, 569 .governor = GOV_ONDEMAND,
485 .attr_group = &od_attr_group, 570 .attr_group_gov_sys = &od_attr_group_gov_sys,
486 .tuners = &od_tuners, 571 .attr_group_gov_pol = &od_attr_group_gov_pol,
487 .get_cpu_cdbs = get_cpu_cdbs, 572 .get_cpu_cdbs = get_cpu_cdbs,
488 .get_cpu_dbs_info_s = get_cpu_dbs_info_s, 573 .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
489 .gov_dbs_timer = od_dbs_timer, 574 .gov_dbs_timer = od_dbs_timer,
490 .gov_check_cpu = od_check_cpu, 575 .gov_check_cpu = od_check_cpu,
491 .gov_ops = &od_ops, 576 .gov_ops = &od_ops,
577 .init = od_init,
578 .exit = od_exit,
492}; 579};
493 580
581static void od_set_powersave_bias(unsigned int powersave_bias)
582{
583 struct cpufreq_policy *policy;
584 struct dbs_data *dbs_data;
585 struct od_dbs_tuners *od_tuners;
586 unsigned int cpu;
587 cpumask_t done;
588
589 cpumask_clear(&done);
590
591 get_online_cpus();
592 for_each_online_cpu(cpu) {
593 if (cpumask_test_cpu(cpu, &done))
594 continue;
595
596 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
597 dbs_data = policy->governor_data;
598 od_tuners = dbs_data->tuners;
599 od_tuners->powersave_bias = powersave_bias;
600
601 cpumask_or(&done, &done, policy->cpus);
602 }
603 put_online_cpus();
604}
605
606void od_register_powersave_bias_handler(unsigned int (*f)
607 (struct cpufreq_policy *, unsigned int, unsigned int),
608 unsigned int powersave_bias)
609{
610 od_ops.powersave_bias_target = f;
611 od_set_powersave_bias(powersave_bias);
612}
613EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
614
615void od_unregister_powersave_bias_handler(void)
616{
617 od_ops.powersave_bias_target = generic_powersave_bias_target;
618 od_set_powersave_bias(0);
619}
620EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
621
494static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, 622static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
495 unsigned int event) 623 unsigned int event)
496{ 624{
497 return cpufreq_governor_dbs(&od_dbs_data, policy, event); 625 return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
498} 626}
499 627
500#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND 628#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
@@ -509,29 +637,6 @@ struct cpufreq_governor cpufreq_gov_ondemand = {
509 637
510static int __init cpufreq_gov_dbs_init(void) 638static int __init cpufreq_gov_dbs_init(void)
511{ 639{
512 u64 idle_time;
513 int cpu = get_cpu();
514
515 mutex_init(&od_dbs_data.mutex);
516 idle_time = get_cpu_idle_time_us(cpu, NULL);
517 put_cpu();
518 if (idle_time != -1ULL) {
519 /* Idle micro accounting is supported. Use finer thresholds */
520 od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
521 od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
522 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
523 /*
524 * In nohz/micro accounting case we set the minimum frequency
525 * not depending on HZ, but fixed (very low). The deferred
526 * timer might skip some samples if idle/sleeping as needed.
527 */
528 od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
529 } else {
530 /* For correct statistics, we need 10 ticks for each measure */
531 od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
532 jiffies_to_usecs(10);
533 }
534
535 return cpufreq_register_governor(&cpufreq_gov_ondemand); 640 return cpufreq_register_governor(&cpufreq_gov_ondemand);
536} 641}
537 642
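
The two handlers exported at the end of the ondemand diff, od_register_powersave_bias_handler() and od_unregister_powersave_bias_handler(), let another module substitute its own powersave-bias target function and push a bias value into every active policy. A hypothetical client is sketched below; only the two registration calls and the callback signature come from the diff, while my_bias_target, its cap-at-max body, and the bias value 400 are invented for illustration.

/* Hypothetical client module; assumes it is built inside drivers/cpufreq. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include "cpufreq_governor.h"

static unsigned int my_bias_target(struct cpufreq_policy *policy,
				   unsigned int freq_next,
				   unsigned int relation)
{
	/* Invented policy: never request more than policy->max. */
	return min(freq_next, policy->max);
}

static int __init my_bias_init(void)
{
	/* Take over the ondemand bias target with a 400/1000 bias (value invented). */
	od_register_powersave_bias_handler(my_bias_target, 400);
	return 0;
}

static void __exit my_bias_exit(void)
{
	/* Restore generic_powersave_bias_target() and a zero bias. */
	od_unregister_powersave_bias_handler();
}

module_init(my_bias_init);
module_exit(my_bias_exit);
MODULE_LICENSE("GPL");
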
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
new file mode 100644
index 000000000000..ee142c490575
--- /dev/null
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -0,0 +1,146 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <hwregs/reg_rdwr.h>
6#include <hwregs/clkgen_defs.h>
7#include <hwregs/ddr2_defs.h>
8
9static int
10cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
11 void *data);
12
13static struct notifier_block cris_sdram_freq_notifier_block = {
14 .notifier_call = cris_sdram_freq_notifier
15};
16
17static struct cpufreq_frequency_table cris_freq_table[] = {
18 {0x01, 6000},
19 {0x02, 200000},
20 {0, CPUFREQ_TABLE_END},
21};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_clkgen_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
31 unsigned int state)
32{
33 struct cpufreq_freqs freqs;
34 reg_clkgen_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
36
37 freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
38 freqs.new = cris_freq_table[state].frequency;
39
40 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
41
42 local_irq_disable();
43
44 /* Even though we may be SMP they will share the same clock
45 * so all settings are made on CPU0. */
46 if (cris_freq_table[state].frequency == 200000)
47 clk_ctrl.pll = 1;
48 else
49 clk_ctrl.pll = 0;
50 REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
51
52 local_irq_enable();
53
54 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
55};
56
57static int cris_freq_verify(struct cpufreq_policy *policy)
58{
59 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
60}
61
62static int cris_freq_target(struct cpufreq_policy *policy,
63 unsigned int target_freq,
64 unsigned int relation)
65{
66 unsigned int newstate = 0;
67
68 if (cpufreq_frequency_table_target(policy, cris_freq_table,
69 target_freq, relation, &newstate))
70 return -EINVAL;
71
72 cris_freq_set_cpu_state(policy, newstate);
73
74 return 0;
75}
76
77static int cris_freq_cpu_init(struct cpufreq_policy *policy)
78{
79 int result;
80
81 /* cpuinfo and default policy values */
82 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
83 policy->cur = cris_freq_get_cpu_frequency(0);
84
85 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
86 if (result)
87 return (result);
88
89 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
90
91 return 0;
92}
93
94
95static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
96{
97 cpufreq_frequency_table_put_attr(policy->cpu);
98 return 0;
99}
100
101
102static struct freq_attr *cris_freq_attr[] = {
103 &cpufreq_freq_attr_scaling_available_freqs,
104 NULL,
105};
106
107static struct cpufreq_driver cris_freq_driver = {
108 .get = cris_freq_get_cpu_frequency,
109 .verify = cris_freq_verify,
110 .target = cris_freq_target,
111 .init = cris_freq_cpu_init,
112 .exit = cris_freq_cpu_exit,
113 .name = "cris_freq",
114 .owner = THIS_MODULE,
115 .attr = cris_freq_attr,
116};
117
118static int __init cris_freq_init(void)
119{
120 int ret;
121 ret = cpufreq_register_driver(&cris_freq_driver);
122 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
123 CPUFREQ_TRANSITION_NOTIFIER);
124 return ret;
125}
126
127static int
128cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
129 void *data)
130{
131 int i;
132 struct cpufreq_freqs *freqs = data;
133 if (val == CPUFREQ_PRECHANGE) {
134 reg_ddr2_rw_cfg cfg =
135 REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
136 cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
137
138 if (freqs->new == 200000)
139 for (i = 0; i < 50000; i++);
140		REG_WR(ddr2, regi_ddr2_ctrl, rw_cfg, cfg);
141 }
142 return 0;
143}
144
145
146module_init(cris_freq_init);
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
new file mode 100644
index 000000000000..12952235d5db
--- /dev/null
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -0,0 +1,142 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <arch/hwregs/reg_rdwr.h>
6#include <arch/hwregs/config_defs.h>
7#include <arch/hwregs/bif_core_defs.h>
8
9static int
10cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
11 void *data);
12
13static struct notifier_block cris_sdram_freq_notifier_block = {
14 .notifier_call = cris_sdram_freq_notifier
15};
16
17static struct cpufreq_frequency_table cris_freq_table[] = {
18 {0x01, 6000},
19 {0x02, 200000},
20 {0, CPUFREQ_TABLE_END},
21};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_config_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
31 unsigned int state)
32{
33 struct cpufreq_freqs freqs;
34 reg_config_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
36
37 freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
38 freqs.new = cris_freq_table[state].frequency;
39
40 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
41
42 local_irq_disable();
43
44 /* Even though we may be SMP they will share the same clock
45 * so all settings are made on CPU0. */
46 if (cris_freq_table[state].frequency == 200000)
47 clk_ctrl.pll = 1;
48 else
49 clk_ctrl.pll = 0;
50 REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
51
52 local_irq_enable();
53
54 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
55};
56
57static int cris_freq_verify(struct cpufreq_policy *policy)
58{
59 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
60}
61
62static int cris_freq_target(struct cpufreq_policy *policy,
63 unsigned int target_freq, unsigned int relation)
64{
65 unsigned int newstate = 0;
66
67 if (cpufreq_frequency_table_target
68 (policy, cris_freq_table, target_freq, relation, &newstate))
69 return -EINVAL;
70
71 cris_freq_set_cpu_state(policy, newstate);
72
73 return 0;
74}
75
76static int cris_freq_cpu_init(struct cpufreq_policy *policy)
77{
78 int result;
79
80 /* cpuinfo and default policy values */
81 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
82 policy->cur = cris_freq_get_cpu_frequency(0);
83
84 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
85 if (result)
86 return (result);
87
88 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
89
90 return 0;
91}
92
93static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
94{
95 cpufreq_frequency_table_put_attr(policy->cpu);
96 return 0;
97}
98
99static struct freq_attr *cris_freq_attr[] = {
100 &cpufreq_freq_attr_scaling_available_freqs,
101 NULL,
102};
103
104static struct cpufreq_driver cris_freq_driver = {
105 .get = cris_freq_get_cpu_frequency,
106 .verify = cris_freq_verify,
107 .target = cris_freq_target,
108 .init = cris_freq_cpu_init,
109 .exit = cris_freq_cpu_exit,
110 .name = "cris_freq",
111 .owner = THIS_MODULE,
112 .attr = cris_freq_attr,
113};
114
115static int __init cris_freq_init(void)
116{
117 int ret;
118 ret = cpufreq_register_driver(&cris_freq_driver);
119 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
120 CPUFREQ_TRANSITION_NOTIFIER);
121 return ret;
122}
123
124static int
125cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
126 void *data)
127{
128 int i;
129 struct cpufreq_freqs *freqs = data;
130 if (val == CPUFREQ_PRECHANGE) {
131 reg_bif_core_rw_sdram_timing timing =
132 REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
133 timing.cpd = (freqs->new == 200000 ? 0 : 1);
134
135 if (freqs->new == 200000)
136 for (i = 0; i < 50000; i++) ;
137 REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
138 }
139 return 0;
140}
141
142module_init(cris_freq_init);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
new file mode 100644
index 000000000000..c33c76c360fa
--- /dev/null
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -0,0 +1,231 @@
1/*
2 * CPU frequency scaling for DaVinci
3 *
4 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
7 *
8 * Copyright (C) 2005 Nokia Corporation
9 * Written by Tony Lindgren <tony@atomide.com>
10 *
11 * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
12 *
13 * Copyright (C) 2007-2008 Texas Instruments, Inc.
14 * Updated to support OMAP3
15 * Rajendra Nayak <rnayak@ti.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 */
21#include <linux/types.h>
22#include <linux/cpufreq.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27#include <linux/export.h>
28
29#include <mach/hardware.h>
30#include <mach/cpufreq.h>
31#include <mach/common.h>
32
33struct davinci_cpufreq {
34 struct device *dev;
35 struct clk *armclk;
36 struct clk *asyncclk;
37 unsigned long asyncrate;
38};
39static struct davinci_cpufreq cpufreq;
40
41static int davinci_verify_speed(struct cpufreq_policy *policy)
42{
43 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
44 struct cpufreq_frequency_table *freq_table = pdata->freq_table;
45 struct clk *armclk = cpufreq.armclk;
46
47 if (freq_table)
48 return cpufreq_frequency_table_verify(policy, freq_table);
49
50 if (policy->cpu)
51 return -EINVAL;
52
53 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
54 policy->cpuinfo.max_freq);
55
56 policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
57 policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
58 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
59 policy->cpuinfo.max_freq);
60 return 0;
61}
62
63static unsigned int davinci_getspeed(unsigned int cpu)
64{
65 if (cpu)
66 return 0;
67
68 return clk_get_rate(cpufreq.armclk) / 1000;
69}
70
71static int davinci_target(struct cpufreq_policy *policy,
72 unsigned int target_freq, unsigned int relation)
73{
74 int ret = 0;
75 unsigned int idx;
76 struct cpufreq_freqs freqs;
77 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
78 struct clk *armclk = cpufreq.armclk;
79
80 freqs.old = davinci_getspeed(0);
81 freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
82
83 if (freqs.old == freqs.new)
84 return ret;
85
86 dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
87
88 ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
89 freqs.new, relation, &idx);
90 if (ret)
91 return -EINVAL;
92
93 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
94
95 /* if moving to higher frequency, up the voltage beforehand */
96 if (pdata->set_voltage && freqs.new > freqs.old) {
97 ret = pdata->set_voltage(idx);
98 if (ret)
99 goto out;
100 }
101
102 ret = clk_set_rate(armclk, idx);
103 if (ret)
104 goto out;
105
106 if (cpufreq.asyncclk) {
107 ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
108 if (ret)
109 goto out;
110 }
111
112 /* if moving to lower freq, lower the voltage after lowering freq */
113 if (pdata->set_voltage && freqs.new < freqs.old)
114 pdata->set_voltage(idx);
115
116out:
117 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
118
119 return ret;
120}
121
122static int davinci_cpu_init(struct cpufreq_policy *policy)
123{
124 int result = 0;
125 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
126 struct cpufreq_frequency_table *freq_table = pdata->freq_table;
127
128 if (policy->cpu != 0)
129 return -EINVAL;
130
131 /* Finish platform specific initialization */
132 if (pdata->init) {
133 result = pdata->init();
134 if (result)
135 return result;
136 }
137
138 policy->cur = davinci_getspeed(0);
139
140 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
141 if (result) {
142 pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
143 __func__);
144 return result;
145 }
146
147 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
148
149 /*
150 * Time measurement across the target() function yields ~1500-1800us
151 * time taken with no drivers on notification list.
152 * Setting the latency to 2000 us to accommodate addition of drivers
153 * to pre/post change notification list.
154 */
155 policy->cpuinfo.transition_latency = 2000 * 1000;
156 return 0;
157}
158
159static int davinci_cpu_exit(struct cpufreq_policy *policy)
160{
161 cpufreq_frequency_table_put_attr(policy->cpu);
162 return 0;
163}
164
165static struct freq_attr *davinci_cpufreq_attr[] = {
166 &cpufreq_freq_attr_scaling_available_freqs,
167 NULL,
168};
169
170static struct cpufreq_driver davinci_driver = {
171 .flags = CPUFREQ_STICKY,
172 .verify = davinci_verify_speed,
173 .target = davinci_target,
174 .get = davinci_getspeed,
175 .init = davinci_cpu_init,
176 .exit = davinci_cpu_exit,
177 .name = "davinci",
178 .attr = davinci_cpufreq_attr,
179};
180
181static int __init davinci_cpufreq_probe(struct platform_device *pdev)
182{
183 struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
184 struct clk *asyncclk;
185
186 if (!pdata)
187 return -EINVAL;
188 if (!pdata->freq_table)
189 return -EINVAL;
190
191 cpufreq.dev = &pdev->dev;
192
193 cpufreq.armclk = clk_get(NULL, "arm");
194 if (IS_ERR(cpufreq.armclk)) {
195 dev_err(cpufreq.dev, "Unable to get ARM clock\n");
196 return PTR_ERR(cpufreq.armclk);
197 }
198
199 asyncclk = clk_get(cpufreq.dev, "async");
200 if (!IS_ERR(asyncclk)) {
201 cpufreq.asyncclk = asyncclk;
202 cpufreq.asyncrate = clk_get_rate(asyncclk);
203 }
204
205 return cpufreq_register_driver(&davinci_driver);
206}
207
208static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
209{
210 clk_put(cpufreq.armclk);
211
212 if (cpufreq.asyncclk)
213 clk_put(cpufreq.asyncclk);
214
215 return cpufreq_unregister_driver(&davinci_driver);
216}
217
218static struct platform_driver davinci_cpufreq_driver = {
219 .driver = {
220 .name = "cpufreq-davinci",
221 .owner = THIS_MODULE,
222 },
223 .remove = __exit_p(davinci_cpufreq_remove),
224};
225
226int __init davinci_cpufreq_init(void)
227{
228 return platform_driver_probe(&davinci_cpufreq_driver,
229 davinci_cpufreq_probe);
230}
231
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 72f0c3efa76e..6ec6539ae041 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -37,12 +37,6 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
37 unsigned int idx; 37 unsigned int idx;
38 int ret; 38 int ret;
39 39
40 /* scale the target frequency to one of the extremes supported */
41 if (target_freq < policy->cpuinfo.min_freq)
42 target_freq = policy->cpuinfo.min_freq;
43 if (target_freq > policy->cpuinfo.max_freq)
44 target_freq = policy->cpuinfo.max_freq;
45
46 /* Lookup the next frequency */ 40 /* Lookup the next frequency */
47 if (cpufreq_frequency_table_target(policy, freq_table, target_freq, 41 if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
48 relation, &idx)) 42 relation, &idx))
@@ -55,8 +49,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
55 return 0; 49 return 0;
56 50
57 /* pre-change notification */ 51 /* pre-change notification */
58 for_each_cpu(freqs.cpu, policy->cpus) 52 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
59 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
60 53
61 /* update armss clk frequency */ 54 /* update armss clk frequency */
62 ret = clk_set_rate(armss_clk, freqs.new * 1000); 55 ret = clk_set_rate(armss_clk, freqs.new * 1000);
@@ -68,8 +61,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
68 } 61 }
69 62
70 /* post change notification */ 63 /* post change notification */
71 for_each_cpu(freqs.cpu, policy->cpus) 64 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
72 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
73 65
74 return 0; 66 return 0;
75} 67}
@@ -79,15 +71,15 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
79 int i = 0; 71 int i = 0;
80 unsigned long freq = clk_get_rate(armss_clk) / 1000; 72 unsigned long freq = clk_get_rate(armss_clk) / 1000;
81 73
82 while (freq_table[i].frequency != CPUFREQ_TABLE_END) { 74 /* The value is rounded to closest frequency in the defined table. */
83 if (freq <= freq_table[i].frequency) 75 while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
76 if (freq < freq_table[i].frequency +
77 (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
84 return freq_table[i].frequency; 78 return freq_table[i].frequency;
85 i++; 79 i++;
86 } 80 }
87 81
88 /* We could not find a corresponding frequency. */ 82 return freq_table[i].frequency;
89 pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
90 return 0;
91} 83}
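To make the new rounding concrete, here is a small worked example with a hypothetical three-entry table (the values are illustrative, not from the driver):

	/*
	 * freq_table = { 200000, 400000, 800000 } kHz, measured freq = 550000 kHz
	 *
	 * i = 0: midpoint = 200000 + (400000 - 200000) / 2 = 300000; 550000 >= 300000, continue
	 * i = 1: midpoint = 400000 + (800000 - 400000) / 2 = 600000; 550000 <  600000, return 400000
	 *
	 * Anything at or above 600000 kHz would instead be reported as 800000 kHz.
	 */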
92 84
93static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) 85static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 3fffbe6025cd..37380fb92621 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -104,7 +104,7 @@ static unsigned int eps_get(unsigned int cpu)
104} 104}
105 105
106static int eps_set_state(struct eps_cpu_data *centaur, 106static int eps_set_state(struct eps_cpu_data *centaur,
107 unsigned int cpu, 107 struct cpufreq_policy *policy,
108 u32 dest_state) 108 u32 dest_state)
109{ 109{
110 struct cpufreq_freqs freqs; 110 struct cpufreq_freqs freqs;
@@ -112,10 +112,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
112 int err = 0; 112 int err = 0;
113 int i; 113 int i;
114 114
115 freqs.old = eps_get(cpu); 115 freqs.old = eps_get(policy->cpu);
116 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); 116 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
117 freqs.cpu = cpu; 117 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
118 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
119 118
120 /* Wait while CPU is busy */ 119 /* Wait while CPU is busy */
121 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 120 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
@@ -162,7 +161,7 @@ postchange:
162 current_multiplier); 161 current_multiplier);
163 } 162 }
164#endif 163#endif
165 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
166 return err; 165 return err;
167} 166}
168 167
@@ -190,7 +189,7 @@ static int eps_target(struct cpufreq_policy *policy,
190 189
191 /* Make frequency transition */ 190 /* Make frequency transition */
192 dest_state = centaur->freq_table[newstate].index & 0xffff; 191 dest_state = centaur->freq_table[newstate].index & 0xffff;
193 ret = eps_set_state(centaur, cpu, dest_state); 192 ret = eps_set_state(centaur, policy, dest_state);
194 if (ret) 193 if (ret)
195 printk(KERN_ERR "eps: Timeout!\n"); 194 printk(KERN_ERR "eps: Timeout!\n");
196 return ret; 195 return ret;
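The freqs.new computation above decodes the destination state word: bits 15:8 carry the FSB multiplier and bits 7:0 appear to carry the voltage ID, so the new frequency is simply fsb times that multiplier. A hypothetical example:

	/*
	 * dest_state = 0x0c06  ->  multiplier = (0x0c06 >> 8) & 0xff = 12
	 *                          freqs.new  = centaur->fsb * 12
	 */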
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 960671fd3d7e..658d860344b0 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -117,15 +117,15 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
117 * There is no return value. 117 * There is no return value.
118 */ 118 */
119 119
120static void elanfreq_set_cpu_state(unsigned int state) 120static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
121 unsigned int state)
121{ 122{
122 struct cpufreq_freqs freqs; 123 struct cpufreq_freqs freqs;
123 124
124 freqs.old = elanfreq_get_cpu_frequency(0); 125 freqs.old = elanfreq_get_cpu_frequency(0);
125 freqs.new = elan_multiplier[state].clock; 126 freqs.new = elan_multiplier[state].clock;
126 freqs.cpu = 0; /* elanfreq.c is UP only driver */
127 127
128 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 128 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
129 129
130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", 130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
131 elan_multiplier[state].clock); 131 elan_multiplier[state].clock);
@@ -161,7 +161,7 @@ static void elanfreq_set_cpu_state(unsigned int state)
161 udelay(10000); 161 udelay(10000);
162 local_irq_enable(); 162 local_irq_enable();
163 163
164 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
165}; 165};
166 166
167 167
@@ -188,7 +188,7 @@ static int elanfreq_target(struct cpufreq_policy *policy,
188 target_freq, relation, &newstate)) 188 target_freq, relation, &newstate))
189 return -EINVAL; 189 return -EINVAL;
190 190
191 elanfreq_set_cpu_state(newstate); 191 elanfreq_set_cpu_state(policy, newstate);
192 192
193 return 0; 193 return 0;
194} 194}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 78057a357ddb..475b4f607f0d 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -70,7 +70,6 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
70 70
71 freqs.old = policy->cur; 71 freqs.old = policy->cur;
72 freqs.new = target_freq; 72 freqs.new = target_freq;
73 freqs.cpu = policy->cpu;
74 73
75 if (freqs.new == freqs.old) 74 if (freqs.new == freqs.old)
76 goto out; 75 goto out;
@@ -105,8 +104,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
105 } 104 }
106 arm_volt = volt_table[index]; 105 arm_volt = volt_table[index];
107 106
108 for_each_cpu(freqs.cpu, policy->cpus) 107 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
109 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
110 108
111 /* When the new frequency is higher than current frequency */ 109 /* When the new frequency is higher than current frequency */
112 if ((freqs.new > freqs.old) && !safe_arm_volt) { 110 if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -131,8 +129,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
131 129
132 exynos_info->set_freq(old_index, index); 130 exynos_info->set_freq(old_index, index);
133 131
134 for_each_cpu(freqs.cpu, policy->cpus) 132 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
135 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
136 133
137 /* When the new frequency is lower than current frequency */ 134 /* When the new frequency is lower than current frequency */
138 if ((freqs.new < freqs.old) || 135 if ((freqs.new < freqs.old) ||
@@ -297,7 +294,7 @@ static int __init exynos_cpufreq_init(void)
297 else if (soc_is_exynos5250()) 294 else if (soc_is_exynos5250())
298 ret = exynos5250_cpufreq_init(exynos_info); 295 ret = exynos5250_cpufreq_init(exynos_info);
299 else 296 else
300 pr_err("%s: CPU type not found\n", __func__); 297 return 0;
301 298
302 if (ret) 299 if (ret)
303 goto err_vdd_arm; 300 goto err_vdd_arm;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
new file mode 100644
index 000000000000..0c74018eda47
--- /dev/null
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * EXYNOS5440 - CPU frequency scaling support
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpufreq.h>
17#include <linux/err.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/opp.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27/* Register definitions */
28#define XMU_DVFS_CTRL 0x0060
29#define XMU_PMU_P0_7 0x0064
30#define XMU_C0_3_PSTATE 0x0090
31#define XMU_P_LIMIT 0x00a0
32#define XMU_P_STATUS 0x00a4
33#define XMU_PMUEVTEN 0x00d0
34#define XMU_PMUIRQEN 0x00d4
35#define XMU_PMUIRQ 0x00d8
36
 37/* PMU mask and shift definitions */
38#define P_VALUE_MASK 0x7
39
40#define XMU_DVFS_CTRL_EN_SHIFT 0
41
42#define P0_7_CPUCLKDEV_SHIFT 21
43#define P0_7_CPUCLKDEV_MASK 0x7
44#define P0_7_ATBCLKDEV_SHIFT 18
45#define P0_7_ATBCLKDEV_MASK 0x7
46#define P0_7_CSCLKDEV_SHIFT 15
47#define P0_7_CSCLKDEV_MASK 0x7
48#define P0_7_CPUEMA_SHIFT 28
49#define P0_7_CPUEMA_MASK 0xf
50#define P0_7_L2EMA_SHIFT 24
51#define P0_7_L2EMA_MASK 0xf
52#define P0_7_VDD_SHIFT 8
53#define P0_7_VDD_MASK 0x7f
54#define P0_7_FREQ_SHIFT 0
55#define P0_7_FREQ_MASK 0xff
56
57#define C0_3_PSTATE_VALID_SHIFT 8
58#define C0_3_PSTATE_CURR_SHIFT 4
59#define C0_3_PSTATE_NEW_SHIFT 0
60
61#define PSTATE_CHANGED_EVTEN_SHIFT 0
62
63#define PSTATE_CHANGED_IRQEN_SHIFT 0
64
65#define PSTATE_CHANGED_SHIFT 0
66
67/* some constant values for clock divider calculation */
68#define CPU_DIV_FREQ_MAX 500
69#define CPU_DBG_FREQ_MAX 375
70#define CPU_ATB_FREQ_MAX 500
71
72#define PMIC_LOW_VOLT 0x30
73#define PMIC_HIGH_VOLT 0x28
74
75#define CPUEMA_HIGH 0x2
76#define CPUEMA_MID 0x4
77#define CPUEMA_LOW 0x7
78
79#define L2EMA_HIGH 0x1
80#define L2EMA_MID 0x3
81#define L2EMA_LOW 0x4
82
83#define DIV_TAB_MAX 2
84/* frequency unit is 20MHZ */
85#define FREQ_UNIT 20
86#define MAX_VOLTAGE 1550000 /* In microvolt */
87#define VOLTAGE_STEP 12500 /* In microvolt */
88
89#define CPUFREQ_NAME "exynos5440_dvfs"
90#define DEF_TRANS_LATENCY 100000
91
92enum cpufreq_level_index {
93 L0, L1, L2, L3, L4,
94 L5, L6, L7, L8, L9,
95};
96#define CPUFREQ_LEVEL_END (L7 + 1)
97
98struct exynos_dvfs_data {
99 void __iomem *base;
100 struct resource *mem;
101 int irq;
102 struct clk *cpu_clk;
103 unsigned int cur_frequency;
104 unsigned int latency;
105 struct cpufreq_frequency_table *freq_table;
106 unsigned int freq_count;
107 struct device *dev;
108 bool dvfs_enabled;
109 struct work_struct irq_work;
110};
111
112static struct exynos_dvfs_data *dvfs_info;
113static DEFINE_MUTEX(cpufreq_lock);
114static struct cpufreq_freqs freqs;
115
116static int init_div_table(void)
117{
118 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
119 unsigned int tmp, clk_div, ema_div, freq, volt_id;
120 int i = 0;
121 struct opp *opp;
122
123 rcu_read_lock();
124 for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
125
126 opp = opp_find_freq_exact(dvfs_info->dev,
127 freq_tbl[i].frequency * 1000, true);
128 if (IS_ERR(opp)) {
129 rcu_read_unlock();
130 dev_err(dvfs_info->dev,
131 "failed to find valid OPP for %u KHZ\n",
132 freq_tbl[i].frequency);
133 return PTR_ERR(opp);
134 }
135
136 freq = freq_tbl[i].frequency / 1000; /* In MHZ */
137 clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
138 << P0_7_CPUCLKDEV_SHIFT;
139 clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
140 << P0_7_ATBCLKDEV_SHIFT;
141 clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
142 << P0_7_CSCLKDEV_SHIFT;
143
144 /* Calculate EMA */
145 volt_id = opp_get_voltage(opp);
146 volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
147 if (volt_id < PMIC_HIGH_VOLT) {
148 ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
149 (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
150 } else if (volt_id > PMIC_LOW_VOLT) {
151 ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
152 (L2EMA_LOW << P0_7_L2EMA_SHIFT);
153 } else {
154 ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
155 (L2EMA_MID << P0_7_L2EMA_SHIFT);
156 }
157
158 tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
159 | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
160
161 __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
162 }
163
164 rcu_read_unlock();
165 return 0;
166}
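A worked pass through the divider/EMA encoding above, using hypothetical OPP values (1200 MHz at 1.10 V) to show how the fields are packed:

	/*
	 * freq = 1200 MHz:
	 *   clk_div = (1200 / 500) << 21 | (1200 / 500) << 18 | (1200 / 375) << 15
	 *           =  2 << 21         |  2 << 18         |  3 << 15
	 * volt = 1100000 uV:
	 *   volt_id = (1550000 - 1100000) / 12500 = 36  (< PMIC_HIGH_VOLT 0x28 = 40)
	 *   ema_div = CPUEMA_HIGH << 28 | L2EMA_HIGH << 24
	 * frequency field:
	 *   1200 / FREQ_UNIT(20) = 60, placed in bits 7:0
	 */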
167
168static void exynos_enable_dvfs(void)
169{
170 unsigned int tmp, i, cpu;
171 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
172 /* Disable DVFS */
173 __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
174
175 /* Enable PSTATE Change Event */
176 tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
177 tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
178 __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
179
180 /* Enable PSTATE Change IRQ */
181 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
182 tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
183 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
184
185 /* Set initial performance index */
186 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
187 if (freq_table[i].frequency == dvfs_info->cur_frequency)
188 break;
189
190 if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
191 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
192 /* Assign the highest frequency */
193 i = 0;
194 dvfs_info->cur_frequency = freq_table[i].frequency;
195 }
196
 197	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %u kHz\n",
198 dvfs_info->cur_frequency);
199
200 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
201 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
202 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
203 tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
204 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
205 }
206
207 /* Enable DVFS */
208 __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
209 dvfs_info->base + XMU_DVFS_CTRL);
210}
211
212static int exynos_verify_speed(struct cpufreq_policy *policy)
213{
214 return cpufreq_frequency_table_verify(policy,
215 dvfs_info->freq_table);
216}
217
218static unsigned int exynos_getspeed(unsigned int cpu)
219{
220 return dvfs_info->cur_frequency;
221}
222
223static int exynos_target(struct cpufreq_policy *policy,
224 unsigned int target_freq,
225 unsigned int relation)
226{
227 unsigned int index, tmp;
228 int ret = 0, i;
229 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
230
231 mutex_lock(&cpufreq_lock);
232
233 ret = cpufreq_frequency_table_target(policy, freq_table,
234 target_freq, relation, &index);
235 if (ret)
236 goto out;
237
238 freqs.old = dvfs_info->cur_frequency;
239 freqs.new = freq_table[index].frequency;
240
241 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
242
243 /* Set the target frequency in all C0_3_PSTATE register */
244 for_each_cpu(i, policy->cpus) {
245 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
246 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
247 tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
248
249 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
250 }
251out:
252 mutex_unlock(&cpufreq_lock);
253 return ret;
254}
255
256static void exynos_cpufreq_work(struct work_struct *work)
257{
258 unsigned int cur_pstate, index;
259 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
260 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
261
262 /* Ensure we can access cpufreq structures */
263 if (unlikely(dvfs_info->dvfs_enabled == false))
264 goto skip_work;
265
266 mutex_lock(&cpufreq_lock);
267 freqs.old = dvfs_info->cur_frequency;
268
269 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
270 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
271 index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
272 else
273 index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
274
275 if (likely(index < dvfs_info->freq_count)) {
276 freqs.new = freq_table[index].frequency;
277 dvfs_info->cur_frequency = freqs.new;
278 } else {
279 dev_crit(dvfs_info->dev, "New frequency out of range\n");
280 freqs.new = dvfs_info->cur_frequency;
281 }
282 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
283
284 cpufreq_cpu_put(policy);
285 mutex_unlock(&cpufreq_lock);
286skip_work:
287 enable_irq(dvfs_info->irq);
288}
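This work function completes the transition started in exynos_target(): the PRECHANGE notification is sent there, and the POSTCHANGE is sent here once the hardware reports the switch. The XMU_P_STATUS decoding can be illustrated with a hypothetical register value:

	/*
	 * cur_pstate = 0x0000013a:
	 *   valid bit (bit 8)     = (0x13a >> 8) & 0x1 = 1   -> use the "current" field
	 *   current P-state index = (0x13a >> 4) & 0x7 = 3   -> freqs.new = freq_table[3].frequency
	 * With the valid bit clear, the "new" field ((0x13a >> 0) & 0x7 = 2) would be used instead.
	 */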
289
290static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
291{
292 unsigned int tmp;
293
294 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
295 if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
296 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
297 disable_irq_nosync(irq);
298 schedule_work(&dvfs_info->irq_work);
299 }
300 return IRQ_HANDLED;
301}
302
303static void exynos_sort_descend_freq_table(void)
304{
305 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
306 int i = 0, index;
307 unsigned int tmp_freq;
308 /*
309 * Exynos5440 clock controller state logic expects the cpufreq table to
310 * be in descending order. But the OPP library constructs the table in
311 * ascending order. So to make the table descending we just need to
 312	 * swap element i with element (N - 1 - i).
313 */
314 for (i = 0; i < dvfs_info->freq_count / 2; i++) {
315 index = dvfs_info->freq_count - i - 1;
316 tmp_freq = freq_tbl[i].frequency;
317 freq_tbl[i].frequency = freq_tbl[index].frequency;
318 freq_tbl[index].frequency = tmp_freq;
319 }
320}
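For example, an ascending OPP-built table is reversed in place by the swap loop (illustrative frequencies):

	/*
	 * before: { 200000, 400000, 800000, 1200000 } kHz   (freq_count = 4)
	 * i = 0: swap [0] and [3]  ->  { 1200000, 400000, 800000, 200000 }
	 * i = 1: swap [1] and [2]  ->  { 1200000, 800000, 400000, 200000 }
	 */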
321
322static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
323{
324 int ret;
325
326 ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
327 if (ret) {
328 dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
329 return ret;
330 }
331
332 policy->cur = dvfs_info->cur_frequency;
333 policy->cpuinfo.transition_latency = dvfs_info->latency;
334 cpumask_setall(policy->cpus);
335
336 cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
337
338 return 0;
339}
340
341static struct cpufreq_driver exynos_driver = {
342 .flags = CPUFREQ_STICKY,
343 .verify = exynos_verify_speed,
344 .target = exynos_target,
345 .get = exynos_getspeed,
346 .init = exynos_cpufreq_cpu_init,
347 .name = CPUFREQ_NAME,
348};
349
350static const struct of_device_id exynos_cpufreq_match[] = {
351 {
352 .compatible = "samsung,exynos5440-cpufreq",
353 },
354 {},
355};
356MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
357
358static int exynos_cpufreq_probe(struct platform_device *pdev)
359{
360 int ret = -EINVAL;
361 struct device_node *np;
362 struct resource res;
363
364 np = pdev->dev.of_node;
365 if (!np)
366 return -ENODEV;
367
368 dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
369 if (!dvfs_info) {
370 ret = -ENOMEM;
371 goto err_put_node;
372 }
373
374 dvfs_info->dev = &pdev->dev;
375
376 ret = of_address_to_resource(np, 0, &res);
377 if (ret)
378 goto err_put_node;
379
380 dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
381 if (IS_ERR(dvfs_info->base)) {
382 ret = PTR_ERR(dvfs_info->base);
383 goto err_put_node;
384 }
385
386 dvfs_info->irq = irq_of_parse_and_map(np, 0);
387 if (!dvfs_info->irq) {
388 dev_err(dvfs_info->dev, "No cpufreq irq found\n");
389 ret = -ENODEV;
390 goto err_put_node;
391 }
392
393 ret = of_init_opp_table(dvfs_info->dev);
394 if (ret) {
395 dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
396 goto err_put_node;
397 }
398
399 ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
400 if (ret) {
401 dev_err(dvfs_info->dev,
402 "failed to init cpufreq table: %d\n", ret);
403 goto err_put_node;
404 }
405 dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
406 exynos_sort_descend_freq_table();
407
408 if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
409 dvfs_info->latency = DEF_TRANS_LATENCY;
410
411 dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
412 if (IS_ERR(dvfs_info->cpu_clk)) {
413 dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
414 ret = PTR_ERR(dvfs_info->cpu_clk);
415 goto err_free_table;
416 }
417
418 dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
419 if (!dvfs_info->cur_frequency) {
420 dev_err(dvfs_info->dev, "Failed to get clock rate\n");
421 ret = -EINVAL;
422 goto err_free_table;
423 }
424 dvfs_info->cur_frequency /= 1000;
425
426 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
427 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
428 exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
429 CPUFREQ_NAME, dvfs_info);
430 if (ret) {
431 dev_err(dvfs_info->dev, "Failed to register IRQ\n");
432 goto err_free_table;
433 }
434
435 ret = init_div_table();
436 if (ret) {
437 dev_err(dvfs_info->dev, "Failed to initialise div table\n");
438 goto err_free_table;
439 }
440
441 exynos_enable_dvfs();
442 ret = cpufreq_register_driver(&exynos_driver);
443 if (ret) {
444 dev_err(dvfs_info->dev,
445 "%s: failed to register cpufreq driver\n", __func__);
446 goto err_free_table;
447 }
448
449 of_node_put(np);
450 dvfs_info->dvfs_enabled = true;
451 return 0;
452
453err_free_table:
454 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
455err_put_node:
456 of_node_put(np);
457 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
458 return ret;
459}
460
461static int exynos_cpufreq_remove(struct platform_device *pdev)
462{
463 cpufreq_unregister_driver(&exynos_driver);
464 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
465 return 0;
466}
467
468static struct platform_driver exynos_cpufreq_platdrv = {
469 .driver = {
470 .name = "exynos5440-cpufreq",
471 .owner = THIS_MODULE,
472 .of_match_table = exynos_cpufreq_match,
473 },
474 .probe = exynos_cpufreq_probe,
475 .remove = exynos_cpufreq_remove,
476};
477module_platform_driver(exynos_cpufreq_platdrv);
478
479MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
480MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
481MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 456bee058fe6..3dfc99b9ca86 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -251,14 +251,13 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
251 * set cpu speed in khz. 251 * set cpu speed in khz.
252 **/ 252 **/
253 253
254static void gx_set_cpuspeed(unsigned int khz) 254static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
255{ 255{
256 u8 suscfg, pmer1; 256 u8 suscfg, pmer1;
257 unsigned int new_khz; 257 unsigned int new_khz;
258 unsigned long flags; 258 unsigned long flags;
259 struct cpufreq_freqs freqs; 259 struct cpufreq_freqs freqs;
260 260
261 freqs.cpu = 0;
262 freqs.old = gx_get_cpuspeed(0); 261 freqs.old = gx_get_cpuspeed(0);
263 262
264 new_khz = gx_validate_speed(khz, &gx_params->on_duration, 263 new_khz = gx_validate_speed(khz, &gx_params->on_duration,
@@ -266,11 +265,9 @@ static void gx_set_cpuspeed(unsigned int khz)
266 265
267 freqs.new = new_khz; 266 freqs.new = new_khz;
268 267
269 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 268 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
270 local_irq_save(flags); 269 local_irq_save(flags);
271 270
272
273
274 if (new_khz != stock_freq) { 271 if (new_khz != stock_freq) {
275 /* if new khz == 100% of CPU speed, it is special case */ 272 /* if new khz == 100% of CPU speed, it is special case */
276 switch (gx_params->cs55x0->device) { 273 switch (gx_params->cs55x0->device) {
@@ -317,7 +314,7 @@ static void gx_set_cpuspeed(unsigned int khz)
317 314
318 gx_params->pci_suscfg = suscfg; 315 gx_params->pci_suscfg = suscfg;
319 316
320 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 317 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
321 318
322 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", 319 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
323 gx_params->on_duration * 32, gx_params->off_duration * 32); 320 gx_params->on_duration * 32, gx_params->off_duration * 32);
@@ -397,7 +394,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
397 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); 394 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
398 } 395 }
399 396
400 gx_set_cpuspeed(tmp_freq); 397 gx_set_cpuspeed(policy, tmp_freq);
401 398
402 return 0; 399 return 0;
403} 400}
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
new file mode 100644
index 000000000000..c0075dbaa633
--- /dev/null
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -0,0 +1,438 @@
1/*
 2 * This file provides ACPI-based P-state support. This
 3 * module works with the generic cpufreq infrastructure. Most of
 4 * the code is based on the i386 version
5 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
6 *
7 * Copyright (C) 2005 Intel Corp
8 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <asm/io.h>
19#include <asm/uaccess.h>
20#include <asm/pal.h>
21
22#include <linux/acpi.h>
23#include <acpi/processor.h>
24
25MODULE_AUTHOR("Venkatesh Pallipadi");
26MODULE_DESCRIPTION("ACPI Processor P-States Driver");
27MODULE_LICENSE("GPL");
28
29
30struct cpufreq_acpi_io {
31 struct acpi_processor_performance acpi_data;
32 struct cpufreq_frequency_table *freq_table;
33 unsigned int resume;
34};
35
36static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
37
38static struct cpufreq_driver acpi_cpufreq_driver;
39
40
41static int
42processor_set_pstate (
43 u32 value)
44{
45 s64 retval;
46
47 pr_debug("processor_set_pstate\n");
48
49 retval = ia64_pal_set_pstate((u64)value);
50
51 if (retval) {
52 pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
53 value, retval);
54 return -ENODEV;
55 }
56 return (int)retval;
57}
58
59
60static int
61processor_get_pstate (
62 u32 *value)
63{
64 u64 pstate_index = 0;
65 s64 retval;
66
67 pr_debug("processor_get_pstate\n");
68
69 retval = ia64_pal_get_pstate(&pstate_index,
70 PAL_GET_PSTATE_TYPE_INSTANT);
71 *value = (u32) pstate_index;
72
73 if (retval)
74 pr_debug("Failed to get current freq with "
75 "error 0x%lx, idx 0x%x\n", retval, *value);
76
77 return (int)retval;
78}
79
80
81/* To be used only after data->acpi_data is initialized */
82static unsigned
83extract_clock (
84 struct cpufreq_acpi_io *data,
85 unsigned value,
86 unsigned int cpu)
87{
88 unsigned long i;
89
90 pr_debug("extract_clock\n");
91
92 for (i = 0; i < data->acpi_data.state_count; i++) {
93 if (value == data->acpi_data.states[i].status)
94 return data->acpi_data.states[i].core_frequency;
95 }
96 return data->acpi_data.states[i-1].core_frequency;
97}
98
99
100static unsigned int
101processor_get_freq (
102 struct cpufreq_acpi_io *data,
103 unsigned int cpu)
104{
105 int ret = 0;
106 u32 value = 0;
107 cpumask_t saved_mask;
108 unsigned long clock_freq;
109
110 pr_debug("processor_get_freq\n");
111
112 saved_mask = current->cpus_allowed;
113 set_cpus_allowed_ptr(current, cpumask_of(cpu));
114 if (smp_processor_id() != cpu)
115 goto migrate_end;
116
117 /* processor_get_pstate gets the instantaneous frequency */
118 ret = processor_get_pstate(&value);
119
120 if (ret) {
121 set_cpus_allowed_ptr(current, &saved_mask);
122 printk(KERN_WARNING "get performance failed with error %d\n",
123 ret);
124 ret = 0;
125 goto migrate_end;
126 }
127 clock_freq = extract_clock(data, value, cpu);
128 ret = (clock_freq*1000);
129
130migrate_end:
131 set_cpus_allowed_ptr(current, &saved_mask);
132 return ret;
133}
134
135
136static int
137processor_set_freq (
138 struct cpufreq_acpi_io *data,
139 struct cpufreq_policy *policy,
140 int state)
141{
142 int ret = 0;
143 u32 value = 0;
144 struct cpufreq_freqs cpufreq_freqs;
145 cpumask_t saved_mask;
146 int retval;
147
148 pr_debug("processor_set_freq\n");
149
150 saved_mask = current->cpus_allowed;
151 set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
152 if (smp_processor_id() != policy->cpu) {
153 retval = -EAGAIN;
154 goto migrate_end;
155 }
156
157 if (state == data->acpi_data.state) {
158 if (unlikely(data->resume)) {
159 pr_debug("Called after resume, resetting to P%d\n", state);
160 data->resume = 0;
161 } else {
162 pr_debug("Already at target state (P%d)\n", state);
163 retval = 0;
164 goto migrate_end;
165 }
166 }
167
168 pr_debug("Transitioning from P%d to P%d\n",
169 data->acpi_data.state, state);
170
171 /* cpufreq frequency struct */
172 cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
173 cpufreq_freqs.new = data->freq_table[state].frequency;
174
175 /* notify cpufreq */
176 cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
177
178 /*
179 * First we write the target state's 'control' value to the
180 * control_register.
181 */
182
183 value = (u32) data->acpi_data.states[state].control;
184
185 pr_debug("Transitioning to state: 0x%08x\n", value);
186
187 ret = processor_set_pstate(value);
188 if (ret) {
189 unsigned int tmp = cpufreq_freqs.new;
190 cpufreq_notify_transition(policy, &cpufreq_freqs,
191 CPUFREQ_POSTCHANGE);
192 cpufreq_freqs.new = cpufreq_freqs.old;
193 cpufreq_freqs.old = tmp;
194 cpufreq_notify_transition(policy, &cpufreq_freqs,
195 CPUFREQ_PRECHANGE);
196 cpufreq_notify_transition(policy, &cpufreq_freqs,
197 CPUFREQ_POSTCHANGE);
198 printk(KERN_WARNING "Transition failed with error %d\n", ret);
199 retval = -ENODEV;
200 goto migrate_end;
201 }
202
203 cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
204
205 data->acpi_data.state = state;
206
207 retval = 0;
208
209migrate_end:
210 set_cpus_allowed_ptr(current, &saved_mask);
211 return (retval);
212}
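The error path above is worth spelling out: if the PAL call fails after the PRECHANGE notification, the driver first closes out the announced transition with a POSTCHANGE, then announces and completes a reverse transition so that listeners end up back at the old frequency. In order (this sketch mirrors the code, it is not additional logic):

	/*
	 * PRECHANGE  (old -> new)   announced before processor_set_pstate()
	 * POSTCHANGE (old -> new)   close out the announced transition
	 * PRECHANGE  (new -> old)   announce the rollback
	 * POSTCHANGE (new -> old)   listeners see the old frequency again
	 */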
213
214
215static unsigned int
216acpi_cpufreq_get (
217 unsigned int cpu)
218{
219 struct cpufreq_acpi_io *data = acpi_io_data[cpu];
220
221 pr_debug("acpi_cpufreq_get\n");
222
223 return processor_get_freq(data, cpu);
224}
225
226
227static int
228acpi_cpufreq_target (
229 struct cpufreq_policy *policy,
230 unsigned int target_freq,
231 unsigned int relation)
232{
233 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
234 unsigned int next_state = 0;
235 unsigned int result = 0;
236
237 pr_debug("acpi_cpufreq_setpolicy\n");
238
239 result = cpufreq_frequency_table_target(policy,
240 data->freq_table, target_freq, relation, &next_state);
241 if (result)
242 return (result);
243
244 result = processor_set_freq(data, policy, next_state);
245
246 return (result);
247}
248
249
250static int
251acpi_cpufreq_verify (
252 struct cpufreq_policy *policy)
253{
254 unsigned int result = 0;
255 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
256
257 pr_debug("acpi_cpufreq_verify\n");
258
259 result = cpufreq_frequency_table_verify(policy,
260 data->freq_table);
261
262 return (result);
263}
264
265
266static int
267acpi_cpufreq_cpu_init (
268 struct cpufreq_policy *policy)
269{
270 unsigned int i;
271 unsigned int cpu = policy->cpu;
272 struct cpufreq_acpi_io *data;
273 unsigned int result = 0;
274
275 pr_debug("acpi_cpufreq_cpu_init\n");
276
277 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
278 if (!data)
279 return (-ENOMEM);
280
281 acpi_io_data[cpu] = data;
282
283 result = acpi_processor_register_performance(&data->acpi_data, cpu);
284
285 if (result)
286 goto err_free;
287
288 /* capability check */
289 if (data->acpi_data.state_count <= 1) {
290 pr_debug("No P-States\n");
291 result = -ENODEV;
292 goto err_unreg;
293 }
294
295 if ((data->acpi_data.control_register.space_id !=
296 ACPI_ADR_SPACE_FIXED_HARDWARE) ||
297 (data->acpi_data.status_register.space_id !=
298 ACPI_ADR_SPACE_FIXED_HARDWARE)) {
299 pr_debug("Unsupported address space [%d, %d]\n",
300 (u32) (data->acpi_data.control_register.space_id),
301 (u32) (data->acpi_data.status_register.space_id));
302 result = -ENODEV;
303 goto err_unreg;
304 }
305
306 /* alloc freq_table */
307 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
308 (data->acpi_data.state_count + 1),
309 GFP_KERNEL);
310 if (!data->freq_table) {
311 result = -ENOMEM;
312 goto err_unreg;
313 }
314
315 /* detect transition latency */
316 policy->cpuinfo.transition_latency = 0;
317 for (i=0; i<data->acpi_data.state_count; i++) {
318 if ((data->acpi_data.states[i].transition_latency * 1000) >
319 policy->cpuinfo.transition_latency) {
320 policy->cpuinfo.transition_latency =
321 data->acpi_data.states[i].transition_latency * 1000;
322 }
323 }
324 policy->cur = processor_get_freq(data, policy->cpu);
325
326 /* table init */
327 for (i = 0; i <= data->acpi_data.state_count; i++)
328 {
329 data->freq_table[i].index = i;
330 if (i < data->acpi_data.state_count) {
331 data->freq_table[i].frequency =
332 data->acpi_data.states[i].core_frequency * 1000;
333 } else {
334 data->freq_table[i].frequency = CPUFREQ_TABLE_END;
335 }
336 }
337
338 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
339 if (result) {
340 goto err_freqfree;
341 }
342
343 /* notify BIOS that we exist */
344 acpi_processor_notify_smm(THIS_MODULE);
345
346 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
347 "activated.\n", cpu);
348
349 for (i = 0; i < data->acpi_data.state_count; i++)
350 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
351 (i == data->acpi_data.state?'*':' '), i,
352 (u32) data->acpi_data.states[i].core_frequency,
353 (u32) data->acpi_data.states[i].power,
354 (u32) data->acpi_data.states[i].transition_latency,
355 (u32) data->acpi_data.states[i].bus_master_latency,
356 (u32) data->acpi_data.states[i].status,
357 (u32) data->acpi_data.states[i].control);
358
359 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
360
361 /* the first call to ->target() should result in us actually
362 * writing something to the appropriate registers. */
363 data->resume = 1;
364
365 return (result);
366
367 err_freqfree:
368 kfree(data->freq_table);
369 err_unreg:
370 acpi_processor_unregister_performance(&data->acpi_data, cpu);
371 err_free:
372 kfree(data);
373 acpi_io_data[cpu] = NULL;
374
375 return (result);
376}
377
378
379static int
380acpi_cpufreq_cpu_exit (
381 struct cpufreq_policy *policy)
382{
383 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
384
385 pr_debug("acpi_cpufreq_cpu_exit\n");
386
387 if (data) {
388 cpufreq_frequency_table_put_attr(policy->cpu);
389 acpi_io_data[policy->cpu] = NULL;
390 acpi_processor_unregister_performance(&data->acpi_data,
391 policy->cpu);
392 kfree(data);
393 }
394
395 return (0);
396}
397
398
399static struct freq_attr* acpi_cpufreq_attr[] = {
400 &cpufreq_freq_attr_scaling_available_freqs,
401 NULL,
402};
403
404
405static struct cpufreq_driver acpi_cpufreq_driver = {
406 .verify = acpi_cpufreq_verify,
407 .target = acpi_cpufreq_target,
408 .get = acpi_cpufreq_get,
409 .init = acpi_cpufreq_cpu_init,
410 .exit = acpi_cpufreq_cpu_exit,
411 .name = "acpi-cpufreq",
412 .owner = THIS_MODULE,
413 .attr = acpi_cpufreq_attr,
414};
415
416
417static int __init
418acpi_cpufreq_init (void)
419{
420 pr_debug("acpi_cpufreq_init\n");
421
422 return cpufreq_register_driver(&acpi_cpufreq_driver);
423}
424
425
426static void __exit
427acpi_cpufreq_exit (void)
428{
429 pr_debug("acpi_cpufreq_exit\n");
430
431 cpufreq_unregister_driver(&acpi_cpufreq_driver);
432 return;
433}
434
435
436late_initcall(acpi_cpufreq_init);
437module_exit(acpi_cpufreq_exit);
438
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 54e336de373b..b78bc35973ba 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -50,7 +50,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
50 struct cpufreq_freqs freqs; 50 struct cpufreq_freqs freqs;
51 struct opp *opp; 51 struct opp *opp;
52 unsigned long freq_hz, volt, volt_old; 52 unsigned long freq_hz, volt, volt_old;
53 unsigned int index, cpu; 53 unsigned int index;
54 int ret; 54 int ret;
55 55
56 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, 56 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -68,10 +68,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
68 if (freqs.old == freqs.new) 68 if (freqs.old == freqs.new)
69 return 0; 69 return 0;
70 70
71 for_each_online_cpu(cpu) { 71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
72 freqs.cpu = cpu;
73 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
74 }
75 72
76 rcu_read_lock(); 73 rcu_read_lock();
77 opp = opp_find_freq_ceil(cpu_dev, &freq_hz); 74 opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
@@ -166,10 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
166 } 163 }
167 } 164 }
168 165
169 for_each_online_cpu(cpu) { 166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
170 freqs.cpu = cpu;
171 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
172 }
173 167
174 return 0; 168 return 0;
175} 169}
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
new file mode 100644
index 000000000000..f7c99df0880b
--- /dev/null
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * CPU support functions
9 */
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/cpufreq.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/init.h>
17#include <linux/io.h>
18
19#include <mach/hardware.h>
20#include <mach/platform.h>
21#include <asm/mach-types.h>
22#include <asm/hardware/icst.h>
23
24static struct cpufreq_driver integrator_driver;
25
26#define CM_ID __io_address(INTEGRATOR_HDR_ID)
27#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
28#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
29#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
30
31static const struct icst_params lclk_params = {
32 .ref = 24000000,
33 .vco_max = ICST525_VCO_MAX_5V,
34 .vco_min = ICST525_VCO_MIN,
35 .vd_min = 8,
36 .vd_max = 132,
37 .rd_min = 24,
38 .rd_max = 24,
39 .s2div = icst525_s2div,
40 .idx2s = icst525_idx2s,
41};
42
43static const struct icst_params cclk_params = {
44 .ref = 24000000,
45 .vco_max = ICST525_VCO_MAX_5V,
46 .vco_min = ICST525_VCO_MIN,
47 .vd_min = 12,
48 .vd_max = 160,
49 .rd_min = 24,
50 .rd_max = 24,
51 .s2div = icst525_s2div,
52 .idx2s = icst525_idx2s,
53};
54
55/*
56 * Validate the speed policy.
57 */
58static int integrator_verify_policy(struct cpufreq_policy *policy)
59{
60 struct icst_vco vco;
61
62 cpufreq_verify_within_limits(policy,
63 policy->cpuinfo.min_freq,
64 policy->cpuinfo.max_freq);
65
66 vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
67 policy->max = icst_hz(&cclk_params, vco) / 1000;
68
69 vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
70 policy->min = icst_hz(&cclk_params, vco) / 1000;
71
72 cpufreq_verify_within_limits(policy,
73 policy->cpuinfo.min_freq,
74 policy->cpuinfo.max_freq);
75
76 return 0;
77}
78
79
80static int integrator_set_target(struct cpufreq_policy *policy,
81 unsigned int target_freq,
82 unsigned int relation)
83{
84 cpumask_t cpus_allowed;
85 int cpu = policy->cpu;
86 struct icst_vco vco;
87 struct cpufreq_freqs freqs;
88 u_int cm_osc;
89
90 /*
 91	 * Save this thread's cpus_allowed mask.
92 */
93 cpus_allowed = current->cpus_allowed;
94
95 /*
96 * Bind to the specified CPU. When this call returns,
97 * we should be running on the right CPU.
98 */
99 set_cpus_allowed(current, cpumask_of_cpu(cpu));
100 BUG_ON(cpu != smp_processor_id());
101
102 /* get current setting */
103 cm_osc = __raw_readl(CM_OSC);
104
105 if (machine_is_integrator()) {
106 vco.s = (cm_osc >> 8) & 7;
107 } else if (machine_is_cintegrator()) {
108 vco.s = 1;
109 }
110 vco.v = cm_osc & 255;
111 vco.r = 22;
112 freqs.old = icst_hz(&cclk_params, vco) / 1000;
113
114 /* icst_hz_to_vco rounds down -- so we need the next
115 * larger freq in case of CPUFREQ_RELATION_L.
116 */
117 if (relation == CPUFREQ_RELATION_L)
118 target_freq += 999;
119 if (target_freq > policy->max)
120 target_freq = policy->max;
121 vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
122 freqs.new = icst_hz(&cclk_params, vco) / 1000;
123
124 if (freqs.old == freqs.new) {
125 set_cpus_allowed(current, cpus_allowed);
126 return 0;
127 }
128
129 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
130
131 cm_osc = __raw_readl(CM_OSC);
132
133 if (machine_is_integrator()) {
134 cm_osc &= 0xfffff800;
135 cm_osc |= vco.s << 8;
136 } else if (machine_is_cintegrator()) {
137 cm_osc &= 0xffffff00;
138 }
139 cm_osc |= vco.v;
140
141 __raw_writel(0xa05f, CM_LOCK);
142 __raw_writel(cm_osc, CM_OSC);
143 __raw_writel(0, CM_LOCK);
144
145 /*
146 * Restore the CPUs allowed mask.
147 */
148 set_cpus_allowed(current, cpus_allowed);
149
150 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
151
152 return 0;
153}
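The target_freq += 999 adjustment above compensates for icst_hz_to_vco() rounding down: for CPUFREQ_RELATION_L, bumping the request by just under 1 MHz (frequencies are in kHz here) lets the conversion select the achievable rate at or above the original request instead of the one just below it. A hedged illustration:

	/*
	 * Example: the rate nearest a 50000 kHz request might be 50250 kHz.
	 * icst_hz_to_vco(50000 kHz) would round down to the step below, while
	 * icst_hz_to_vco(50999 kHz) still rounds down but now lands on 50250 kHz,
	 * satisfying CPUFREQ_RELATION_L (lowest frequency at or above the request)
	 * provided the achievable steps are no more than 999 kHz apart.
	 */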
154
155static unsigned int integrator_get(unsigned int cpu)
156{
157 cpumask_t cpus_allowed;
158 unsigned int current_freq;
159 u_int cm_osc;
160 struct icst_vco vco;
161
162 cpus_allowed = current->cpus_allowed;
163
164 set_cpus_allowed(current, cpumask_of_cpu(cpu));
165 BUG_ON(cpu != smp_processor_id());
166
167 /* detect memory etc. */
168 cm_osc = __raw_readl(CM_OSC);
169
170 if (machine_is_integrator()) {
171 vco.s = (cm_osc >> 8) & 7;
172 } else {
173 vco.s = 1;
174 }
175 vco.v = cm_osc & 255;
176 vco.r = 22;
177
178 current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
179
180 set_cpus_allowed(current, cpus_allowed);
181
182 return current_freq;
183}
184
185static int integrator_cpufreq_init(struct cpufreq_policy *policy)
186{
187
188 /* set default policy and cpuinfo */
189 policy->cpuinfo.max_freq = 160000;
190 policy->cpuinfo.min_freq = 12000;
191 policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
192 policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
193
194 return 0;
195}
196
197static struct cpufreq_driver integrator_driver = {
198 .verify = integrator_verify_policy,
199 .target = integrator_set_target,
200 .get = integrator_get,
201 .init = integrator_cpufreq_init,
202 .name = "integrator",
203};
204
205static int __init integrator_cpu_init(void)
206{
207 return cpufreq_register_driver(&integrator_driver);
208}
209
210static void __exit integrator_cpu_exit(void)
211{
212 cpufreq_unregister_driver(&integrator_driver);
213}
214
215MODULE_AUTHOR ("Russell M. King");
216MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
217MODULE_LICENSE ("GPL");
218
219module_init(integrator_cpu_init);
220module_exit(integrator_cpu_exit);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..cc3a8e6c92be 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * cpufreq_snb.c: Native P state management for Intel processors 2 * intel_pstate.c: Native P state management for Intel processors
3 * 3 *
4 * (C) Copyright 2012 Intel Corporation 4 * (C) Copyright 2012 Intel Corporation
5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
@@ -657,30 +657,27 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
657static int intel_pstate_set_policy(struct cpufreq_policy *policy) 657static int intel_pstate_set_policy(struct cpufreq_policy *policy)
658{ 658{
659 struct cpudata *cpu; 659 struct cpudata *cpu;
660 int min, max;
661 660
662 cpu = all_cpu_data[policy->cpu]; 661 cpu = all_cpu_data[policy->cpu];
663 662
664 if (!policy->cpuinfo.max_freq) 663 if (!policy->cpuinfo.max_freq)
665 return -ENODEV; 664 return -ENODEV;
666 665
667 intel_pstate_get_min_max(cpu, &min, &max);
668
669 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
670 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
671 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
672
673 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
674 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
675 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
676
677 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 666 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
678 limits.min_perf_pct = 100; 667 limits.min_perf_pct = 100;
679 limits.min_perf = int_tofp(1); 668 limits.min_perf = int_tofp(1);
680 limits.max_perf_pct = 100; 669 limits.max_perf_pct = 100;
681 limits.max_perf = int_tofp(1); 670 limits.max_perf = int_tofp(1);
682 limits.no_turbo = 0; 671 limits.no_turbo = 0;
672 return 0;
683 } 673 }
674 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
675 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
676 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
677
678 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
679 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
680 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
684 681
685 return 0; 682 return 0;
686} 683}
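After this change the percentage limits are only computed for non-performance policies; for CPUFREQ_POLICY_PERFORMANCE the function now returns early with both limits pinned at 100%. A numeric illustration of the remaining path (hypothetical frequencies):

	/*
	 * policy->min = 1600000 kHz, policy->max = 2400000 kHz, cpuinfo.max_freq = 3200000 kHz:
	 *   min_perf_pct = 1600000 * 100 / 3200000 = 50   -> min_perf = 0.50 in fixed point
	 *   max_perf_pct = 2400000 * 100 / 3200000 = 75   -> max_perf = 0.75 in fixed point
	 * Both percentages are then clamped to the 0..100 range.
	 */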
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0e83e3c24f5b..d36ea8dc96eb 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,7 +55,8 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
55 return kirkwood_freq_table[0].frequency; 55 return kirkwood_freq_table[0].frequency;
56} 56}
57 57
58static void kirkwood_cpufreq_set_cpu_state(unsigned int index) 58static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
59 unsigned int index)
59{ 60{
60 struct cpufreq_freqs freqs; 61 struct cpufreq_freqs freqs;
61 unsigned int state = kirkwood_freq_table[index].index; 62 unsigned int state = kirkwood_freq_table[index].index;
@@ -63,9 +64,8 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
63 64
64 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); 65 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
65 freqs.new = kirkwood_freq_table[index].frequency; 66 freqs.new = kirkwood_freq_table[index].frequency;
66 freqs.cpu = 0; /* Kirkwood is UP */
67 67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 68 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
69 69
70 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", 70 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
71 kirkwood_freq_table[index].frequency); 71 kirkwood_freq_table[index].frequency);
@@ -99,7 +99,7 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
99 99
100 local_irq_enable(); 100 local_irq_enable();
101 } 101 }
102 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 102 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
103}; 103};
104 104
105static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) 105static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
@@ -117,7 +117,7 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
117 target_freq, relation, &index)) 117 target_freq, relation, &index))
118 return -EINVAL; 118 return -EINVAL;
119 119
120 kirkwood_cpufreq_set_cpu_state(index); 120 kirkwood_cpufreq_set_cpu_state(policy, index);
121 121
122 return 0; 122 return 0;
123} 123}
@@ -175,11 +175,9 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
175 dev_err(&pdev->dev, "Cannot get memory resource\n"); 175 dev_err(&pdev->dev, "Cannot get memory resource\n");
176 return -ENODEV; 176 return -ENODEV;
177 } 177 }
178 priv.base = devm_request_and_ioremap(&pdev->dev, res); 178 priv.base = devm_ioremap_resource(&pdev->dev, res);
179 if (!priv.base) { 179 if (IS_ERR(priv.base))
180 dev_err(&pdev->dev, "Cannot ioremap\n"); 180 return PTR_ERR(priv.base);
181 return -EADDRNOTAVAIL;
182 }
183 181
184 np = of_find_node_by_path("/cpus/cpu@0"); 182 np = of_find_node_by_path("/cpus/cpu@0");
185 if (!np) 183 if (!np)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 1180d536d1eb..b448638e34de 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,8 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
242 * Sets a new clock ratio. 242 * Sets a new clock ratio.
243 */ 243 */
244 244
245static void longhaul_setstate(unsigned int table_index) 245static void longhaul_setstate(struct cpufreq_policy *policy,
246 unsigned int table_index)
246{ 247{
247 unsigned int mults_index; 248 unsigned int mults_index;
248 int speed, mult; 249 int speed, mult;
@@ -267,9 +268,8 @@ static void longhaul_setstate(unsigned int table_index)
267 268
268 freqs.old = calc_speed(longhaul_get_cpu_mult()); 269 freqs.old = calc_speed(longhaul_get_cpu_mult());
269 freqs.new = speed; 270 freqs.new = speed;
270 freqs.cpu = 0; /* longhaul.c is UP only driver */
271 271
272 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
273 273
274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
275 fsb, mult/10, mult%10, print_speed(speed/1000)); 275 fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
386 } 386 }
387 } 387 }
388 /* Report true CPU frequency */ 388 /* Report true CPU frequency */
389 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 389 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
390 390
391 if (!bm_timeout) 391 if (!bm_timeout)
392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 392 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -648,7 +648,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
648 return 0; 648 return 0;
649 649
650 if (!can_scale_voltage) 650 if (!can_scale_voltage)
651 longhaul_setstate(table_index); 651 longhaul_setstate(policy, table_index);
652 else { 652 else {
653 /* On test system voltage transitions exceeding single 653 /* On test system voltage transitions exceeding single
654 * step up or down were turning motherboard off. Both 654 * step up or down were turning motherboard off. Both
@@ -663,7 +663,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
663 while (i != table_index) { 663 while (i != table_index) {
664 vid = (longhaul_table[i].index >> 8) & 0x1f; 664 vid = (longhaul_table[i].index >> 8) & 0x1f;
665 if (vid != current_vid) { 665 if (vid != current_vid) {
666 longhaul_setstate(i); 666 longhaul_setstate(policy, i);
667 current_vid = vid; 667 current_vid = vid;
668 msleep(200); 668 msleep(200);
669 } 669 }
@@ -672,7 +672,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
672 else 672 else
673 i--; 673 i--;
674 } 674 }
675 longhaul_setstate(table_index); 675 longhaul_setstate(policy, table_index);
676 } 676 }
677 longhaul_index = table_index; 677 longhaul_index = table_index;
678 return 0; 678 return 0;
@@ -998,15 +998,17 @@ static int __init longhaul_init(void)
998 998
999static void __exit longhaul_exit(void) 999static void __exit longhaul_exit(void)
1000{ 1000{
1001 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1001 int i; 1002 int i;
1002 1003
1003 for (i = 0; i < numscales; i++) { 1004 for (i = 0; i < numscales; i++) {
1004 if (mults[i] == maxmult) { 1005 if (mults[i] == maxmult) {
1005 longhaul_setstate(i); 1006 longhaul_setstate(policy, i);
1006 break; 1007 break;
1007 } 1008 }
1008 } 1009 }
1009 1010
1011 cpufreq_cpu_put(policy);
1010 cpufreq_unregister_driver(&longhaul_driver); 1012 cpufreq_unregister_driver(&longhaul_driver);
1011 kfree(longhaul_table); 1013 kfree(longhaul_table);
1012} 1014}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 000000000000..84889573b566
--- /dev/null
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,248 @@
1/*
 2 * Cpufreq driver for the Loongson-2 processors
 3 *
 4 * The 2E revision of the Loongson processor does not support this feature.
 5 *
 6 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/cpufreq.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/sched.h> /* set_cpus_allowed() */
17#include <linux/delay.h>
18#include <linux/platform_device.h>
19
20#include <asm/clock.h>
21
22#include <asm/mach-loongson/loongson.h>
23
24static uint nowait;
25
26static struct clk *cpuclk;
27
28static void (*saved_cpu_wait) (void);
29
30static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
31 unsigned long val, void *data);
32
33static struct notifier_block loongson2_cpufreq_notifier_block = {
34 .notifier_call = loongson2_cpu_freq_notifier
35};
36
37static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
38 unsigned long val, void *data)
39{
40 if (val == CPUFREQ_POSTCHANGE)
41 current_cpu_data.udelay_val = loops_per_jiffy;
42
43 return 0;
44}
45
46static unsigned int loongson2_cpufreq_get(unsigned int cpu)
47{
48 return clk_get_rate(cpuclk);
49}
50
51/*
52 * Here we notify other drivers of the proposed change and the final change.
53 */
54static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 unsigned int cpu = policy->cpu;
59 unsigned int newstate = 0;
60 cpumask_t cpus_allowed;
61 struct cpufreq_freqs freqs;
62 unsigned int freq;
63
64 cpus_allowed = current->cpus_allowed;
65 set_cpus_allowed_ptr(current, cpumask_of(cpu));
66
67 if (cpufreq_frequency_table_target
68 (policy, &loongson2_clockmod_table[0], target_freq, relation,
69 &newstate))
70 return -EINVAL;
71
72 freq =
73 ((cpu_clock_freq / 1000) *
74 loongson2_clockmod_table[newstate].index) / 8;
75 if (freq < policy->min || freq > policy->max)
76 return -EINVAL;
77
78 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
79
80 freqs.old = loongson2_cpufreq_get(cpu);
81 freqs.new = freq;
82 freqs.flags = 0;
83
84 if (freqs.new == freqs.old)
85 return 0;
86
87 /* notifiers */
88 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
89
90 set_cpus_allowed_ptr(current, &cpus_allowed);
91
92 /* setting the cpu frequency */
93 clk_set_rate(cpuclk, freq);
94
95 /* notifiers */
96 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
97
98 pr_debug("cpufreq: set frequency %u kHz\n", freq);
99
100 return 0;
101}
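The frequency computed above is a simple fraction of the full clock: assuming the clockmod table's index field matches its array position, as the initialisation loop in loongson2_cpufreq_cpu_init() suggests, each step selects index/8 of cpu_clock_freq. A worked example with a hypothetical 800 MHz part:

	/*
	 * cpu_clock_freq = 800000000 Hz  ->  800000 kHz
	 * index = 4                      ->  freq = 800000 * 4 / 8 = 400000 kHz (half speed)
	 * index = 8                      ->  freq = 800000 * 8 / 8 = 800000 kHz (full speed)
	 */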
102
103static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
104{
105 int i;
106 unsigned long rate;
107 int ret;
108
109 cpuclk = clk_get(NULL, "cpu_clk");
110 if (IS_ERR(cpuclk)) {
111 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
112 return PTR_ERR(cpuclk);
113 }
114
115 rate = cpu_clock_freq / 1000;
116 if (!rate) {
117 clk_put(cpuclk);
118 return -EINVAL;
119 }
120 ret = clk_set_rate(cpuclk, rate);
121 if (ret) {
122 clk_put(cpuclk);
123 return ret;
124 }
125
126 /* clock table init */
127 for (i = 2;
128 (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
129 i++)
130 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
131
132 policy->cur = loongson2_cpufreq_get(policy->cpu);
133
134 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
135 policy->cpu);
136
137 return cpufreq_frequency_table_cpuinfo(policy,
138 &loongson2_clockmod_table[0]);
139}
140
141static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
142{
143 return cpufreq_frequency_table_verify(policy,
144 &loongson2_clockmod_table[0]);
145}
146
147static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
148{
149 clk_put(cpuclk);
150 return 0;
151}
152
153static struct freq_attr *loongson2_table_attr[] = {
154 &cpufreq_freq_attr_scaling_available_freqs,
155 NULL,
156};
157
158static struct cpufreq_driver loongson2_cpufreq_driver = {
159 .owner = THIS_MODULE,
160 .name = "loongson2",
161 .init = loongson2_cpufreq_cpu_init,
162 .verify = loongson2_cpufreq_verify,
163 .target = loongson2_cpufreq_target,
164 .get = loongson2_cpufreq_get,
165 .exit = loongson2_cpufreq_exit,
166 .attr = loongson2_table_attr,
167};
168
169static struct platform_device_id platform_device_ids[] = {
170 {
171 .name = "loongson2_cpufreq",
172 },
173 {}
174};
175
176MODULE_DEVICE_TABLE(platform, platform_device_ids);
177
178static struct platform_driver platform_driver = {
179 .driver = {
180 .name = "loongson2_cpufreq",
181 .owner = THIS_MODULE,
182 },
183 .id_table = platform_device_ids,
184};
185
186/*
187 * This is a simple version of the Loongson-2 wait. We may need to do this
188 * in an interrupt-disabled context.
189 */
190
191static DEFINE_SPINLOCK(loongson2_wait_lock);
192
193static void loongson2_cpu_wait(void)
194{
195 unsigned long flags;
196 u32 cpu_freq;
197
198 spin_lock_irqsave(&loongson2_wait_lock, flags);
199 cpu_freq = LOONGSON_CHIPCFG0;
200 LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
201 LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
202 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
203}
204
205static int __init cpufreq_init(void)
206{
207 int ret;
208
209 /* Register platform stuff */
210 ret = platform_driver_register(&platform_driver);
211 if (ret)
212 return ret;
213
214 pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
215
216 cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
217 CPUFREQ_TRANSITION_NOTIFIER);
218
219 ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
220
221 if (!ret && !nowait) {
222 saved_cpu_wait = cpu_wait;
223 cpu_wait = loongson2_cpu_wait;
224 }
225
226 return ret;
227}
228
229static void __exit cpufreq_exit(void)
230{
231 if (!nowait && saved_cpu_wait)
232 cpu_wait = saved_cpu_wait;
233 cpufreq_unregister_driver(&loongson2_cpufreq_driver);
234 cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
235 CPUFREQ_TRANSITION_NOTIFIER);
236
237 platform_driver_unregister(&platform_driver);
238}
239
240module_init(cpufreq_init);
241module_exit(cpufreq_exit);
242
243module_param(nowait, uint, 0644);
244MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
245
246MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
247MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
248MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d4c4989823dc..cdd62915efaf 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -158,11 +158,10 @@ static int maple_cpufreq_target(struct cpufreq_policy *policy,
158 158
159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; 159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
160 freqs.new = maple_cpu_freqs[newstate].frequency; 160 freqs.new = maple_cpu_freqs[newstate].frequency;
161 freqs.cpu = 0;
162 161
163 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 162 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
164 rc = maple_scom_switch_freq(newstate); 163 rc = maple_scom_switch_freq(newstate);
165 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
166 165
167 mutex_unlock(&maple_switch_mutex); 166 mutex_unlock(&maple_switch_mutex);
168 167
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 9128c07bafba..0279d18a57f9 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -25,6 +25,7 @@
25#include <linux/opp.h> 25#include <linux/opp.h>
26#include <linux/cpu.h> 26#include <linux/cpu.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h> 29#include <linux/regulator/consumer.h>
29 30
30#include <asm/smp_plat.h> 31#include <asm/smp_plat.h>
@@ -88,16 +89,12 @@ static int omap_target(struct cpufreq_policy *policy,
88 } 89 }
89 90
90 freqs.old = omap_getspeed(policy->cpu); 91 freqs.old = omap_getspeed(policy->cpu);
91 freqs.cpu = policy->cpu;
92 92
93 if (freqs.old == freqs.new && policy->cur == freqs.new) 93 if (freqs.old == freqs.new && policy->cur == freqs.new)
94 return ret; 94 return ret;
95 95
96 /* notifiers */ 96 /* notifiers */
97 for_each_cpu(i, policy->cpus) { 97 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
98 freqs.cpu = i;
99 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
100 }
101 98
102 freq = freqs.new * 1000; 99 freq = freqs.new * 1000;
103 ret = clk_round_rate(mpu_clk, freq); 100 ret = clk_round_rate(mpu_clk, freq);
@@ -157,10 +154,7 @@ static int omap_target(struct cpufreq_policy *policy,
157 154
158done: 155done:
159 /* notifiers */ 156 /* notifiers */
160 for_each_cpu(i, policy->cpus) { 157 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
161 freqs.cpu = i;
162 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
163 }
164 158
165 return ret; 159 return ret;
166} 160}
@@ -184,7 +178,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
184 goto fail_ck; 178 goto fail_ck;
185 } 179 }
186 180
187 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); 181 policy->cur = omap_getspeed(policy->cpu);
188 182
189 if (!freq_table) 183 if (!freq_table)
190 result = opp_init_cpufreq_table(mpu_dev, &freq_table); 184 result = opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -203,8 +197,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
203 197
204 cpufreq_frequency_table_get_attr(freq_table, policy->cpu); 198 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
205 199
206 policy->min = policy->cpuinfo.min_freq;
207 policy->max = policy->cpuinfo.max_freq;
208 policy->cur = omap_getspeed(policy->cpu); 200 policy->cur = omap_getspeed(policy->cpu);
209 201
210 /* 202 /*
@@ -252,7 +244,7 @@ static struct cpufreq_driver omap_driver = {
252 .attr = omap_cpufreq_attr, 244 .attr = omap_cpufreq_attr,
253}; 245};
254 246
255static int __init omap_cpufreq_init(void) 247static int omap_cpufreq_probe(struct platform_device *pdev)
256{ 248{
257 mpu_dev = get_cpu_device(0); 249 mpu_dev = get_cpu_device(0);
258 if (!mpu_dev) { 250 if (!mpu_dev) {
@@ -280,12 +272,20 @@ static int __init omap_cpufreq_init(void)
280 return cpufreq_register_driver(&omap_driver); 272 return cpufreq_register_driver(&omap_driver);
281} 273}
282 274
283static void __exit omap_cpufreq_exit(void) 275static int omap_cpufreq_remove(struct platform_device *pdev)
284{ 276{
285 cpufreq_unregister_driver(&omap_driver); 277 return cpufreq_unregister_driver(&omap_driver);
286} 278}
287 279
280static struct platform_driver omap_cpufreq_platdrv = {
281 .driver = {
282 .name = "omap-cpufreq",
283 .owner = THIS_MODULE,
284 },
285 .probe = omap_cpufreq_probe,
286 .remove = omap_cpufreq_remove,
287};
288module_platform_driver(omap_cpufreq_platdrv);
289
288MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs"); 290MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
289MODULE_LICENSE("GPL"); 291MODULE_LICENSE("GPL");
290module_init(omap_cpufreq_init);
291module_exit(omap_cpufreq_exit);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 827629c9aad7..421ef37d0bb3 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -58,8 +58,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
58{ 58{
59 u32 l, h; 59 u32 l, h;
60 60
61 if (!cpu_online(cpu) || 61 if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
62 (newstate > DC_DISABLE) || (newstate == DC_RESV))
63 return -EINVAL; 62 return -EINVAL;
64 63
65 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); 64 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
@@ -125,10 +124,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
125 return 0; 124 return 0;
126 125
127 /* notifiers */ 126 /* notifiers */
128 for_each_cpu(i, policy->cpus) { 127 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
129 freqs.cpu = i;
130 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
131 }
132 128
133 /* run on each logical CPU, 129 /* run on each logical CPU,
134 * see section 13.15.3 of IA32 Intel Architecture Software 130 * see section 13.15.3 of IA32 Intel Architecture Software
@@ -138,10 +134,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
138 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
139 135
140 /* notifiers */ 136 /* notifiers */
141 for_each_cpu(i, policy->cpus) { 137 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
142 freqs.cpu = i;
143 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
144 }
145 138
146 return 0; 139 return 0;
147} 140}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 503996a94a6a..0de00081a81e 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,8 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
215 (pcch_virt_addr + pcc_cpu_data->input_offset)); 215 (pcch_virt_addr + pcc_cpu_data->input_offset));
216 216
217 freqs.new = target_freq; 217 freqs.new = target_freq;
218 freqs.cpu = cpu; 218 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
219 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
220 219
221 input_buffer = 0x1 | (((target_freq * 100) 220 input_buffer = 0x1 | (((target_freq * 100)
222 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); 221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -237,7 +236,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
237 } 236 }
238 iowrite16(0, &pcch_hdr->status); 237 iowrite16(0, &pcch_hdr->status);
239 238
240 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 239 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
241 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); 240 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
242 spin_unlock(&pcc_lock); 241 spin_unlock(&pcc_lock);
243 242
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index af23e0b9ec92..ea0222a45b7b 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -68,7 +68,8 @@ static int powernow_k6_get_cpu_multiplier(void)
68 * 68 *
69 * Tries to change the PowerNow! multiplier 69 * Tries to change the PowerNow! multiplier
70 */ 70 */
71static void powernow_k6_set_state(unsigned int best_i) 71static void powernow_k6_set_state(struct cpufreq_policy *policy,
72 unsigned int best_i)
72{ 73{
73 unsigned long outvalue = 0, invalue = 0; 74 unsigned long outvalue = 0, invalue = 0;
74 unsigned long msrval; 75 unsigned long msrval;
@@ -81,9 +82,8 @@ static void powernow_k6_set_state(unsigned int best_i)
81 82
82 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 83 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
83 freqs.new = busfreq * clock_ratio[best_i].index; 84 freqs.new = busfreq * clock_ratio[best_i].index;
84 freqs.cpu = 0; /* powernow-k6.c is UP only driver */
85 85
86 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 86 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
87 87
88 /* we now need to transform best_i to the BVC format, see AMD#23446 */ 88 /* we now need to transform best_i to the BVC format, see AMD#23446 */
89 89
@@ -98,7 +98,7 @@ static void powernow_k6_set_state(unsigned int best_i)
98 msrval = POWERNOW_IOPORT + 0x0; 98 msrval = POWERNOW_IOPORT + 0x0;
99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ 99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
100 100
101 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 101 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
102 102
103 return; 103 return;
104} 104}
@@ -136,7 +136,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
136 target_freq, relation, &newstate)) 136 target_freq, relation, &newstate))
137 return -EINVAL; 137 return -EINVAL;
138 138
139 powernow_k6_set_state(newstate); 139 powernow_k6_set_state(policy, newstate);
140 140
141 return 0; 141 return 0;
142} 142}
@@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
182 unsigned int i; 182 unsigned int i;
183 for (i = 0; i < 8; i++) { 183 for (i = 0; i < 8; i++) {
184 if (i == max_multiplier) 184 if (i == max_multiplier)
185 powernow_k6_set_state(i); 185 powernow_k6_set_state(policy, i);
186 } 186 }
187 cpufreq_frequency_table_put_attr(policy->cpu); 187 cpufreq_frequency_table_put_attr(policy->cpu);
188 return 0; 188 return 0;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 334cc2f1e9f1..53888dacbe58 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
248} 248}
249 249
250 250
251static void change_speed(unsigned int index) 251static void change_speed(struct cpufreq_policy *policy, unsigned int index)
252{ 252{
253 u8 fid, vid; 253 u8 fid, vid;
254 struct cpufreq_freqs freqs; 254 struct cpufreq_freqs freqs;
@@ -263,15 +263,13 @@ static void change_speed(unsigned int index)
263 fid = powernow_table[index].index & 0xFF; 263 fid = powernow_table[index].index & 0xFF;
264 vid = (powernow_table[index].index & 0xFF00) >> 8; 264 vid = (powernow_table[index].index & 0xFF00) >> 8;
265 265
266 freqs.cpu = 0;
267
268 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 266 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
269 cfid = fidvidstatus.bits.CFID; 267 cfid = fidvidstatus.bits.CFID;
270 freqs.old = fsb * fid_codes[cfid] / 10; 268 freqs.old = fsb * fid_codes[cfid] / 10;
271 269
272 freqs.new = powernow_table[index].frequency; 270 freqs.new = powernow_table[index].frequency;
273 271
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
275 273
276 /* Now do the magic poking into the MSRs. */ 274 /* Now do the magic poking into the MSRs. */
277 275
@@ -292,7 +290,7 @@ static void change_speed(unsigned int index)
292 if (have_a0 == 1) 290 if (have_a0 == 1)
293 local_irq_enable(); 291 local_irq_enable();
294 292
295 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 293 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
296} 294}
297 295
298 296
@@ -546,7 +544,7 @@ static int powernow_target(struct cpufreq_policy *policy,
546 relation, &newstate)) 544 relation, &newstate))
547 return -EINVAL; 545 return -EINVAL;
548 546
549 change_speed(newstate); 547 change_speed(policy, newstate);
550 548
551 return 0; 549 return 0;
552} 550}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d13a13678b5f..b828efe4b2f8 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -928,9 +928,10 @@ static int get_transition_latency(struct powernow_k8_data *data)
928static int transition_frequency_fidvid(struct powernow_k8_data *data, 928static int transition_frequency_fidvid(struct powernow_k8_data *data,
929 unsigned int index) 929 unsigned int index)
930{ 930{
931 struct cpufreq_policy *policy;
931 u32 fid = 0; 932 u32 fid = 0;
932 u32 vid = 0; 933 u32 vid = 0;
933 int res, i; 934 int res;
934 struct cpufreq_freqs freqs; 935 struct cpufreq_freqs freqs;
935 936
936 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); 937 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
@@ -959,10 +960,10 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
959 freqs.old = find_khz_freq_from_fid(data->currfid); 960 freqs.old = find_khz_freq_from_fid(data->currfid);
960 freqs.new = find_khz_freq_from_fid(fid); 961 freqs.new = find_khz_freq_from_fid(fid);
961 962
962 for_each_cpu(i, data->available_cores) { 963 policy = cpufreq_cpu_get(smp_processor_id());
963 freqs.cpu = i; 964 cpufreq_cpu_put(policy);
964 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 965
965 } 966 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
966 967
967 res = transition_fid_vid(data, fid, vid); 968 res = transition_fid_vid(data, fid, vid);
968 if (res) 969 if (res)
@@ -970,10 +971,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
970 971
971 freqs.new = find_khz_freq_from_fid(data->currfid); 972 freqs.new = find_khz_freq_from_fid(data->currfid);
972 973
973 for_each_cpu(i, data->available_cores) { 974 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
974 freqs.cpu = i;
975 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
976 }
977 return res; 975 return res;
978} 976}
979 977
@@ -1104,9 +1102,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1104 struct init_on_cpu init_on_cpu; 1102 struct init_on_cpu init_on_cpu;
1105 int rc; 1103 int rc;
1106 1104
1107 if (!cpu_online(pol->cpu))
1108 return -ENODEV;
1109
1110 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1105 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1111 if (rc) 1106 if (rc)
1112 return -ENODEV; 1107 return -ENODEV;
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
new file mode 100644
index 000000000000..e577a1dbbfcd
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -0,0 +1,209 @@
1/*
2 * cpufreq driver for the cell processor
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/of_platform.h>
26
27#include <asm/machdep.h>
28#include <asm/prom.h>
29#include <asm/cell-regs.h>
30
31#include "ppc_cbe_cpufreq.h"
32
33static DEFINE_MUTEX(cbe_switch_mutex);
34
35
36/* the CBE supports 8-step frequency scaling */
37static struct cpufreq_frequency_table cbe_freqs[] = {
38 {1, 0},
39 {2, 0},
40 {3, 0},
41 {4, 0},
42 {5, 0},
43 {6, 0},
44 {8, 0},
45 {10, 0},
46 {0, CPUFREQ_TABLE_END},
47};
48
49/*
50 * hardware specific functions
51 */
52
53static int set_pmode(unsigned int cpu, unsigned int slow_mode)
54{
55 int rc;
56
57 if (cbe_cpufreq_has_pmi)
58 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
59 else
60 rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
61
62 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
63
64 return rc;
65}
66
67/*
68 * cpufreq functions
69 */
70
71static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
72{
73 const u32 *max_freqp;
74 u32 max_freq;
75 int i, cur_pmode;
76 struct device_node *cpu;
77
78 cpu = of_get_cpu_node(policy->cpu, NULL);
79
80 if (!cpu)
81 return -ENODEV;
82
83 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
84
85 /*
86 * Let's check we can actually get to the CELL regs
87 */
88 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
89 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
90 pr_info("invalid CBE regs pointers for cpufreq\n");
91 return -EINVAL;
92 }
93
94 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
95
96 of_node_put(cpu);
97
98 if (!max_freqp)
99 return -EINVAL;
100
101 /* we need the freq in kHz */
102 max_freq = *max_freqp / 1000;
103
104 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
105 pr_debug("initializing frequency table\n");
106
107 /* initialize frequency table */
108	for (i = 0; cbe_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
109 cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
110 pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
111 }
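	/*
	 * Editor's note: with a hypothetical 3.2 GHz clock-frequency
	 * property (max_freq = 3200000 kHz) and the divisors 1, 2, 3, 4,
	 * 5, 6, 8, 10 above, the table becomes 3200000, 1600000, 1066666,
	 * 800000, 640000, 533333, 400000 and 320000 kHz.
	 */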
112
113 /* if DEBUG is enabled set_pmode() measures the latency
114 * of a transition */
115 policy->cpuinfo.transition_latency = 25000;
116
117 cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
118	pr_debug("current pmode is at %d\n", cur_pmode);
119
120 policy->cur = cbe_freqs[cur_pmode].frequency;
121
122#ifdef CONFIG_SMP
123 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
124#endif
125
126 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
127
128	/* this ensures that policy->cpuinfo.min_freq
129	 * and policy->cpuinfo.max_freq are set correctly */
130 return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
131}
132
133static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
134{
135 cpufreq_frequency_table_put_attr(policy->cpu);
136 return 0;
137}
138
139static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
140{
141 return cpufreq_frequency_table_verify(policy, cbe_freqs);
142}
143
144static int cbe_cpufreq_target(struct cpufreq_policy *policy,
145 unsigned int target_freq,
146 unsigned int relation)
147{
148 int rc;
149 struct cpufreq_freqs freqs;
150 unsigned int cbe_pmode_new;
151
152 cpufreq_frequency_table_target(policy,
153 cbe_freqs,
154 target_freq,
155 relation,
156 &cbe_pmode_new);
157
158 freqs.old = policy->cur;
159 freqs.new = cbe_freqs[cbe_pmode_new].frequency;
160
161 mutex_lock(&cbe_switch_mutex);
162 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
163
164 pr_debug("setting frequency for cpu %d to %d kHz, " \
165 "1/%d of max frequency\n",
166 policy->cpu,
167 cbe_freqs[cbe_pmode_new].frequency,
168 cbe_freqs[cbe_pmode_new].index);
169
170 rc = set_pmode(policy->cpu, cbe_pmode_new);
171
172 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
173 mutex_unlock(&cbe_switch_mutex);
174
175 return rc;
176}
177
178static struct cpufreq_driver cbe_cpufreq_driver = {
179 .verify = cbe_cpufreq_verify,
180 .target = cbe_cpufreq_target,
181 .init = cbe_cpufreq_cpu_init,
182 .exit = cbe_cpufreq_cpu_exit,
183 .name = "cbe-cpufreq",
184 .owner = THIS_MODULE,
185 .flags = CPUFREQ_CONST_LOOPS,
186};
187
188/*
189 * module init and destroy
190 */
191
192static int __init cbe_cpufreq_init(void)
193{
194 if (!machine_is(cell))
195 return -ENODEV;
196
197 return cpufreq_register_driver(&cbe_cpufreq_driver);
198}
199
200static void __exit cbe_cpufreq_exit(void)
201{
202 cpufreq_unregister_driver(&cbe_cpufreq_driver);
203}
204
205module_init(cbe_cpufreq_init);
206module_exit(cbe_cpufreq_exit);
207
208MODULE_LICENSE("GPL");
209MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
new file mode 100644
index 000000000000..b4c00a5a6a59
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -0,0 +1,24 @@
1/*
2 * ppc_cbe_cpufreq.h
3 *
4 * This file contains the definitions used by the cbe_cpufreq driver.
5 *
6 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
7 *
8 * Author: Christian Krafft <krafft@de.ibm.com>
9 *
10 */
11
12#include <linux/cpufreq.h>
13#include <linux/types.h>
14
15int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
16int cbe_cpufreq_get_pmode(int cpu);
17
18int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
19
20#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
21extern bool cbe_cpufreq_has_pmi;
22#else
23#define cbe_cpufreq_has_pmi (0)
24#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000000..84d2f2cf5ba7
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
1/*
2 * pervasive backend for the cbe_cpufreq driver
3 *
4 * This driver makes use of the pervasive unit to
5 * engage the desired frequency.
6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
8 *
9 * Author: Christian Krafft <krafft@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/time.h>
29#include <asm/machdep.h>
30#include <asm/hw_irq.h>
31#include <asm/cell-regs.h>
32
33#include "ppc_cbe_cpufreq.h"
34
35/* to write to MIC register */
36static u64 MIC_Slow_Fast_Timer_table[] = {
37 [0 ... 7] = 0x007fc00000000000ull,
38};
39
40/* more values for the MIC */
41static u64 MIC_Slow_Next_Timer_table[] = {
42 0x0000240000000000ull,
43 0x0000268000000000ull,
44 0x000029C000000000ull,
45 0x00002D0000000000ull,
46 0x0000300000000000ull,
47 0x0000334000000000ull,
48 0x000039C000000000ull,
49 0x00003FC000000000ull,
50};
51
52
53int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
54{
55 struct cbe_pmd_regs __iomem *pmd_regs;
56 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
57 unsigned long flags;
58 u64 value;
59#ifdef DEBUG
60 long time;
61#endif
62
63 local_irq_save(flags);
64
65 mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
66 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
67
68#ifdef DEBUG
69 time = jiffies;
70#endif
71
72 out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
73 out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
74
75 out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
76 out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
77
78 value = in_be64(&pmd_regs->pmcr);
79 /* set bits to zero */
80 value &= 0xFFFFFFFFFFFFFFF8ull;
81 /* set bits to next pmode */
82 value |= pmode;
83
84 out_be64(&pmd_regs->pmcr, value);
85
86#ifdef DEBUG
87 /* wait until new pmode appears in status register */
88 value = in_be64(&pmd_regs->pmsr) & 0x07;
89 while (value != pmode) {
90 cpu_relax();
91 value = in_be64(&pmd_regs->pmsr) & 0x07;
92 }
93
94 time = jiffies - time;
95 time = jiffies_to_msecs(time);
96 pr_debug("had to wait %lu ms for a transition using " \
97 "pervasive unit\n", time);
98#endif
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104
105int cbe_cpufreq_get_pmode(int cpu)
106{
107 int ret;
108 struct cbe_pmd_regs __iomem *pmd_regs;
109
110 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
111 ret = in_be64(&pmd_regs->pmsr) & 0x07;
112
113 return ret;
114}
115
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
new file mode 100644
index 000000000000..d29e8da396a0
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -0,0 +1,156 @@
1/*
2 * pmi backend for the cbe_cpufreq driver
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/timer.h>
26#include <linux/module.h>
27#include <linux/of_platform.h>
28
29#include <asm/processor.h>
30#include <asm/prom.h>
31#include <asm/pmi.h>
32#include <asm/cell-regs.h>
33
34#ifdef DEBUG
35#include <asm/time.h>
36#endif
37
38#include "ppc_cbe_cpufreq.h"
39
40static u8 pmi_slow_mode_limit[MAX_CBE];
41
42bool cbe_cpufreq_has_pmi = false;
43EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
44
45/*
46 * hardware specific functions
47 */
48
49int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
50{
51 int ret;
52 pmi_message_t pmi_msg;
53#ifdef DEBUG
54 long time;
55#endif
56 pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
57 pmi_msg.data1 = cbe_cpu_to_node(cpu);
58 pmi_msg.data2 = pmode;
59
60#ifdef DEBUG
61 time = jiffies;
62#endif
63 pmi_send_message(pmi_msg);
64
65#ifdef DEBUG
66 time = jiffies - time;
67 time = jiffies_to_msecs(time);
68 pr_debug("had to wait %lu ms for a transition using " \
69 "PMI\n", time);
70#endif
71 ret = pmi_msg.data2;
72 pr_debug("PMI returned slow mode %d\n", ret);
73
74 return ret;
75}
76EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
77
78
79static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
80{
81 u8 node, slow_mode;
82
83 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
84
85 node = pmi_msg.data1;
86 slow_mode = pmi_msg.data2;
87
88 pmi_slow_mode_limit[node] = slow_mode;
89
90 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
91}
92
93static int pmi_notifier(struct notifier_block *nb,
94 unsigned long event, void *data)
95{
96 struct cpufreq_policy *policy = data;
97 struct cpufreq_frequency_table *cbe_freqs;
98 u8 node;
99
100 /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
101	 * and CPUFREQ_NOTIFY policy events?
102 */
103 if (event == CPUFREQ_START)
104 return 0;
105
106 cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
107 node = cbe_cpu_to_node(policy->cpu);
108
109 pr_debug("got notified, event=%lu, node=%u\n", event, node);
110
111 if (pmi_slow_mode_limit[node] != 0) {
112 pr_debug("limiting node %d to slow mode %d\n",
113 node, pmi_slow_mode_limit[node]);
114
115 		cpufreq_verify_within_limits(policy, 0,
116 			cbe_freqs[pmi_slow_mode_limit[node]].frequency);
118 }
119
120 return 0;
121}
122
123static struct notifier_block pmi_notifier_block = {
124 .notifier_call = pmi_notifier,
125};
126
127static struct pmi_handler cbe_pmi_handler = {
128 .type = PMI_TYPE_FREQ_CHANGE,
129 .handle_pmi_message = cbe_cpufreq_handle_pmi,
130};
131
132
133
134static int __init cbe_cpufreq_pmi_init(void)
135{
136 cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
137
138 if (!cbe_cpufreq_has_pmi)
139 return -ENODEV;
140
141 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
142
143 return 0;
144}
145
146static void __exit cbe_cpufreq_pmi_exit(void)
147{
148 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
149 pmi_unregister_handler(&cbe_pmi_handler);
150}
151
152module_init(cbe_cpufreq_pmi_init);
153module_exit(cbe_cpufreq_pmi_exit);
154
155MODULE_LICENSE("GPL");
156MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
new file mode 100644
index 000000000000..9e5bc8e388a0
--- /dev/null
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -0,0 +1,492 @@
1/*
2 * Copyright (C) 2002,2003 Intrinsyc Software
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * History:
19 * 31-Jul-2002 : Initial version [FB]
20 * 29-Jan-2003 : added PXA255 support [FB]
21 * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
22 *
23 * Note:
24 * This driver may change the memory bus clock rate, but it will not make
25 * any platform-specific access timing changes.  For example, if you have
26 * flash memory connected to CS0, you need to register a platform-specific
27 * notifier that adjusts the memory access strobes to maintain a minimum
28 * strobe width (a sketch of such a notifier follows this header).
29 *
30 */
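/*
 * Editor's note: a minimal sketch of the platform-specific transition
 * notifier described in the Note above.  The helper board_adjust_strobes()
 * and the notifier name are hypothetical; they only illustrate the pattern
 * and are not part of this driver or of any platform code.
 */
#if 0	/* illustrative sketch only, never built */
static void board_adjust_strobes(unsigned int new_cpu_khz)
{
	/* reprogram the static memory controller for the new bus rate here */
}

static int board_cpufreq_notify(struct notifier_block *nb,
				unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* adjust timings once the new frequency is in effect */
	if (val == CPUFREQ_POSTCHANGE)
		board_adjust_strobes(freqs->new);

	return 0;
}

static struct notifier_block board_cpufreq_nb = {
	.notifier_call = board_cpufreq_notify,
};

/* board init code would then call:
 *	cpufreq_register_notifier(&board_cpufreq_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif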
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/sched.h>
35#include <linux/init.h>
36#include <linux/cpufreq.h>
37#include <linux/err.h>
38#include <linux/regulator/consumer.h>
39#include <linux/io.h>
40
41#include <mach/pxa2xx-regs.h>
42#include <mach/smemc.h>
43
44#ifdef DEBUG
45static unsigned int freq_debug;
46module_param(freq_debug, uint, 0);
47MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
48#else
49#define freq_debug 0
50#endif
51
52static struct regulator *vcc_core;
53
54static unsigned int pxa27x_maxfreq;
55module_param(pxa27x_maxfreq, uint, 0);
56MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz "
57 "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
58
59typedef struct {
60 unsigned int khz;
61 unsigned int membus;
62 unsigned int cccr;
63 unsigned int div2;
64 unsigned int cclkcfg;
65 int vmin;
66 int vmax;
67} pxa_freqs_t;
68
69/* Define the refresh period in mSec for the SDRAM and the number of rows */
70#define SDRAM_TREF 64 /* standard 64ms SDRAM */
71static unsigned int sdram_rows;
72
73#define CCLKCFG_TURBO 0x1
74#define CCLKCFG_FCS 0x2
75#define CCLKCFG_HALFTURBO 0x4
76#define CCLKCFG_FASTBUS 0x8
77#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
78#define MDREFR_DRI_MASK 0xFFF
79
80#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
81#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
82
83/*
84 * PXA255 definitions
85 */
86/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
87#define CCLKCFG			(CCLKCFG_TURBO | CCLKCFG_FCS)
88
89static pxa_freqs_t pxa255_run_freqs[] =
90{
91 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
92 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
93 {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
94 {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
95 {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
96 {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
97 {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
98};
99
100/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
101static pxa_freqs_t pxa255_turbo_freqs[] =
102{
103 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
104 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
105 {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
106 {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
107 {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
108 {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
109};
110
111#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
112#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
113
114static struct cpufreq_frequency_table
115 pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
116static struct cpufreq_frequency_table
117 pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
118
119static unsigned int pxa255_turbo_table;
120module_param(pxa255_turbo_table, uint, 0);
121MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
122
123/*
124 * PXA270 definitions
125 *
126 * For the PXA27x:
127 * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
128 *
129 * A = 0 => memory controller clock from table 3-7,
130 * A = 1 => memory controller clock = system bus clock
131 * Run mode frequency = 13 MHz * L
132 * Turbo mode frequency = 13 MHz * L * N
133 * System bus frequency = 13 MHz * L / (B + 1)
134 *
135 * In CCCR:
136 * A = 1
137 * L = 16 oscillator to run mode ratio
138 * 2N = 6 2 * (turbo mode to run mode ratio)
139 *
140 * In CCLKCFG:
141 * B = 1 Fast bus mode
142 * HT = 0 Half-Turbo mode
143 * T = 1 Turbo mode
144 *
145 * For now, just support some of the combinations in table 3-7 of
146 * PXA27x Processor Family Developer's Manual to simplify frequency
147 * change sequences.
148 */
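/*
 * Editor's note: a worked example of the run/turbo formulas above, using
 * values that appear in pxa27x_freqs[] below.  With PXA27x_CCCR(1, 16, 6),
 * i.e. A = 1, L = 16, 2N = 6 (so N = 3):
 *
 *	run   = 13 MHz * 16     = 208 MHz
 *	turbo = 13 MHz * 16 * 3 = 624 MHz	(the 624000 kHz table entry)
 *
 * Likewise PXA27x_CCCR(1, 16, 4) gives N = 2 and a 416 MHz turbo frequency.
 */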
149#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
150#define CCLKCFG2(B, HT, T) \
151 (CCLKCFG_FCS | \
152 ((B) ? CCLKCFG_FASTBUS : 0) | \
153 ((HT) ? CCLKCFG_HALFTURBO : 0) | \
154 ((T) ? CCLKCFG_TURBO : 0))
155
156static pxa_freqs_t pxa27x_freqs[] = {
157 {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
158 {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
159 {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
160 {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
161 {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
162 {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
163 {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
164};
165
166#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
167static struct cpufreq_frequency_table
168 pxa27x_freq_table[NUM_PXA27x_FREQS+1];
169
170extern unsigned get_clk_frequency_khz(int info);
171
172#ifdef CONFIG_REGULATOR
173
174static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
175{
176 int ret = 0;
177 int vmin, vmax;
178
179 if (!cpu_is_pxa27x())
180 return 0;
181
182 vmin = pxa_freq->vmin;
183 vmax = pxa_freq->vmax;
184 if ((vmin == -1) || (vmax == -1))
185 return 0;
186
187 ret = regulator_set_voltage(vcc_core, vmin, vmax);
188 if (ret)
189 pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
190 vmin, vmax);
191 return ret;
192}
193
194static __init void pxa_cpufreq_init_voltages(void)
195{
196 vcc_core = regulator_get(NULL, "vcc_core");
197 if (IS_ERR(vcc_core)) {
198 pr_info("cpufreq: Didn't find vcc_core regulator\n");
199 vcc_core = NULL;
200 } else {
201 pr_info("cpufreq: Found vcc_core regulator\n");
202 }
203}
204#else
205static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
206{
207 return 0;
208}
209
210static __init void pxa_cpufreq_init_voltages(void) { }
211#endif
212
213static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
214 pxa_freqs_t **pxa_freqs)
215{
216 if (cpu_is_pxa25x()) {
217 if (!pxa255_turbo_table) {
218 *pxa_freqs = pxa255_run_freqs;
219 *freq_table = pxa255_run_freq_table;
220 } else {
221 *pxa_freqs = pxa255_turbo_freqs;
222 *freq_table = pxa255_turbo_freq_table;
223 }
224 } else if (cpu_is_pxa27x()) {
225 *pxa_freqs = pxa27x_freqs;
226 *freq_table = pxa27x_freq_table;
227 } else {
228 BUG();
229 }
230}
231
232static void pxa27x_guess_max_freq(void)
233{
234 if (!pxa27x_maxfreq) {
235 pxa27x_maxfreq = 416000;
236 printk(KERN_INFO "PXA CPU 27x max frequency not defined "
237 "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
238 pxa27x_maxfreq);
239 } else {
240 pxa27x_maxfreq *= 1000;
241 }
242}
243
244static void init_sdram_rows(void)
245{
246 uint32_t mdcnfg = __raw_readl(MDCNFG);
247 unsigned int drac2 = 0, drac0 = 0;
248
249 if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
250 drac2 = MDCNFG_DRAC2(mdcnfg);
251
252 if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
253 drac0 = MDCNFG_DRAC0(mdcnfg);
254
255 sdram_rows = 1 << (11 + max(drac0, drac2));
256}
257
258static u32 mdrefr_dri(unsigned int freq)
259{
260 u32 interval = freq * SDRAM_TREF / sdram_rows;
261
262 return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
263}
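/*
 * Editor's note: a worked example of mdrefr_dri(), under the assumption of
 * sdram_rows = 4096 (a hypothetical value, not taken from this file).  freq
 * is the memory bus clock in kHz and SDRAM_TREF is in ms, so freq * SDRAM_TREF
 * is the number of memory clocks in one full refresh period:
 *
 *	freq = 99500 kHz:  interval = 99500 * 64 / 4096 = 1554 clocks per row
 *	DRI  = 1554 / 32        = 48	(PXA25x)
 *	DRI  = (1554 - 31) / 32 = 47	(PXA27x)
 */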
264
265/* find a valid frequency point */
266static int pxa_verify_policy(struct cpufreq_policy *policy)
267{
268 struct cpufreq_frequency_table *pxa_freqs_table;
269 pxa_freqs_t *pxa_freqs;
270 int ret;
271
272 find_freq_tables(&pxa_freqs_table, &pxa_freqs);
273 ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
274
275 if (freq_debug)
276		pr_debug("Verified CPU policy: %dkHz min to %dkHz max\n",
277 policy->min, policy->max);
278
279 return ret;
280}
281
282static unsigned int pxa_cpufreq_get(unsigned int cpu)
283{
284 return get_clk_frequency_khz(0);
285}
286
287static int pxa_set_target(struct cpufreq_policy *policy,
288 unsigned int target_freq,
289 unsigned int relation)
290{
291 struct cpufreq_frequency_table *pxa_freqs_table;
292 pxa_freqs_t *pxa_freq_settings;
293 struct cpufreq_freqs freqs;
294 unsigned int idx;
295 unsigned long flags;
296 unsigned int new_freq_cpu, new_freq_mem;
297 unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
298 int ret = 0;
299
300 /* Get the current policy */
301 find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
302
303 /* Lookup the next frequency */
304 if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
305 target_freq, relation, &idx)) {
306 return -EINVAL;
307 }
308
309 new_freq_cpu = pxa_freq_settings[idx].khz;
310 new_freq_mem = pxa_freq_settings[idx].membus;
311 freqs.old = policy->cur;
312 freqs.new = new_freq_cpu;
313
314 if (freq_debug)
315		pr_debug("Changing CPU frequency to %d MHz (SDRAM %d MHz)\n",
316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
317 (new_freq_mem / 2000) : (new_freq_mem / 1000));
318
319 if (vcc_core && freqs.new > freqs.old)
320 ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
321 if (ret)
322 return ret;
323 /*
324 * Tell everyone what we're about to do...
325 * you should add a notify client with any platform specific
326 * Vcc changing capability
327 */
328 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
329
330 /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
331 * we need to preset the smaller DRI before the change. If we're
332 * speeding up we need to set the larger DRI value after the change.
333 */
334 preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
335 if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
336 preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
337 preset_mdrefr |= mdrefr_dri(new_freq_mem);
338 }
339 postset_mdrefr =
340 (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
341
342 /* If we're dividing the memory clock by two for the SDRAM clock, this
343 * must be set prior to the change. Clearing the divide must be done
344 * after the change.
345 */
346 if (pxa_freq_settings[idx].div2) {
347 preset_mdrefr |= MDREFR_DB2_MASK;
348 postset_mdrefr |= MDREFR_DB2_MASK;
349 } else {
350 postset_mdrefr &= ~MDREFR_DB2_MASK;
351 }
352
353 local_irq_save(flags);
354
355	/* Set the new CCCR and prepare CCLKCFG */
356 CCCR = pxa_freq_settings[idx].cccr;
357 cclkcfg = pxa_freq_settings[idx].cclkcfg;
358
359 asm volatile(" \n\
360 ldr r4, [%1] /* load MDREFR */ \n\
361 b 2f \n\
362 .align 5 \n\
3631: \n\
364 str %3, [%1] /* preset the MDREFR */ \n\
365 mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
366 str %4, [%1] /* postset the MDREFR */ \n\
367 \n\
368 b 3f \n\
3692: b 1b \n\
3703: nop \n\
371 "
372 : "=&r" (unused)
373 : "r" (MDREFR), "r" (cclkcfg),
374 "r" (preset_mdrefr), "r" (postset_mdrefr)
375 : "r4", "r5");
376 local_irq_restore(flags);
377
378 /*
379 * Tell everyone what we've just done...
380 * you should add a notify client with any platform specific
381 * SDRAM refresh timer adjustments
382 */
383 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
384
385 /*
386 * Even if voltage setting fails, we don't report it, as the frequency
387	 * change succeeded. The voltage reduction is not a critical failure;
388	 * only power savings will suffer from it.
389	 *
390	 * Note: if the voltage change fails and an error is returned here, a
391	 * bug is triggered (it appears to be a deadlock). If anybody finds out
392	 * where, the "return 0" should become a "return ret".
393 */
394 if (vcc_core && freqs.new < freqs.old)
395 ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
396
397 return 0;
398}
399
400static int pxa_cpufreq_init(struct cpufreq_policy *policy)
401{
402 int i;
403 unsigned int freq;
404 struct cpufreq_frequency_table *pxa255_freq_table;
405 pxa_freqs_t *pxa255_freqs;
406
407 /* try to guess pxa27x cpu */
408 if (cpu_is_pxa27x())
409 pxa27x_guess_max_freq();
410
411 pxa_cpufreq_init_voltages();
412
413 init_sdram_rows();
414
415 /* set default policy and cpuinfo */
416 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
417 policy->cur = get_clk_frequency_khz(0); /* current freq */
418 policy->min = policy->max = policy->cur;
419
420	/* Generate the pxa25x run cpufreq_frequency_table struct */
421 for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
422 pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
423 pxa255_run_freq_table[i].index = i;
424 }
425 pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
426
427	/* Generate the pxa25x turbo cpufreq_frequency_table struct */
428 for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
429 pxa255_turbo_freq_table[i].frequency =
430 pxa255_turbo_freqs[i].khz;
431 pxa255_turbo_freq_table[i].index = i;
432 }
433 pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
434
435 pxa255_turbo_table = !!pxa255_turbo_table;
436
437 /* Generate the pxa27x cpufreq_frequency_table struct */
438 for (i = 0; i < NUM_PXA27x_FREQS; i++) {
439 freq = pxa27x_freqs[i].khz;
440 if (freq > pxa27x_maxfreq)
441 break;
442 pxa27x_freq_table[i].frequency = freq;
443 pxa27x_freq_table[i].index = i;
444 }
445 pxa27x_freq_table[i].index = i;
446 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
447
448 /*
449 * Set the policy's minimum and maximum frequencies from the tables
450	 * just constructed. This sets cpuinfo.min_freq and cpuinfo.max_freq.
451 */
452 if (cpu_is_pxa25x()) {
453 find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
454 pr_info("PXA255 cpufreq using %s frequency table\n",
455 pxa255_turbo_table ? "turbo" : "run");
456 cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
457 }
458 else if (cpu_is_pxa27x())
459 cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
460
461 printk(KERN_INFO "PXA CPU frequency change support initialized\n");
462
463 return 0;
464}
465
466static struct cpufreq_driver pxa_cpufreq_driver = {
467 .verify = pxa_verify_policy,
468 .target = pxa_set_target,
469 .init = pxa_cpufreq_init,
470 .get = pxa_cpufreq_get,
471 .name = "PXA2xx",
472};
473
474static int __init pxa_cpu_init(void)
475{
476 int ret = -ENODEV;
477 if (cpu_is_pxa25x() || cpu_is_pxa27x())
478 ret = cpufreq_register_driver(&pxa_cpufreq_driver);
479 return ret;
480}
481
482static void __exit pxa_cpu_exit(void)
483{
484 cpufreq_unregister_driver(&pxa_cpufreq_driver);
485}
486
487
488MODULE_AUTHOR("Intrinsyc Software Inc.");
489MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
490MODULE_LICENSE("GPL");
491module_init(pxa_cpu_init);
492module_exit(pxa_cpu_exit);
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
new file mode 100644
index 000000000000..15d60f857ad5
--- /dev/null
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (C) 2008 Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17
18#include <mach/generic.h>
19#include <mach/pxa3xx-regs.h>
20
21#define HSS_104M (0)
22#define HSS_156M (1)
23#define HSS_208M (2)
24#define HSS_312M (3)
25
26#define SMCFS_78M (0)
27#define SMCFS_104M (2)
28#define SMCFS_208M (5)
29
30#define SFLFS_104M (0)
31#define SFLFS_156M (1)
32#define SFLFS_208M (2)
33#define SFLFS_312M (3)
34
35#define XSPCLK_156M (0)
36#define XSPCLK_NONE (3)
37
38#define DMCFS_26M (0)
39#define DMCFS_260M (3)
40
41struct pxa3xx_freq_info {
42 unsigned int cpufreq_mhz;
43 unsigned int core_xl : 5;
44 unsigned int core_xn : 3;
45 unsigned int hss : 2;
46 unsigned int dmcfs : 2;
47 unsigned int smcfs : 3;
48 unsigned int sflfs : 2;
49 unsigned int df_clkdiv : 3;
50
51 int vcc_core; /* in mV */
52 int vcc_sram; /* in mV */
53};
54
55#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
56{ \
57 .cpufreq_mhz = cpufreq, \
58 .core_xl = _xl, \
59 .core_xn = _xn, \
60 .hss = HSS_##_hss##M, \
61 .dmcfs = DMCFS_##_dmc##M, \
62 .smcfs = SMCFS_##_smc##M, \
63 .sflfs = SFLFS_##_sfl##M, \
64 .df_clkdiv = _dfi, \
65 .vcc_core = vcore, \
66 .vcc_sram = vsram, \
67}
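/*
 * Editor's note: for reference, the first pxa300_freqs[] entry below,
 * OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), expands to:
 *
 *	.cpufreq_mhz = 104, .core_xl = 8, .core_xn = 1,
 *	.hss = HSS_104M, .dmcfs = DMCFS_260M, .smcfs = SMCFS_78M,
 *	.sflfs = SFLFS_104M, .df_clkdiv = 3,
 *	.vcc_core = 1000, .vcc_sram = 1100	(both in mV)
 */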
68
69static struct pxa3xx_freq_info pxa300_freqs[] = {
70 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
71 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
72 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
73 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
74 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
75};
76
77static struct pxa3xx_freq_info pxa320_freqs[] = {
78 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
79 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
80 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
81 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
82 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
83 OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
84};
85
86static unsigned int pxa3xx_freqs_num;
87static struct pxa3xx_freq_info *pxa3xx_freqs;
88static struct cpufreq_frequency_table *pxa3xx_freqs_table;
89
90static int setup_freqs_table(struct cpufreq_policy *policy,
91 struct pxa3xx_freq_info *freqs, int num)
92{
93 struct cpufreq_frequency_table *table;
94 int i;
95
96 table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
97 if (table == NULL)
98 return -ENOMEM;
99
100 for (i = 0; i < num; i++) {
101 table[i].index = i;
102 table[i].frequency = freqs[i].cpufreq_mhz * 1000;
103 }
104 table[num].index = i;
105 table[num].frequency = CPUFREQ_TABLE_END;
106
107 pxa3xx_freqs = freqs;
108 pxa3xx_freqs_num = num;
109 pxa3xx_freqs_table = table;
110
111 return cpufreq_frequency_table_cpuinfo(policy, table);
112}
113
114static void __update_core_freq(struct pxa3xx_freq_info *info)
115{
116 uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
117 uint32_t accr = ACCR;
118 uint32_t xclkcfg;
119
120 accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
121 accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
122
123 /* No clock until core PLL is re-locked */
124 accr |= ACCR_XSPCLK(XSPCLK_NONE);
125
126 xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
127
128 ACCR = accr;
129 __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
130
131 while ((ACSR & mask) != (accr & mask))
132 cpu_relax();
133}
134
135static void __update_bus_freq(struct pxa3xx_freq_info *info)
136{
137 uint32_t mask;
138 uint32_t accr = ACCR;
139
140 mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
141 ACCR_DMCFS_MASK;
142
143 accr &= ~mask;
144 accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
145 ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
146
147 ACCR = accr;
148
149 while ((ACSR & mask) != (accr & mask))
150 cpu_relax();
151}
152
153static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
154{
155 return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
156}
157
158static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
159{
160 return pxa3xx_get_clk_frequency_khz(0);
161}
162
163static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
164 unsigned int target_freq,
165 unsigned int relation)
166{
167 struct pxa3xx_freq_info *next;
168 struct cpufreq_freqs freqs;
169 unsigned long flags;
170 int idx;
171
172 if (policy->cpu != 0)
173 return -EINVAL;
174
175 /* Lookup the next frequency */
176 if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
177 target_freq, relation, &idx))
178 return -EINVAL;
179
180 next = &pxa3xx_freqs[idx];
181
182 freqs.old = policy->cur;
183 freqs.new = next->cpufreq_mhz * 1000;
184
185 pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
186 freqs.old / 1000, freqs.new / 1000,
187 (freqs.old == freqs.new) ? " (skipped)" : "");
188
189 if (freqs.old == target_freq)
190 return 0;
191
192 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
193
194 local_irq_save(flags);
195 __update_core_freq(next);
196 __update_bus_freq(next);
197 local_irq_restore(flags);
198
199 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
200
201 return 0;
202}
203
204static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
205{
206 int ret = -EINVAL;
207
208 /* set default policy and cpuinfo */
209 policy->cpuinfo.min_freq = 104000;
210 policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
211 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
212 policy->max = pxa3xx_get_clk_frequency_khz(0);
213 policy->cur = policy->min = policy->max;
214
215 if (cpu_is_pxa300() || cpu_is_pxa310())
216 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
217
218 if (cpu_is_pxa320())
219 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
220
221 if (ret) {
222 pr_err("failed to setup frequency table\n");
223 return ret;
224 }
225
226 pr_info("CPUFREQ support for PXA3xx initialized\n");
227 return 0;
228}
229
230static struct cpufreq_driver pxa3xx_cpufreq_driver = {
231 .verify = pxa3xx_cpufreq_verify,
232 .target = pxa3xx_cpufreq_set,
233 .init = pxa3xx_cpufreq_init,
234 .get = pxa3xx_cpufreq_get,
235 .name = "pxa3xx-cpufreq",
236};
237
238static int __init cpufreq_init(void)
239{
240 if (cpu_is_pxa3xx())
241 return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
242
243 return 0;
244}
245module_init(cpufreq_init);
246
247static void __exit cpufreq_exit(void)
248{
249 cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
250}
251module_exit(cpufreq_exit);
252
253MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
254MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index bcc053bc02c4..4f1881eee3f1 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -256,7 +256,6 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
256 goto out; 256 goto out;
257 } 257 }
258 258
259 freqs.cpu = 0;
260 freqs.flags = 0; 259 freqs.flags = 0;
261 freqs.old = s3c_freq->is_dvs ? FREQ_DVS 260 freqs.old = s3c_freq->is_dvs ? FREQ_DVS
262 : clk_get_rate(s3c_freq->armclk) / 1000; 261 : clk_get_rate(s3c_freq->armclk) / 1000;
@@ -274,7 +273,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
274 if (!to_dvs && freqs.old == freqs.new) 273 if (!to_dvs && freqs.old == freqs.new)
275 goto out; 274 goto out;
276 275
277 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 276 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
278 277
279 if (to_dvs) { 278 if (to_dvs) {
280 pr_debug("cpufreq: enter dvs\n"); 279 pr_debug("cpufreq: enter dvs\n");
@@ -287,7 +286,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
287 ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); 286 ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
288 } 287 }
289 288
290 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 289 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
291 290
292out: 291out:
293 mutex_unlock(&cpufreq_lock); 292 mutex_unlock(&cpufreq_lock);
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 6f9490b3c356..27cacb524796 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -84,7 +84,6 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 	if (ret != 0)
 		return ret;
 
-	freqs.cpu = 0;
 	freqs.old = clk_get_rate(armclk) / 1000;
 	freqs.new = s3c64xx_freq_table[i].frequency;
 	freqs.flags = 0;
@@ -95,7 +94,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 
 	pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 #ifdef CONFIG_REGULATOR
 	if (vddarm && freqs.new > freqs.old) {
@@ -117,7 +116,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 		goto err;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 #ifdef CONFIG_REGULATOR
 	if (vddarm && freqs.new < freqs.old) {
@@ -141,7 +140,7 @@ err_clk:
 	if (clk_set_rate(armclk, freqs.old * 1000) < 0)
 		pr_err("Failed to restore original clock rate\n");
 err:
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return ret;
 }
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a484aaea9809..5c7757073793 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -229,7 +229,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 	}
 
 	freqs.new = s5pv210_freq_table[index].frequency;
-	freqs.cpu = 0;
 
 	if (freqs.new == freqs.old)
 		goto exit;
@@ -256,7 +255,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 		goto exit;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Check if there need to change PLL */
 	if ((index == L0) || (priv_index == L0))
@@ -468,7 +467,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	if (freqs.new < freqs.old) {
 		regulator_set_voltage(int_regulator,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
new file mode 100644
index 000000000000..cff18e87ca58
--- /dev/null
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -0,0 +1,247 @@
1/*
2 * cpu-sa1100.c: clock scaling for the SA1100
3 *
4 * Copyright (C) 2000 2001, The Delft University of Technology
5 *
6 * Authors:
7 * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
8 * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
9 * - major rewrite for linux-2.3.99
10 * - rewritten for the more generic power management scheme in
11 * linux-2.4.5-rmk1
12 *
13 * This software has been developed while working on the LART
14 * computing board (http://www.lartmaker.nl/), which is
15 * sponsored by the Mobile Multi-media Communications
16 * (http://www.mobimedia.org/) and Ubiquitous Communications
17 * (http://www.ubicom.tudelft.nl/) projects.
18 *
19 * The authors can be reached at:
20 *
21 * Erik Mouw
22 * Information and Communication Theory Group
23 * Faculty of Information Technology and Systems
24 * Delft University of Technology
25 * P.O. Box 5031
26 * 2600 GA Delft
27 * The Netherlands
28 *
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License as published by
32 * the Free Software Foundation; either version 2 of the License, or
33 * (at your option) any later version.
34 *
35 * This program is distributed in the hope that it will be useful,
36 * but WITHOUT ANY WARRANTY; without even the implied warranty of
37 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
38 * GNU General Public License for more details.
39 *
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 *
44 *
45 * Theory of operations
46 * ====================
47 *
48 * Clock scaling can be used to lower the power consumption of the CPU
49 * core. This will give you a somewhat longer running time.
50 *
51 * The SA-1100 has a single register to change the core clock speed:
52 *
53 * PPCR 0x90020014 PLL config
54 *
55 * However, the DRAM timings are closely related to the core clock
56 * speed, so we need to change these, too. The used registers are:
57 *
58 * MDCNFG 0xA0000000 DRAM config
59 * MDCAS0 0xA0000004 Access waveform
60 * MDCAS1 0xA0000008 Access waveform
61 * MDCAS2 0xA000000C Access waveform
62 *
63 * Care must be taken to change the DRAM parameters the correct way,
64 * because otherwise the DRAM becomes unusable and the kernel will
65 * crash.
66 *
67 * The simple solution to avoid a kernel crash is to put the actual
68 * clock change in ROM and jump to that code from the kernel. The main
69 * disadvantage is that the ROM has to be modified, which is not
70 * possible on all SA-1100 platforms. Another disadvantage is that
71 * jumping to ROM makes clock switching unnecessarily complicated.
72 *
73 * The idea behind this driver is that the memory configuration can be
74 * changed while running from DRAM (even with interrupts turned on!)
75 * as long as all re-configuration steps yield a valid DRAM
76 * configuration. The advantages are clear: it will run on all SA-1100
77 * platforms, and the code is very simple.
78 *
79 * If you really want to understand what is going on in
80 * sa1100_update_dram_timings(), you'll have to read sections 8.2,
81 * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
82 * Developers Manual" (available for free from Intel).
83 *
84 */
85
86#include <linux/kernel.h>
87#include <linux/types.h>
88#include <linux/init.h>
89#include <linux/cpufreq.h>
90#include <linux/io.h>
91
92#include <asm/cputype.h>
93
94#include <mach/generic.h>
95#include <mach/hardware.h>
96
97struct sa1100_dram_regs {
98 int speed;
99 u32 mdcnfg;
100 u32 mdcas0;
101 u32 mdcas1;
102 u32 mdcas2;
103};
104
105
106static struct cpufreq_driver sa1100_driver;
107
108static struct sa1100_dram_regs sa1100_dram_settings[] = {
109 /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
110 { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
111 { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
112 { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
113 {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
114 {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
115 {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
116 {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
117 {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
118 {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
119 {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
120 {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
121 {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
122 {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
123 {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
124 {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
125 {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
126 { 0, 0, 0, 0, 0 } /* last entry */
127};
128
129static void sa1100_update_dram_timings(int current_speed, int new_speed)
130{
131 struct sa1100_dram_regs *settings = sa1100_dram_settings;
132
133 /* find speed */
134 while (settings->speed != 0) {
135 if (new_speed == settings->speed)
136 break;
137
138 settings++;
139 }
140
141 if (settings->speed == 0) {
142 panic("%s: couldn't find dram setting for speed %d\n",
143 __func__, new_speed);
144 }
145
146 /* No risk, no fun: run with interrupts on! */
147 if (new_speed > current_speed) {
148 /* We're going FASTER, so first relax the memory
149 * timings before changing the core frequency
150 */
151
152 /* Half the memory access clock */
153 MDCNFG |= MDCNFG_CDB2;
154
155 /* The order of these statements IS important, keep 8
156 * pulses!!
157 */
158 MDCAS2 = settings->mdcas2;
159 MDCAS1 = settings->mdcas1;
160 MDCAS0 = settings->mdcas0;
161 MDCNFG = settings->mdcnfg;
162 } else {
163 /* We're going SLOWER: first decrease the core
164 * frequency and then tighten the memory settings.
165 */
166
167 /* Half the memory access clock */
168 MDCNFG |= MDCNFG_CDB2;
169
170 /* The order of these statements IS important, keep 8
171 * pulses!!
172 */
173 MDCAS0 = settings->mdcas0;
174 MDCAS1 = settings->mdcas1;
175 MDCAS2 = settings->mdcas2;
176 MDCNFG = settings->mdcnfg;
177 }
178}
179
180static int sa1100_target(struct cpufreq_policy *policy,
181 unsigned int target_freq,
182 unsigned int relation)
183{
184 unsigned int cur = sa11x0_getspeed(0);
185 unsigned int new_ppcr;
186 struct cpufreq_freqs freqs;
187
188 new_ppcr = sa11x0_freq_to_ppcr(target_freq);
189 switch (relation) {
190 case CPUFREQ_RELATION_L:
191 if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
192 new_ppcr--;
193 break;
194 case CPUFREQ_RELATION_H:
195 if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
196 (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
197 new_ppcr--;
198 break;
199 }
200
201 freqs.old = cur;
202 freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
203
204 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
205
206 if (freqs.new > cur)
207 sa1100_update_dram_timings(cur, freqs.new);
208
209 PPCR = new_ppcr;
210
211 if (freqs.new < cur)
212 sa1100_update_dram_timings(cur, freqs.new);
213
214 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
215
216 return 0;
217}
218
219static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
220{
221 if (policy->cpu != 0)
222 return -EINVAL;
223 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
224 policy->cpuinfo.min_freq = 59000;
225 policy->cpuinfo.max_freq = 287000;
226 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
227 return 0;
228}
229
230static struct cpufreq_driver sa1100_driver __refdata = {
231 .flags = CPUFREQ_STICKY,
232 .verify = sa11x0_verify_speed,
233 .target = sa1100_target,
234 .get = sa11x0_getspeed,
235 .init = sa1100_cpu_init,
236 .name = "sa1100",
237};
238
239static int __init sa1100_dram_init(void)
240{
241 if (cpu_is_sa1100())
242 return cpufreq_register_driver(&sa1100_driver);
243 else
244 return -ENODEV;
245}
246
247arch_initcall(sa1100_dram_init);
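
The ordering constraint described in the "Theory of operations" comment and enforced by sa1100_target() can be restated as a short sketch. This is an illustration only; relax_dram_timings(), tighten_dram_timings() and write_core_pll() are made-up names, not functions of this driver:

/* Illustrative helpers, not part of this driver. */
static void relax_dram_timings(unsigned int khz);
static void tighten_dram_timings(unsigned int khz);
static void write_core_pll(unsigned int khz);

/* Sketch of the ordering only: when speeding up, the DRAM must already
 * tolerate the faster clock before the core PLL (PPCR) changes; when
 * slowing down, the core must already be slower before timings tighten. */
static void example_change_speed(unsigned int cur_khz, unsigned int new_khz)
{
	if (new_khz > cur_khz)
		relax_dram_timings(new_khz);	/* looser timings first */

	write_core_pll(new_khz);		/* then switch the core clock */

	if (new_khz < cur_khz)
		tighten_dram_timings(new_khz);	/* tighter timings last */
}
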
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
new file mode 100644
index 000000000000..39c90b6f4286
--- /dev/null
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -0,0 +1,406 @@
1/*
2 * linux/arch/arm/mach-sa1100/cpu-sa1110.c
3 *
4 * Copyright (C) 2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Note: there are two errata that apply to the SA1110 here:
11 * 7 - SDRAM auto-power-up failure (rev A0)
12 * 13 - Corruption of internal register reads/writes following
13 * SDRAM reads (rev A0, B0, B1)
14 *
15 * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
16 *
17 * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
18 */
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/kernel.h>
24#include <linux/moduleparam.h>
25#include <linux/types.h>
26
27#include <asm/cputype.h>
28#include <asm/mach-types.h>
29
30#include <mach/generic.h>
31#include <mach/hardware.h>
32
33#undef DEBUG
34
35struct sdram_params {
36 const char name[20];
37 u_char rows; /* bits */
38 u_char cas_latency; /* cycles */
39 u_char tck; /* clock cycle time (ns) */
40 u_char trcd; /* activate to r/w (ns) */
41 u_char trp; /* precharge to activate (ns) */
42 u_char twr; /* write recovery time (ns) */
43 u_short refresh; /* refresh time for array (us) */
44};
45
46struct sdram_info {
47 u_int mdcnfg;
48 u_int mdrefr;
49 u_int mdcas[3];
50};
51
52static struct sdram_params sdram_tbl[] __initdata = {
53 { /* Toshiba TC59SM716 CL2 */
54 .name = "TC59SM716-CL2",
55 .rows = 12,
56 .tck = 10,
57 .trcd = 20,
58 .trp = 20,
59 .twr = 10,
60 .refresh = 64000,
61 .cas_latency = 2,
62 }, { /* Toshiba TC59SM716 CL3 */
63 .name = "TC59SM716-CL3",
64 .rows = 12,
65 .tck = 8,
66 .trcd = 20,
67 .trp = 20,
68 .twr = 8,
69 .refresh = 64000,
70 .cas_latency = 3,
71 }, { /* Samsung K4S641632D TC75 */
72 .name = "K4S641632D",
73 .rows = 14,
74 .tck = 9,
75 .trcd = 27,
76 .trp = 20,
77 .twr = 9,
78 .refresh = 64000,
79 .cas_latency = 3,
80 }, { /* Samsung K4S281632B-1H */
81 .name = "K4S281632B-1H",
82 .rows = 12,
83 .tck = 10,
84 .trp = 20,
85 .twr = 10,
86 .refresh = 64000,
87 .cas_latency = 3,
88 }, { /* Samsung KM416S4030CT */
89 .name = "KM416S4030CT",
90 .rows = 13,
91 .tck = 8,
92 .trcd = 24, /* 3 CLKs */
93 .trp = 24, /* 3 CLKs */
94 .twr = 16, /* Trdl: 2 CLKs */
95 .refresh = 64000,
96 .cas_latency = 3,
97 }, { /* Winbond W982516AH75L CL3 */
98 .name = "W982516AH75L",
99 .rows = 16,
100 .tck = 8,
101 .trcd = 20,
102 .trp = 20,
103 .twr = 8,
104 .refresh = 64000,
105 .cas_latency = 3,
106 }, { /* Micron MT48LC8M16A2TG-75 */
107 .name = "MT48LC8M16A2TG-75",
108 .rows = 12,
109 .tck = 8,
110 .trcd = 20,
111 .trp = 20,
112 .twr = 8,
113 .refresh = 64000,
114 .cas_latency = 3,
115 },
116};
117
118static struct sdram_params sdram_params;
119
120/*
121 * Given a period in ns and frequency in khz, calculate the number of
122 * cycles of frequency in period. Note that we round up to the next
123 * cycle, even if we are only slightly over.
124 */
125static inline u_int ns_to_cycles(u_int ns, u_int khz)
126{
127 return (ns * khz + 999999) / 1000000;
128}
129
130/*
131 * Create the MDCAS register bit pattern.
132 */
133static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
134{
135 u_int shift;
136
137 rcd = 2 * rcd - 1;
138 shift = delayed + 1 + rcd;
139
140 mdcas[0] = (1 << rcd) - 1;
141 mdcas[0] |= 0x55555555 << shift;
142 mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
143}
144
145static void
146sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
147 struct sdram_params *sdram)
148{
149 u_int mem_khz, sd_khz, trp, twr;
150
151 mem_khz = cpu_khz / 2;
152 sd_khz = mem_khz;
153
154 /*
155 * If SDCLK would invalidate the SDRAM timings,
156 * run SDCLK at half speed.
157 *
158 * CPU steppings prior to B2 must either run the memory at
159 * half speed or use delayed read latching (errata 13).
160 */
161 if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
162 (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
163 sd_khz /= 2;
164
165 sd->mdcnfg = MDCNFG & 0x007f007f;
166
167 twr = ns_to_cycles(sdram->twr, mem_khz);
168
169 /* trp should always be >1 */
170 trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
171 if (trp < 1)
172 trp = 1;
173
174 sd->mdcnfg |= trp << 8;
175 sd->mdcnfg |= trp << 24;
176 sd->mdcnfg |= sdram->cas_latency << 12;
177 sd->mdcnfg |= sdram->cas_latency << 28;
178 sd->mdcnfg |= twr << 14;
179 sd->mdcnfg |= twr << 30;
180
181 sd->mdrefr = MDREFR & 0xffbffff0;
182 sd->mdrefr |= 7;
183
184 if (sd_khz != mem_khz)
185 sd->mdrefr |= MDREFR_K1DB2;
186
187 /* initial number of '1's in MDCAS + 1 */
188 set_mdcas(sd->mdcas, sd_khz >= 62000,
189 ns_to_cycles(sdram->trcd, mem_khz));
190
191#ifdef DEBUG
192 printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
193 sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
194 sd->mdcas[2]);
195#endif
196}
197
198/*
199 * Set the SDRAM refresh rate.
200 */
201static inline void sdram_set_refresh(u_int dri)
202{
203 MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
204 (void) MDREFR;
205}
206
207/*
208 * Update the refresh period. We do this such that we always refresh
209 * the SDRAMs within their permissible period. The refresh period is
210 * always a multiple of the memory clock (fixed at cpu_clock / 2).
211 *
212 * FIXME: we don't currently take account of burst accesses here,
213 * but neither do Intel's DM nor Angel.
214 */
215static void
216sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
217{
218 u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
219 u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
220
221#ifdef DEBUG
222 mdelay(250);
223 printk(KERN_DEBUG "new dri value = %d\n", dri);
224#endif
225
226 sdram_set_refresh(dri);
227}
228
229/*
230 * Ok, set the CPU frequency.
231 */
232static int sa1110_target(struct cpufreq_policy *policy,
233 unsigned int target_freq,
234 unsigned int relation)
235{
236 struct sdram_params *sdram = &sdram_params;
237 struct cpufreq_freqs freqs;
238 struct sdram_info sd;
239 unsigned long flags;
240 unsigned int ppcr, unused;
241
242 switch (relation) {
243 case CPUFREQ_RELATION_L:
244 ppcr = sa11x0_freq_to_ppcr(target_freq);
245 if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
246 ppcr--;
247 break;
248 case CPUFREQ_RELATION_H:
249 ppcr = sa11x0_freq_to_ppcr(target_freq);
250 if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
251 (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
252 ppcr--;
253 break;
254 default:
255 return -EINVAL;
256 }
257
258 freqs.old = sa11x0_getspeed(0);
259 freqs.new = sa11x0_ppcr_to_freq(ppcr);
260
261 sdram_calculate_timing(&sd, freqs.new, sdram);
262
263#if 0
264 /*
265 * These values are wrong according to the SA1110 documentation
266 * and errata, but they seem to work. Need to get a storage
267 * scope on to the SDRAM signals to work out why.
268 */
269 if (policy->max < 147500) {
270 sd.mdrefr |= MDREFR_K1DB2;
271 sd.mdcas[0] = 0xaaaaaa7f;
272 } else {
273 sd.mdrefr &= ~MDREFR_K1DB2;
274 sd.mdcas[0] = 0xaaaaaa9f;
275 }
276 sd.mdcas[1] = 0xaaaaaaaa;
277 sd.mdcas[2] = 0xaaaaaaaa;
278#endif
279
280 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
281
282 /*
283 * The clock could be going away for some time. Set the SDRAMs
284 * to refresh rapidly (every 64 memory clock cycles). To get
285 * through the whole array, we need to wait 262144 mclk cycles.
286 * We wait 20ms to be safe.
287 */
288 sdram_set_refresh(2);
289 if (!irqs_disabled())
290 msleep(20);
291 else
292 mdelay(20);
293
294 /*
295 * Reprogram the DRAM timings with interrupts disabled, and
296 * ensure that we are doing this within a complete cache line.
297 * This means that we won't access SDRAM for the duration of
298 * the programming.
299 */
300 local_irq_save(flags);
301 asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
302 udelay(10);
303 __asm__ __volatile__("\n\
304 b 2f \n\
305 .align 5 \n\
3061: str %3, [%1, #0] @ MDCNFG \n\
307 str %4, [%1, #28] @ MDREFR \n\
308 str %5, [%1, #4] @ MDCAS0 \n\
309 str %6, [%1, #8] @ MDCAS1 \n\
310 str %7, [%1, #12] @ MDCAS2 \n\
311 str %8, [%2, #0] @ PPCR \n\
312 ldr %0, [%1, #0] \n\
313 b 3f \n\
3142: b 1b \n\
3153: nop \n\
316 nop"
317 : "=&r" (unused)
318 : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
319 "r" (sd.mdrefr), "r" (sd.mdcas[0]),
320 "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
321 local_irq_restore(flags);
322
323 /*
324 * Now, return the SDRAM refresh back to normal.
325 */
326 sdram_update_refresh(freqs.new, sdram);
327
328 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
329
330 return 0;
331}
332
333static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
334{
335 if (policy->cpu != 0)
336 return -EINVAL;
337 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
338 policy->cpuinfo.min_freq = 59000;
339 policy->cpuinfo.max_freq = 287000;
340 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
341 return 0;
342}
343
344/* sa1110_driver needs __refdata because it must remain after init registers
345 * it with cpufreq_register_driver() */
346static struct cpufreq_driver sa1110_driver __refdata = {
347 .flags = CPUFREQ_STICKY,
348 .verify = sa11x0_verify_speed,
349 .target = sa1110_target,
350 .get = sa11x0_getspeed,
351 .init = sa1110_cpu_init,
352 .name = "sa1110",
353};
354
355static struct sdram_params *sa1110_find_sdram(const char *name)
356{
357 struct sdram_params *sdram;
358
359 for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
360 sdram++)
361 if (strcmp(name, sdram->name) == 0)
362 return sdram;
363
364 return NULL;
365}
366
367static char sdram_name[16];
368
369static int __init sa1110_clk_init(void)
370{
371 struct sdram_params *sdram;
372 const char *name = sdram_name;
373
374 if (!cpu_is_sa1110())
375 return -ENODEV;
376
377 if (!name[0]) {
378 if (machine_is_assabet())
379 name = "TC59SM716-CL3";
380 if (machine_is_pt_system3())
381 name = "K4S641632D";
382 if (machine_is_h3100())
383 name = "KM416S4030CT";
384 if (machine_is_jornada720())
385 name = "K4S281632B-1H";
386 if (machine_is_nanoengine())
387 name = "MT48LC8M16A2TG-75";
388 }
389
390 sdram = sa1110_find_sdram(name);
391 if (sdram) {
392 printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
393 " twr: %d refresh: %d cas_latency: %d\n",
394 sdram->tck, sdram->trcd, sdram->trp,
395 sdram->twr, sdram->refresh, sdram->cas_latency);
396
397 memcpy(&sdram_params, sdram, sizeof(sdram_params));
398
399 return cpufreq_register_driver(&sa1110_driver);
400 }
401
402 return 0;
403}
404
405module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
406arch_initcall(sa1110_clk_init);
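
To make the arithmetic in ns_to_cycles() and sdram_update_refresh() concrete, here is a small stand-alone sketch. The 221.2 MHz core clock is an assumption for illustration; the SDRAM numbers (tck = 10 ns, trp = 20 ns, 64 ms refresh over 2^12 rows) are the TC59SM716-CL2 entry from the table above:

#include <stdio.h>

/* Same round-up conversion as the driver's ns_to_cycles(). */
static unsigned int ns_to_cycles(unsigned int ns, unsigned int khz)
{
	return (ns * khz + 999999) / 1000000;
}

int main(void)
{
	unsigned int cpu_khz = 221200;		/* assumed 221.2 MHz core clock */
	unsigned int mem_khz = cpu_khz / 2;	/* memory clock is cpu / 2 */
	unsigned int tck = 10, trp = 20;	/* TC59SM716-CL2 timings, in ns */
	unsigned int refresh = 64000, rows = 12;
	unsigned int ns_row = (refresh * 1000) >> rows;

	/* tck needs 2 memory cycles here, so SDCLK runs at half speed */
	printf("tck cycles: %u\n", ns_to_cycles(tck, mem_khz));

	/* tRP in cycles minus one, as programmed into MDCNFG (prints 2) */
	printf("trp field:  %u\n", ns_to_cycles(trp, mem_khz) - 1);

	/* per-row refresh interval -> DRI field in units of 32 cycles (54) */
	printf("dri:        %u\n", ns_to_cycles(ns_row, mem_khz) / 32);

	return 0;
}
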
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index e42e073cd9b8..f740b134d27b 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,7 +53,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
 	}
 }
 
-static void sc520_freq_set_cpu_state(unsigned int state)
+static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
+				     unsigned int state)
 {
 
 	struct cpufreq_freqs freqs;
@@ -61,9 +62,8 @@ static void sc520_freq_set_cpu_state(unsigned int state)
 
 	freqs.old = sc520_freq_get_cpu_frequency(0);
 	freqs.new = sc520_freq_table[state].frequency;
-	freqs.cpu = 0; /* AMD Elan is UP */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	pr_debug("attempting to set frequency to %i kHz\n",
 			sc520_freq_table[state].frequency);
@@ -75,7 +75,7 @@ static void sc520_freq_set_cpu_state(unsigned int state)
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 static int sc520_freq_verify(struct cpufreq_policy *policy)
@@ -93,7 +93,7 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
 			target_freq, relation, &newstate))
 		return -EINVAL;
 
-	sc520_freq_set_cpu_state(newstate);
+	sc520_freq_set_cpu_state(policy, newstate);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
new file mode 100644
index 000000000000..73adb64651e8
--- /dev/null
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -0,0 +1,189 @@
1/*
2 * cpufreq driver for the SuperH processors.
3 *
4 * Copyright (C) 2002 - 2012 Paul Mundt
5 * Copyright (C) 2002 M. R. Brown
6 *
7 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
8 *
9 * Copyright (C) 2004-2007 Atmel Corporation
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#define pr_fmt(fmt) "cpufreq: " fmt
16
17#include <linux/types.h>
18#include <linux/cpufreq.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/err.h>
23#include <linux/cpumask.h>
24#include <linux/cpu.h>
25#include <linux/smp.h>
26#include <linux/sched.h> /* set_cpus_allowed() */
27#include <linux/clk.h>
28#include <linux/percpu.h>
29#include <linux/sh_clk.h>
30
31static DEFINE_PER_CPU(struct clk, sh_cpuclk);
32
33static unsigned int sh_cpufreq_get(unsigned int cpu)
34{
35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
36}
37
38/*
39 * Here we notify other drivers of the proposed change and the final change.
40 */
41static int sh_cpufreq_target(struct cpufreq_policy *policy,
42 unsigned int target_freq,
43 unsigned int relation)
44{
45 unsigned int cpu = policy->cpu;
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
47 cpumask_t cpus_allowed;
48 struct cpufreq_freqs freqs;
49 struct device *dev;
50 long freq;
51
52 cpus_allowed = current->cpus_allowed;
53 set_cpus_allowed_ptr(current, cpumask_of(cpu));
54
55 BUG_ON(smp_processor_id() != cpu);
56
57 dev = get_cpu_device(cpu);
58
59 /* Convert target_freq from kHz to Hz */
60 freq = clk_round_rate(cpuclk, target_freq * 1000);
61
62 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
63 return -EINVAL;
64
65 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
66
67 freqs.old = sh_cpufreq_get(cpu);
68 freqs.new = (freq + 500) / 1000;
69 freqs.flags = 0;
70
71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
72 set_cpus_allowed_ptr(current, &cpus_allowed);
73 clk_set_rate(cpuclk, freq);
74 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
75
76 dev_dbg(dev, "set frequency %lu Hz\n", freq);
77
78 return 0;
79}
80
81static int sh_cpufreq_verify(struct cpufreq_policy *policy)
82{
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
84 struct cpufreq_frequency_table *freq_table;
85
86 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
87 if (freq_table)
88 return cpufreq_frequency_table_verify(policy, freq_table);
89
90 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
91 policy->cpuinfo.max_freq);
92
93 policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
94 policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
95
96 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
97 policy->cpuinfo.max_freq);
98
99 return 0;
100}
101
102static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
103{
104 unsigned int cpu = policy->cpu;
105 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
106 struct cpufreq_frequency_table *freq_table;
107 struct device *dev;
108
109 dev = get_cpu_device(cpu);
110
111 cpuclk = clk_get(dev, "cpu_clk");
112 if (IS_ERR(cpuclk)) {
113 dev_err(dev, "couldn't get CPU clk\n");
114 return PTR_ERR(cpuclk);
115 }
116
117 policy->cur = sh_cpufreq_get(cpu);
118
119 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
120 if (freq_table) {
121 int result;
122
123 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
124 if (!result)
125 cpufreq_frequency_table_get_attr(freq_table, cpu);
126 } else {
127 dev_notice(dev, "no frequency table found, falling back "
128 "to rate rounding.\n");
129
130 policy->min = policy->cpuinfo.min_freq =
131 (clk_round_rate(cpuclk, 1) + 500) / 1000;
132 policy->max = policy->cpuinfo.max_freq =
133 (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
134 }
135
136 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
137
138 dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
139 "Maximum %u.%03u MHz.\n",
140 policy->min / 1000, policy->min % 1000,
141 policy->max / 1000, policy->max % 1000);
142
143 return 0;
144}
145
146static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
147{
148 unsigned int cpu = policy->cpu;
149 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
150
151 cpufreq_frequency_table_put_attr(cpu);
152 clk_put(cpuclk);
153
154 return 0;
155}
156
157static struct freq_attr *sh_freq_attr[] = {
158 &cpufreq_freq_attr_scaling_available_freqs,
159 NULL,
160};
161
162static struct cpufreq_driver sh_cpufreq_driver = {
163 .owner = THIS_MODULE,
164 .name = "sh",
165 .get = sh_cpufreq_get,
166 .target = sh_cpufreq_target,
167 .verify = sh_cpufreq_verify,
168 .init = sh_cpufreq_cpu_init,
169 .exit = sh_cpufreq_cpu_exit,
170 .attr = sh_freq_attr,
171};
172
173static int __init sh_cpufreq_module_init(void)
174{
175 pr_notice("SuperH CPU frequency driver.\n");
176 return cpufreq_register_driver(&sh_cpufreq_driver);
177}
178
179static void __exit sh_cpufreq_module_exit(void)
180{
181 cpufreq_unregister_driver(&sh_cpufreq_driver);
182}
183
184module_init(sh_cpufreq_module_init);
185module_exit(sh_cpufreq_module_exit);
186
187MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
188MODULE_DESCRIPTION("cpufreq driver for SuperH");
189MODULE_LICENSE("GPL");
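
When the SH driver has no frequency table it derives the policy bounds from the clock framework, and everywhere it crosses that boundary it converts the clk rate in Hz to cpufreq's kHz with round-to-nearest. A one-line helper makes the convention explicit; hz_to_khz() is illustrative, not part of this driver:

/* Round a clk framework rate (Hz) to the nearest cpufreq value (kHz),
 * the same (rate + 500) / 1000 pattern used throughout the code above. */
static inline unsigned int hz_to_khz(unsigned long hz)
{
	return (hz + 500) / 1000;
}
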
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
new file mode 100644
index 000000000000..306ae462bba6
--- /dev/null
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -0,0 +1,408 @@
1/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18
19#include <asm/asi.h>
20#include <asm/timer.h>
21
22static struct cpufreq_driver *cpufreq_us2e_driver;
23
24struct us2e_freq_percpu_info {
25 struct cpufreq_frequency_table table[6];
26};
27
28/* Indexed by cpu number. */
29static struct us2e_freq_percpu_info *us2e_freq_table;
30
31#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
32#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
33
34/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
35 * in the ESTAR mode control register.
36 */
37#define ESTAR_MODE_DIV_1 0x0000000000000000UL
38#define ESTAR_MODE_DIV_2 0x0000000000000001UL
39#define ESTAR_MODE_DIV_4 0x0000000000000003UL
40#define ESTAR_MODE_DIV_6 0x0000000000000002UL
41#define ESTAR_MODE_DIV_8 0x0000000000000004UL
42#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
43
44#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
45#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
46#define MCTRL0_REFR_COUNT_SHIFT 8
47#define MCTRL0_REFR_INTERVAL 7800
48#define MCTRL0_REFR_CLKS_P_CNT 64
49
50static unsigned long read_hbreg(unsigned long addr)
51{
52 unsigned long ret;
53
54 __asm__ __volatile__("ldxa [%1] %2, %0"
55 : "=&r" (ret)
56 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
57 return ret;
58}
59
60static void write_hbreg(unsigned long addr, unsigned long val)
61{
62 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
63 "membar #Sync"
64 : /* no outputs */
65 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
66 : "memory");
67 if (addr == HBIRD_ESTAR_MODE_ADDR) {
68 /* Need to wait 16 clock cycles for the PLL to lock. */
69 udelay(1);
70 }
71}
72
73static void self_refresh_ctl(int enable)
74{
75 unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
76
77 if (enable)
78 mctrl |= MCTRL0_SREFRESH_ENAB;
79 else
80 mctrl &= ~MCTRL0_SREFRESH_ENAB;
81 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
82 (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
83}
84
85static void frob_mem_refresh(int cpu_slowing_down,
86 unsigned long clock_tick,
87 unsigned long old_divisor, unsigned long divisor)
88{
89 unsigned long old_refr_count, refr_count, mctrl;
90
91 refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
92 refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
93
94 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
95 old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
96 >> MCTRL0_REFR_COUNT_SHIFT;
97
98 mctrl &= ~MCTRL0_REFR_COUNT_MASK;
99 mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
100 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
101 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
102
103 if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
104 unsigned long usecs;
105
106 /* We have to wait for both refresh counts (old
107 * and new) to go to zero.
108 */
109 usecs = (MCTRL0_REFR_CLKS_P_CNT *
110 (refr_count + old_refr_count) *
111 1000000UL *
112 old_divisor) / clock_tick;
113 udelay(usecs + 1UL);
114 }
115}
116
117static void us2e_transition(unsigned long estar, unsigned long new_bits,
118 unsigned long clock_tick,
119 unsigned long old_divisor, unsigned long divisor)
120{
121 unsigned long flags;
122
123 local_irq_save(flags);
124
125 estar &= ~ESTAR_MODE_DIV_MASK;
126
127 /* This is based upon the state transition diagram in the IIe manual. */
128 if (old_divisor == 2 && divisor == 1) {
129 self_refresh_ctl(0);
130 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
131 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
132 } else if (old_divisor == 1 && divisor == 2) {
133 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
134 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
135 self_refresh_ctl(1);
136 } else if (old_divisor == 1 && divisor > 2) {
137 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
138 1, 2);
139 us2e_transition(estar, new_bits, clock_tick,
140 2, divisor);
141 } else if (old_divisor > 2 && divisor == 1) {
142 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
143 old_divisor, 2);
144 us2e_transition(estar, new_bits, clock_tick,
145 2, divisor);
146 } else if (old_divisor < divisor) {
147 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
148 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
149 } else if (old_divisor > divisor) {
150 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
151 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
152 } else {
153 BUG();
154 }
155
156 local_irq_restore(flags);
157}
158
159static unsigned long index_to_estar_mode(unsigned int index)
160{
161 switch (index) {
162 case 0:
163 return ESTAR_MODE_DIV_1;
164
165 case 1:
166 return ESTAR_MODE_DIV_2;
167
168 case 2:
169 return ESTAR_MODE_DIV_4;
170
171 case 3:
172 return ESTAR_MODE_DIV_6;
173
174 case 4:
175 return ESTAR_MODE_DIV_8;
176
177 default:
178 BUG();
179 }
180}
181
182static unsigned long index_to_divisor(unsigned int index)
183{
184 switch (index) {
185 case 0:
186 return 1;
187
188 case 1:
189 return 2;
190
191 case 2:
192 return 4;
193
194 case 3:
195 return 6;
196
197 case 4:
198 return 8;
199
200 default:
201 BUG();
202 }
203}
204
205static unsigned long estar_to_divisor(unsigned long estar)
206{
207 unsigned long ret;
208
209 switch (estar & ESTAR_MODE_DIV_MASK) {
210 case ESTAR_MODE_DIV_1:
211 ret = 1;
212 break;
213 case ESTAR_MODE_DIV_2:
214 ret = 2;
215 break;
216 case ESTAR_MODE_DIV_4:
217 ret = 4;
218 break;
219 case ESTAR_MODE_DIV_6:
220 ret = 6;
221 break;
222 case ESTAR_MODE_DIV_8:
223 ret = 8;
224 break;
225 default:
226 BUG();
227 }
228
229 return ret;
230}
231
232static unsigned int us2e_freq_get(unsigned int cpu)
233{
234 cpumask_t cpus_allowed;
235 unsigned long clock_tick, estar;
236
237 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
238 set_cpus_allowed_ptr(current, cpumask_of(cpu));
239
240 clock_tick = sparc64_get_clock_tick(cpu) / 1000;
241 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
242
243 set_cpus_allowed_ptr(current, &cpus_allowed);
244
245 return clock_tick / estar_to_divisor(estar);
246}
247
248static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
249 unsigned int index)
250{
251 unsigned int cpu = policy->cpu;
252 unsigned long new_bits, new_freq;
253 unsigned long clock_tick, divisor, old_divisor, estar;
254 cpumask_t cpus_allowed;
255 struct cpufreq_freqs freqs;
256
257 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
258 set_cpus_allowed_ptr(current, cpumask_of(cpu));
259
260 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
261 new_bits = index_to_estar_mode(index);
262 divisor = index_to_divisor(index);
263 new_freq /= divisor;
264
265 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
266
267 old_divisor = estar_to_divisor(estar);
268
269 freqs.old = clock_tick / old_divisor;
270 freqs.new = new_freq;
271 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
272
273 if (old_divisor != divisor)
274 us2e_transition(estar, new_bits, clock_tick * 1000,
275 old_divisor, divisor);
276
277 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
278
279 set_cpus_allowed_ptr(current, &cpus_allowed);
280}
281
282static int us2e_freq_target(struct cpufreq_policy *policy,
283 unsigned int target_freq,
284 unsigned int relation)
285{
286 unsigned int new_index = 0;
287
288 if (cpufreq_frequency_table_target(policy,
289 &us2e_freq_table[policy->cpu].table[0],
290 target_freq, relation, &new_index))
291 return -EINVAL;
292
293 us2e_set_cpu_divider_index(policy, new_index);
294
295 return 0;
296}
297
298static int us2e_freq_verify(struct cpufreq_policy *policy)
299{
300 return cpufreq_frequency_table_verify(policy,
301 &us2e_freq_table[policy->cpu].table[0]);
302}
303
304static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
305{
306 unsigned int cpu = policy->cpu;
307 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
308 struct cpufreq_frequency_table *table =
309 &us2e_freq_table[cpu].table[0];
310
311 table[0].index = 0;
312 table[0].frequency = clock_tick / 1;
313 table[1].index = 1;
314 table[1].frequency = clock_tick / 2;
315 table[2].index = 2;
316 table[2].frequency = clock_tick / 4;
317 table[2].index = 3;
318 table[2].frequency = clock_tick / 6;
319 table[2].index = 4;
320 table[2].frequency = clock_tick / 8;
321 table[2].index = 5;
322 table[3].frequency = CPUFREQ_TABLE_END;
323
324 policy->cpuinfo.transition_latency = 0;
325 policy->cur = clock_tick;
326
327 return cpufreq_frequency_table_cpuinfo(policy, table);
328}
329
330static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
331{
332 if (cpufreq_us2e_driver)
333 us2e_set_cpu_divider_index(policy, 0);
334
335 return 0;
336}
337
338static int __init us2e_freq_init(void)
339{
340 unsigned long manuf, impl, ver;
341 int ret;
342
343 if (tlb_type != spitfire)
344 return -ENODEV;
345
346 __asm__("rdpr %%ver, %0" : "=r" (ver));
347 manuf = ((ver >> 48) & 0xffff);
348 impl = ((ver >> 32) & 0xffff);
349
350 if (manuf == 0x17 && impl == 0x13) {
351 struct cpufreq_driver *driver;
352
353 ret = -ENOMEM;
354 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
355 if (!driver)
356 goto err_out;
357
358 us2e_freq_table = kzalloc(
359 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
360 GFP_KERNEL);
361 if (!us2e_freq_table)
362 goto err_out;
363
364 driver->init = us2e_freq_cpu_init;
365 driver->verify = us2e_freq_verify;
366 driver->target = us2e_freq_target;
367 driver->get = us2e_freq_get;
368 driver->exit = us2e_freq_cpu_exit;
369 driver->owner = THIS_MODULE,
370 strcpy(driver->name, "UltraSPARC-IIe");
371
372 cpufreq_us2e_driver = driver;
373 ret = cpufreq_register_driver(driver);
374 if (ret)
375 goto err_out;
376
377 return 0;
378
379err_out:
380 if (driver) {
381 kfree(driver);
382 cpufreq_us2e_driver = NULL;
383 }
384 kfree(us2e_freq_table);
385 us2e_freq_table = NULL;
386 return ret;
387 }
388
389 return -ENODEV;
390}
391
392static void __exit us2e_freq_exit(void)
393{
394 if (cpufreq_us2e_driver) {
395 cpufreq_unregister_driver(cpufreq_us2e_driver);
396 kfree(cpufreq_us2e_driver);
397 cpufreq_us2e_driver = NULL;
398 kfree(us2e_freq_table);
399 us2e_freq_table = NULL;
400 }
401}
402
403MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
404MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
405MODULE_LICENSE("GPL");
406
407module_init(us2e_freq_init);
408module_exit(us2e_freq_exit);
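
The transition rules in us2e_transition() follow the state diagram in the IIe manual: moving between divide-by-1 and any divisor above 2 must step through divide-by-2, with the memory refresh adjusted on the appropriate side of each write. A tiny user-space sketch of just the step sequence (illustration only; it does not touch the ESTAR register):

#include <stdio.h>

/* Print the divisor steps us2e_transition() takes (illustration only). */
static void print_steps(unsigned int old_div, unsigned int new_div)
{
	printf("%u", old_div);
	if ((old_div == 1 && new_div > 2) || (new_div == 1 && old_div > 2))
		printf(" -> 2");	/* must pass through divide-by-2 */
	printf(" -> %u\n", new_div);
}

int main(void)
{
	print_steps(1, 6);	/* 1 -> 2 -> 6 */
	print_steps(8, 1);	/* 8 -> 2 -> 1 */
	print_steps(2, 8);	/* 2 -> 8, direct */
	return 0;
}
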
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
new file mode 100644
index 000000000000..c71ee142347a
--- /dev/null
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -0,0 +1,269 @@
1/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17
18#include <asm/head.h>
19#include <asm/timer.h>
20
21static struct cpufreq_driver *cpufreq_us3_driver;
22
23struct us3_freq_percpu_info {
24 struct cpufreq_frequency_table table[4];
25};
26
27/* Indexed by cpu number. */
28static struct us3_freq_percpu_info *us3_freq_table;
29
30/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
31 * in the Safari config register.
32 */
33#define SAFARI_CFG_DIV_1 0x0000000000000000UL
34#define SAFARI_CFG_DIV_2 0x0000000040000000UL
35#define SAFARI_CFG_DIV_32 0x0000000080000000UL
36#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
37
38static unsigned long read_safari_cfg(void)
39{
40 unsigned long ret;
41
42 __asm__ __volatile__("ldxa [%%g0] %1, %0"
43 : "=&r" (ret)
44 : "i" (ASI_SAFARI_CONFIG));
45 return ret;
46}
47
48static void write_safari_cfg(unsigned long val)
49{
50 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
51 "membar #Sync"
52 : /* no outputs */
53 : "r" (val), "i" (ASI_SAFARI_CONFIG)
54 : "memory");
55}
56
57static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
58{
59 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
60 unsigned long ret;
61
62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
63 case SAFARI_CFG_DIV_1:
64 ret = clock_tick / 1;
65 break;
66 case SAFARI_CFG_DIV_2:
67 ret = clock_tick / 2;
68 break;
69 case SAFARI_CFG_DIV_32:
70 ret = clock_tick / 32;
71 break;
72 default:
73 BUG();
74 }
75
76 return ret;
77}
78
79static unsigned int us3_freq_get(unsigned int cpu)
80{
81 cpumask_t cpus_allowed;
82 unsigned long reg;
83 unsigned int ret;
84
85 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
86 set_cpus_allowed_ptr(current, cpumask_of(cpu));
87
88 reg = read_safari_cfg();
89 ret = get_current_freq(cpu, reg);
90
91 set_cpus_allowed_ptr(current, &cpus_allowed);
92
93 return ret;
94}
95
96static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
97 unsigned int index)
98{
99 unsigned int cpu = policy->cpu;
100 unsigned long new_bits, new_freq, reg;
101 cpumask_t cpus_allowed;
102 struct cpufreq_freqs freqs;
103
104 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
105 set_cpus_allowed_ptr(current, cpumask_of(cpu));
106
107 new_freq = sparc64_get_clock_tick(cpu) / 1000;
108 switch (index) {
109 case 0:
110 new_bits = SAFARI_CFG_DIV_1;
111 new_freq /= 1;
112 break;
113 case 1:
114 new_bits = SAFARI_CFG_DIV_2;
115 new_freq /= 2;
116 break;
117 case 2:
118 new_bits = SAFARI_CFG_DIV_32;
119 new_freq /= 32;
120 break;
121
122 default:
123 BUG();
124 }
125
126 reg = read_safari_cfg();
127
128 freqs.old = get_current_freq(cpu, reg);
129 freqs.new = new_freq;
130 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
131
132 reg &= ~SAFARI_CFG_DIV_MASK;
133 reg |= new_bits;
134 write_safari_cfg(reg);
135
136 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
137
138 set_cpus_allowed_ptr(current, &cpus_allowed);
139}
140
141static int us3_freq_target(struct cpufreq_policy *policy,
142 unsigned int target_freq,
143 unsigned int relation)
144{
145 unsigned int new_index = 0;
146
147 if (cpufreq_frequency_table_target(policy,
148 &us3_freq_table[policy->cpu].table[0],
149 target_freq,
150 relation,
151 &new_index))
152 return -EINVAL;
153
154 us3_set_cpu_divider_index(policy, new_index);
155
156 return 0;
157}
158
159static int us3_freq_verify(struct cpufreq_policy *policy)
160{
161 return cpufreq_frequency_table_verify(policy,
162 &us3_freq_table[policy->cpu].table[0]);
163}
164
165static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
166{
167 unsigned int cpu = policy->cpu;
168 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
169 struct cpufreq_frequency_table *table =
170 &us3_freq_table[cpu].table[0];
171
172 table[0].index = 0;
173 table[0].frequency = clock_tick / 1;
174 table[1].index = 1;
175 table[1].frequency = clock_tick / 2;
176 table[2].index = 2;
177 table[2].frequency = clock_tick / 32;
178 table[3].index = 0;
179 table[3].frequency = CPUFREQ_TABLE_END;
180
181 policy->cpuinfo.transition_latency = 0;
182 policy->cur = clock_tick;
183
184 return cpufreq_frequency_table_cpuinfo(policy, table);
185}
186
187static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
188{
189 if (cpufreq_us3_driver)
190 us3_set_cpu_divider_index(policy, 0);
191
192 return 0;
193}
194
195static int __init us3_freq_init(void)
196{
197 unsigned long manuf, impl, ver;
198 int ret;
199
200 if (tlb_type != cheetah && tlb_type != cheetah_plus)
201 return -ENODEV;
202
203 __asm__("rdpr %%ver, %0" : "=r" (ver));
204 manuf = ((ver >> 48) & 0xffff);
205 impl = ((ver >> 32) & 0xffff);
206
207 if (manuf == CHEETAH_MANUF &&
208 (impl == CHEETAH_IMPL ||
209 impl == CHEETAH_PLUS_IMPL ||
210 impl == JAGUAR_IMPL ||
211 impl == PANTHER_IMPL)) {
212 struct cpufreq_driver *driver;
213
214 ret = -ENOMEM;
215 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
216 if (!driver)
217 goto err_out;
218
219 us3_freq_table = kzalloc(
220 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
221 GFP_KERNEL);
222 if (!us3_freq_table)
223 goto err_out;
224
225 driver->init = us3_freq_cpu_init;
226 driver->verify = us3_freq_verify;
227 driver->target = us3_freq_target;
228 driver->get = us3_freq_get;
229 driver->exit = us3_freq_cpu_exit;
230 driver->owner = THIS_MODULE,
231 strcpy(driver->name, "UltraSPARC-III");
232
233 cpufreq_us3_driver = driver;
234 ret = cpufreq_register_driver(driver);
235 if (ret)
236 goto err_out;
237
238 return 0;
239
240err_out:
241 if (driver) {
242 kfree(driver);
243 cpufreq_us3_driver = NULL;
244 }
245 kfree(us3_freq_table);
246 us3_freq_table = NULL;
247 return ret;
248 }
249
250 return -ENODEV;
251}
252
253static void __exit us3_freq_exit(void)
254{
255 if (cpufreq_us3_driver) {
256 cpufreq_unregister_driver(cpufreq_us3_driver);
257 kfree(cpufreq_us3_driver);
258 cpufreq_us3_driver = NULL;
259 kfree(us3_freq_table);
260 us3_freq_table = NULL;
261 }
262}
263
264MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
265MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
266MODULE_LICENSE("GPL");
267
268module_init(us3_freq_init);
269module_exit(us3_freq_exit);
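
us3_freq_cpu_init() exposes exactly the three Safari-config dividers as operating points. A small stand-alone sketch of what the table works out to; the 1.2 GHz clock tick is an assumed value for illustration:

#include <stdio.h>

/* The three operating points us3_freq_cpu_init() builds, here for an
 * assumed 1.2 GHz (1200000 kHz) clock tick: 1200000, 600000, 37500 kHz. */
int main(void)
{
	unsigned int clock_tick = 1200000;	/* kHz, assumed */
	unsigned int divisors[] = { 1, 2, 32 };	/* Safari config dividers */
	int i;

	for (i = 0; i < 3; i++)
		printf("index %d: %u kHz\n", i, clock_tick / divisors[i]);

	return 0;
}
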
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7e4d77327957..156829f4576d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -121,7 +121,6 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 			target_freq, relation, &index))
 		return -EINVAL;
 
-	freqs.cpu = policy->cpu;
 	freqs.old = spear_cpufreq_get(0);
 
 	newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
@@ -158,8 +157,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.new = newfreq / 1000;
 	freqs.new /= mult;
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (mult == 2)
 		ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -172,8 +170,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 		freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
 	}
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return ret;
 }
 
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3a953d519f46..618e6f417b1c 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -457,7 +457,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
 	int			retval = 0;
-	unsigned int		j, k, first_cpu, tmp;
+	unsigned int		j, first_cpu, tmp;
 	cpumask_var_t covered_cpus;
 
 	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -481,10 +481,6 @@ static int centrino_target (struct cpufreq_policy *policy,
 	for_each_cpu(j, policy->cpus) {
 		int good_cpu;
 
-		/* cpufreq holds the hotplug lock, so we are safe here */
-		if (!cpu_online(j))
-			continue;
-
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
@@ -522,13 +518,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 		pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);
 
-		for_each_cpu(k, policy->cpus) {
-			if (!cpu_online(k))
-				continue;
-			freqs.cpu = k;
-			cpufreq_notify_transition(&freqs,
-					CPUFREQ_PRECHANGE);
-		}
+		cpufreq_notify_transition(policy, &freqs,
+					CPUFREQ_PRECHANGE);
 
 		first_cpu = 0;
 		/* all but 16 LSB are reserved, treat them with care */
@@ -544,12 +535,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		cpumask_set_cpu(j, covered_cpus);
 	}
 
-	for_each_cpu(k, policy->cpus) {
-		if (!cpu_online(k))
-			continue;
-		freqs.cpu = k;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	if (unlikely(retval)) {
 		/*
@@ -565,12 +551,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu(j, policy->cpus) {
-			if (!cpu_online(j))
-				continue;
-			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-		}
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	}
 	retval = 0;
 
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e29b59aa68a8..e2e5aa971452 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -263,7 +263,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
 {
 	unsigned int newstate = 0, policy_cpu;
 	struct cpufreq_freqs freqs;
-	int i;
 
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
 				target_freq, relation, &newstate))
@@ -272,7 +271,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
 	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
 	freqs.old = speedstep_get(policy_cpu);
 	freqs.new = speedstep_freqs[newstate].frequency;
-	freqs.cpu = policy->cpu;
 
 	pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
 
@@ -280,18 +278,12 @@ static int speedstep_target(struct cpufreq_policy *policy,
 	if (freqs.old == freqs.new)
 		return 0;
 
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
 				 true);
 
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 6a457fcaaad5..f5a6b70ee6c0 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -252,14 +252,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
 
 	freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
 	freqs.new = speedstep_freqs[newstate].frequency;
-	freqs.cpu = 0; /* speedstep.c is UP only driver */
 
 	if (freqs.old == freqs.new)
 		return 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	speedstep_set_state(newstate);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
new file mode 100644
index 000000000000..c74c0e130ef4
--- /dev/null
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Colin Cross <ccross@google.com>
6 * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/cpufreq.h>
24#include <linux/delay.h>
25#include <linux/init.h>
26#include <linux/err.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/suspend.h>
30
31/* Frequency table index must be sequential starting at 0 */
32static struct cpufreq_frequency_table freq_table[] = {
33 { 0, 216000 },
34 { 1, 312000 },
35 { 2, 456000 },
36 { 3, 608000 },
37 { 4, 760000 },
38 { 5, 816000 },
39 { 6, 912000 },
40 { 7, 1000000 },
41 { 8, CPUFREQ_TABLE_END },
42};
43
44#define NUM_CPUS 2
45
46static struct clk *cpu_clk;
47static struct clk *pll_x_clk;
48static struct clk *pll_p_clk;
49static struct clk *emc_clk;
50
51static unsigned long target_cpu_speed[NUM_CPUS];
52static DEFINE_MUTEX(tegra_cpu_lock);
53static bool is_suspended;
54
55static int tegra_verify_speed(struct cpufreq_policy *policy)
56{
57 return cpufreq_frequency_table_verify(policy, freq_table);
58}
59
60static unsigned int tegra_getspeed(unsigned int cpu)
61{
62 unsigned long rate;
63
64 if (cpu >= NUM_CPUS)
65 return 0;
66
67 rate = clk_get_rate(cpu_clk) / 1000;
68 return rate;
69}
70
71static int tegra_cpu_clk_set_rate(unsigned long rate)
72{
73 int ret;
74
75 /*
76 * Take an extra reference to the main pll so it doesn't turn
77 * off when we move the cpu off of it
78 */
79 clk_prepare_enable(pll_x_clk);
80
81 ret = clk_set_parent(cpu_clk, pll_p_clk);
82 if (ret) {
83 pr_err("Failed to switch cpu to clock pll_p\n");
84 goto out;
85 }
86
87 if (rate == clk_get_rate(pll_p_clk))
88 goto out;
89
90 ret = clk_set_rate(pll_x_clk, rate);
91 if (ret) {
92 pr_err("Failed to change pll_x to %lu\n", rate);
93 goto out;
94 }
95
96 ret = clk_set_parent(cpu_clk, pll_x_clk);
97 if (ret) {
98 pr_err("Failed to switch cpu to clock pll_x\n");
99 goto out;
100 }
101
102out:
103 clk_disable_unprepare(pll_x_clk);
104 return ret;
105}
106
107static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
108 unsigned long rate)
109{
110 int ret = 0;
111 struct cpufreq_freqs freqs;
112
113 freqs.old = tegra_getspeed(0);
114 freqs.new = rate;
115
116 if (freqs.old == freqs.new)
117 return ret;
118
119 /*
120 * Vote on memory bus frequency based on cpu frequency
121 * This sets the minimum frequency, display or avp may request higher
122 */
123 if (rate >= 816000)
124 clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
125 else if (rate >= 456000)
126 clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */
127 else
128 clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
129
130 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
131
132#ifdef CONFIG_CPU_FREQ_DEBUG
133 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
134 freqs.old, freqs.new);
135#endif
136
137 ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
138 if (ret) {
139 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
140 freqs.new);
141 return ret;
142 }
143
144 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
145
146 return 0;
147}
148
149static unsigned long tegra_cpu_highest_speed(void)
150{
151 unsigned long rate = 0;
152 int i;
153
154 for_each_online_cpu(i)
155 rate = max(rate, target_cpu_speed[i]);
156 return rate;
157}
158
159static int tegra_target(struct cpufreq_policy *policy,
160 unsigned int target_freq,
161 unsigned int relation)
162{
163 unsigned int idx;
164 unsigned int freq;
165 int ret = 0;
166
167 mutex_lock(&tegra_cpu_lock);
168
169 if (is_suspended) {
170 ret = -EBUSY;
171 goto out;
172 }
173
174 cpufreq_frequency_table_target(policy, freq_table, target_freq,
175 relation, &idx);
176
177 freq = freq_table[idx].frequency;
178
179 target_cpu_speed[policy->cpu] = freq;
180
181 ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
182
183out:
184 mutex_unlock(&tegra_cpu_lock);
185 return ret;
186}
187
188static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
189 void *dummy)
190{
191 mutex_lock(&tegra_cpu_lock);
192 if (event == PM_SUSPEND_PREPARE) {
193 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
194 is_suspended = true;
195 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
196 freq_table[0].frequency);
197 tegra_update_cpu_speed(policy, freq_table[0].frequency);
198 cpufreq_cpu_put(policy);
199 } else if (event == PM_POST_SUSPEND) {
200 is_suspended = false;
201 }
202 mutex_unlock(&tegra_cpu_lock);
203
204 return NOTIFY_OK;
205}
206
207static struct notifier_block tegra_cpu_pm_notifier = {
208 .notifier_call = tegra_pm_notify,
209};
210
211static int tegra_cpu_init(struct cpufreq_policy *policy)
212{
213 if (policy->cpu >= NUM_CPUS)
214 return -EINVAL;
215
216 clk_prepare_enable(emc_clk);
217 clk_prepare_enable(cpu_clk);
218
219 cpufreq_frequency_table_cpuinfo(policy, freq_table);
220 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
221 policy->cur = tegra_getspeed(policy->cpu);
222 target_cpu_speed[policy->cpu] = policy->cur;
223
224 /* FIXME: what's the actual transition time? */
225 policy->cpuinfo.transition_latency = 300 * 1000;
226
227 cpumask_copy(policy->cpus, cpu_possible_mask);
228
229 if (policy->cpu == 0)
230 register_pm_notifier(&tegra_cpu_pm_notifier);
231
232 return 0;
233}
234
235static int tegra_cpu_exit(struct cpufreq_policy *policy)
236{
237 cpufreq_frequency_table_cpuinfo(policy, freq_table);
238 clk_disable_unprepare(emc_clk);
239 return 0;
240}
241
242static struct freq_attr *tegra_cpufreq_attr[] = {
243 &cpufreq_freq_attr_scaling_available_freqs,
244 NULL,
245};
246
247static struct cpufreq_driver tegra_cpufreq_driver = {
248 .verify = tegra_verify_speed,
249 .target = tegra_target,
250 .get = tegra_getspeed,
251 .init = tegra_cpu_init,
252 .exit = tegra_cpu_exit,
253 .name = "tegra",
254 .attr = tegra_cpufreq_attr,
255};
256
257static int __init tegra_cpufreq_init(void)
258{
259 cpu_clk = clk_get_sys(NULL, "cpu");
260 if (IS_ERR(cpu_clk))
261 return PTR_ERR(cpu_clk);
262
263 pll_x_clk = clk_get_sys(NULL, "pll_x");
264 if (IS_ERR(pll_x_clk))
265 return PTR_ERR(pll_x_clk);
266
267 pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
268 if (IS_ERR(pll_p_clk))
269 return PTR_ERR(pll_p_clk);
270
271 emc_clk = clk_get_sys("cpu", "emc");
272 if (IS_ERR(emc_clk)) {
273 clk_put(cpu_clk);
274 return PTR_ERR(emc_clk);
275 }
276
277 return cpufreq_register_driver(&tegra_cpufreq_driver);
278}
279
280static void __exit tegra_cpufreq_exit(void)
281{
282 cpufreq_unregister_driver(&tegra_cpufreq_driver);
283 clk_put(emc_clk);
284 clk_put(cpu_clk);
285}
286
287
288MODULE_AUTHOR("Colin Cross <ccross@android.com>");
289MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
290MODULE_LICENSE("GPL");
291module_init(tegra_cpufreq_init);
292module_exit(tegra_cpufreq_exit);
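
The new Tegra driver above resolves every request against its static freq_table with cpufreq_frequency_table_target() and then programs the highest per-CPU vote. The lookup step in isolation, with an abbreviated table; my_table and my_pick_freq are illustrative names, not part of the driver:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table my_table[] = {
        { .index = 0, .frequency = 216000 },
        { .index = 1, .frequency = 456000 },
        { .index = 2, .frequency = 1000000 },
        { .index = 3, .frequency = CPUFREQ_TABLE_END },
};

static unsigned int my_pick_freq(struct cpufreq_policy *policy,
                                 unsigned int target_khz,
                                 unsigned int relation)
{
        unsigned int idx;

        /* considers only entries within policy->min/max and picks the one
         * matching 'relation' (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) */
        if (cpufreq_frequency_table_target(policy, my_table, target_khz,
                                           relation, &idx))
                return 0;               /* no usable entry */

        return my_table[idx].frequency;
}
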
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
new file mode 100644
index 000000000000..12fc904d7dab
--- /dev/null
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -0,0 +1,92 @@
1/*
2 * clock scaling for the UniCore-II
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/clk.h>
18#include <linux/cpufreq.h>
19
20#include <mach/hardware.h>
21
22static struct cpufreq_driver ucv2_driver;
23
24/* make sure that only the "userspace" governor is run
25 * -- anything else wouldn't make sense on this platform, anyway.
26 */
27int ucv2_verify_speed(struct cpufreq_policy *policy)
28{
29 if (policy->cpu)
30 return -EINVAL;
31
32 cpufreq_verify_within_limits(policy,
33 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
34
35 return 0;
36}
37
38static unsigned int ucv2_getspeed(unsigned int cpu)
39{
40 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
41
42 if (cpu)
43 return 0;
44 return clk_get_rate(mclk)/1000;
45}
46
47static int ucv2_target(struct cpufreq_policy *policy,
48 unsigned int target_freq,
49 unsigned int relation)
50{
51 unsigned int cur = ucv2_getspeed(0);
52 struct cpufreq_freqs freqs;
53 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
54
55 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
56
57 if (!clk_set_rate(mclk, target_freq * 1000)) {
58 freqs.old = cur;
59 freqs.new = target_freq;
60 }
61
62 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
63
64 return 0;
65}
66
67static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
68{
69 if (policy->cpu != 0)
70 return -EINVAL;
71 policy->cur = ucv2_getspeed(0);
72 policy->min = policy->cpuinfo.min_freq = 250000;
73 policy->max = policy->cpuinfo.max_freq = 1000000;
74 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
75 return 0;
76}
77
78static struct cpufreq_driver ucv2_driver = {
79 .flags = CPUFREQ_STICKY,
80 .verify = ucv2_verify_speed,
81 .target = ucv2_target,
82 .get = ucv2_getspeed,
83 .init = ucv2_cpu_init,
84 .name = "UniCore-II",
85};
86
87static int __init ucv2_cpufreq_init(void)
88{
89 return cpufreq_register_driver(&ucv2_driver);
90}
91
92arch_initcall(ucv2_cpufreq_init);
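
One quirk worth flagging in ucv2_target() above: the CPUFREQ_PRECHANGE notification is sent before freqs.old and freqs.new are assigned, and they are only filled in when clk_set_rate() succeeds, so notifier listeners can observe an uninitialized cpufreq_freqs; the clk_get() reference is also never dropped. For comparison only, a sketch of the more conventional ordering as it would sit in this file (this is not the code being merged):

static int ucv2_target_sketch(struct cpufreq_policy *policy,
                              unsigned int target_freq,
                              unsigned int relation)
{
        struct clk *mclk = clk_get(NULL, "MAIN_CLK");
        struct cpufreq_freqs freqs;
        int ret;

        freqs.old = ucv2_getspeed(0);
        freqs.new = target_freq;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        ret = clk_set_rate(mclk, target_freq * 1000);
        if (ret)
                freqs.new = freqs.old;  /* report that nothing changed */
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        clk_put(mclk);
        return ret;
}
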
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 071e2c3eec4f..c4cc27e5c8a5 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,10 +39,4 @@ config CPU_IDLE_CALXEDA
39 help 39 help
40 Select this to enable cpuidle on Calxeda processors. 40 Select this to enable cpuidle on Calxeda processors.
41 41
42config CPU_IDLE_KIRKWOOD
43 bool "CPU Idle Driver for Kirkwood processors"
44 depends on ARCH_KIRKWOOD
45 help
46 Select this to enable cpuidle on Kirkwood processors.
47
48endif 42endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 24c6e7d945ed..0d8bd55e776f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,4 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
6obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o 6obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
7 7
8obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o 8obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
9obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o 9obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index e1aab38c5a8d..223379169cb0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright 2012 Calxeda, Inc. 2 * Copyright 2012 Calxeda, Inc.
3 * 3 *
4 * Based on arch/arm/plat-mxc/cpuidle.c: 4 * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7
5 * Copyright 2012 Freescale Semiconductor, Inc. 5 * Copyright 2012 Freescale Semiconductor, Inc.
6 * Copyright 2012 Linaro Ltd. 6 * Copyright 2012 Linaro Ltd.
7 * 7 *
@@ -16,6 +16,8 @@
16 * 16 *
17 * You should have received a copy of the GNU General Public License along with 17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>. 18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Maintainer: Rob Herring <rob.herring@calxeda.com>
19 */ 21 */
20 22
21#include <linux/cpuidle.h> 23#include <linux/cpuidle.h>
@@ -35,8 +37,6 @@
35extern void highbank_set_cpu_jump(int cpu, void *jump_addr); 37extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
36extern void *scu_base_addr; 38extern void *scu_base_addr;
37 39
38static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
39
40static inline unsigned int get_auxcr(void) 40static inline unsigned int get_auxcr(void)
41{ 41{
42 unsigned int val; 42 unsigned int val;
@@ -85,22 +85,8 @@ static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
85 return index; 85 return index;
86} 86}
87 87
88static void calxeda_idle_cpuidle_devices_uninit(void)
89{
90 int i;
91 struct cpuidle_device *dev;
92
93 for_each_possible_cpu(i) {
94 dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
95 cpuidle_unregister_device(dev);
96 }
97
98 free_percpu(calxeda_idle_cpuidle_devices);
99}
100
101static struct cpuidle_driver calxeda_idle_driver = { 88static struct cpuidle_driver calxeda_idle_driver = {
102 .name = "calxeda_idle", 89 .name = "calxeda_idle",
103 .en_core_tk_irqen = 1,
104 .states = { 90 .states = {
105 ARM_CPUIDLE_WFI_STATE, 91 ARM_CPUIDLE_WFI_STATE,
106 { 92 {
@@ -118,44 +104,9 @@ static struct cpuidle_driver calxeda_idle_driver = {
118 104
119static int __init calxeda_cpuidle_init(void) 105static int __init calxeda_cpuidle_init(void)
120{ 106{
121 int cpu_id;
122 int ret;
123 struct cpuidle_device *dev;
124 struct cpuidle_driver *drv = &calxeda_idle_driver;
125
126 if (!of_machine_is_compatible("calxeda,highbank")) 107 if (!of_machine_is_compatible("calxeda,highbank"))
127 return -ENODEV; 108 return -ENODEV;
128 109
129 ret = cpuidle_register_driver(drv); 110 return cpuidle_register(&calxeda_idle_driver, NULL);
130 if (ret)
131 return ret;
132
133 calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
134 if (calxeda_idle_cpuidle_devices == NULL) {
135 ret = -ENOMEM;
136 goto unregister_drv;
137 }
138
139 /* initialize state data for each cpuidle_device */
140 for_each_possible_cpu(cpu_id) {
141 dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
142 dev->cpu = cpu_id;
143 dev->state_count = drv->state_count;
144
145 ret = cpuidle_register_device(dev);
146 if (ret) {
147 pr_err("Failed to register cpu %u, error: %d\n",
148 cpu_id, ret);
149 goto uninit;
150 }
151 }
152
153 return 0;
154
155uninit:
156 calxeda_idle_cpuidle_devices_uninit();
157unregister_drv:
158 cpuidle_unregister_driver(drv);
159 return ret;
160} 111}
161module_init(calxeda_cpuidle_init); 112module_init(calxeda_cpuidle_init);
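
The calxeda conversion above is the pattern repeated across the ARM cpuidle drivers in this pull: the per-CPU cpuidle_device allocation, registration loop and error unwinding collapse into a single cpuidle_register() call, with NULL meaning no coupled-cpus mask. A minimal sketch of the resulting driver skeleton, assuming ARM for the WFI state macro; the my_idle_* names are illustrative:

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>                /* ARM_CPUIDLE_WFI_STATE; assumes ARM */

static struct cpuidle_driver my_idle_driver = {
        .name        = "my_idle",
        .owner       = THIS_MODULE,
        .states[0]   = ARM_CPUIDLE_WFI_STATE,
        .state_count = 1,
};

static int __init my_idle_init(void)
{
        /* registers the driver and one cpuidle device per possible CPU */
        return cpuidle_register(&my_idle_driver, NULL);
}

static void __exit my_idle_exit(void)
{
        /* unregisters the per-CPU devices, then the driver */
        cpuidle_unregister(&my_idle_driver);
}

module_init(my_idle_init);
module_exit(my_idle_exit);
MODULE_LICENSE("GPL");
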
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 670aa1e55cd6..521b0a7fdd89 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/arm/mach-kirkwood/cpuidle.c
3 *
4 * CPU idle Marvell Kirkwood SoCs 2 * CPU idle Marvell Kirkwood SoCs
5 * 3 *
6 * This file is licensed under the terms of the GNU General Public 4 * This file is licensed under the terms of the GNU General Public
@@ -11,6 +9,9 @@
11 * to implement two idle states - 9 * to implement two idle states -
12 * #1 wait-for-interrupt 10 * #1 wait-for-interrupt
13 * #2 wait-for-interrupt and DDR self refresh 11 * #2 wait-for-interrupt and DDR self refresh
12 *
13 * Maintainer: Jason Cooper <jason@lakedaemon.net>
14 * Maintainer: Andrew Lunn <andrew@lunn.ch>
14 */ 15 */
15 16
16#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -41,7 +42,6 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
41static struct cpuidle_driver kirkwood_idle_driver = { 42static struct cpuidle_driver kirkwood_idle_driver = {
42 .name = "kirkwood_idle", 43 .name = "kirkwood_idle",
43 .owner = THIS_MODULE, 44 .owner = THIS_MODULE,
44 .en_core_tk_irqen = 1,
45 .states[0] = ARM_CPUIDLE_WFI_STATE, 45 .states[0] = ARM_CPUIDLE_WFI_STATE,
46 .states[1] = { 46 .states[1] = {
47 .enter = kirkwood_enter_idle, 47 .enter = kirkwood_enter_idle,
@@ -53,9 +53,6 @@ static struct cpuidle_driver kirkwood_idle_driver = {
53 }, 53 },
54 .state_count = KIRKWOOD_MAX_STATES, 54 .state_count = KIRKWOOD_MAX_STATES,
55}; 55};
56static struct cpuidle_device *device;
57
58static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
59 56
60/* Initialize CPU idle by registering the idle states */ 57/* Initialize CPU idle by registering the idle states */
61static int kirkwood_cpuidle_probe(struct platform_device *pdev) 58static int kirkwood_cpuidle_probe(struct platform_device *pdev)
@@ -66,26 +63,16 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
66 if (res == NULL) 63 if (res == NULL)
67 return -EINVAL; 64 return -EINVAL;
68 65
69 ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res); 66 ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
70 if (!ddr_operation_base) 67 if (IS_ERR(ddr_operation_base))
71 return -EADDRNOTAVAIL; 68 return PTR_ERR(ddr_operation_base);
72 69
73 device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); 70 return cpuidle_register(&kirkwood_idle_driver, NULL);
74 device->state_count = KIRKWOOD_MAX_STATES;
75
76 cpuidle_register_driver(&kirkwood_idle_driver);
77 if (cpuidle_register_device(device)) {
78 pr_err("kirkwood_init_cpuidle: Failed registering\n");
79 return -EIO;
80 }
81 return 0;
82} 71}
83 72
84int kirkwood_cpuidle_remove(struct platform_device *pdev) 73int kirkwood_cpuidle_remove(struct platform_device *pdev)
85{ 74{
86 cpuidle_unregister_device(device); 75 cpuidle_unregister(&kirkwood_idle_driver);
87 cpuidle_unregister_driver(&kirkwood_idle_driver);
88
89 return 0; 76 return 0;
90} 77}
91 78
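
Besides the same cpuidle_register()/cpuidle_unregister() conversion, the kirkwood probe above switches from devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR-encoded error that can be propagated directly. The error-handling idiom in isolation; my_probe is an illustrative name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* e.g. -EINVAL, -EBUSY or -ENOMEM */

        /* ... hand 'base' to the rest of the driver ... */
        return 0;
}
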
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index eba69290e074..c3a93fece819 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
8 * This code is licenced under the GPL. 8 * This code is licenced under the GPL.
9 */ 9 */
10 10
11#include <linux/clockchips.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/mutex.h> 13#include <linux/mutex.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
@@ -23,6 +24,7 @@
23#include "cpuidle.h" 24#include "cpuidle.h"
24 25
25DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); 26DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
27DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
26 28
27DEFINE_MUTEX(cpuidle_lock); 29DEFINE_MUTEX(cpuidle_lock);
28LIST_HEAD(cpuidle_detected_devices); 30LIST_HEAD(cpuidle_detected_devices);
@@ -42,24 +44,6 @@ void disable_cpuidle(void)
42 44
43static int __cpuidle_register_device(struct cpuidle_device *dev); 45static int __cpuidle_register_device(struct cpuidle_device *dev);
44 46
45static inline int cpuidle_enter(struct cpuidle_device *dev,
46 struct cpuidle_driver *drv, int index)
47{
48 struct cpuidle_state *target_state = &drv->states[index];
49 return target_state->enter(dev, drv, index);
50}
51
52static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
53 struct cpuidle_driver *drv, int index)
54{
55 return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
56}
57
58typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
59 struct cpuidle_driver *drv, int index);
60
61static cpuidle_enter_t cpuidle_enter_ops;
62
63/** 47/**
64 * cpuidle_play_dead - cpu off-lining 48 * cpuidle_play_dead - cpu off-lining
65 * 49 *
@@ -89,11 +73,27 @@ int cpuidle_play_dead(void)
89 * @next_state: index into drv->states of the state to enter 73 * @next_state: index into drv->states of the state to enter
90 */ 74 */
91int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, 75int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
92 int next_state) 76 int index)
93{ 77{
94 int entered_state; 78 int entered_state;
95 79
96 entered_state = cpuidle_enter_ops(dev, drv, next_state); 80 struct cpuidle_state *target_state = &drv->states[index];
81 ktime_t time_start, time_end;
82 s64 diff;
83
84 time_start = ktime_get();
85
86 entered_state = target_state->enter(dev, drv, index);
87
88 time_end = ktime_get();
89
90 local_irq_enable();
91
92 diff = ktime_to_us(ktime_sub(time_end, time_start));
93 if (diff > INT_MAX)
94 diff = INT_MAX;
95
96 dev->last_residency = (int) diff;
97 97
98 if (entered_state >= 0) { 98 if (entered_state >= 0) {
99 /* Update cpuidle counters */ 99 /* Update cpuidle counters */
@@ -146,12 +146,20 @@ int cpuidle_idle_call(void)
146 146
147 trace_cpu_idle_rcuidle(next_state, dev->cpu); 147 trace_cpu_idle_rcuidle(next_state, dev->cpu);
148 148
149 if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
150 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
151 &dev->cpu);
152
149 if (cpuidle_state_is_coupled(dev, drv, next_state)) 153 if (cpuidle_state_is_coupled(dev, drv, next_state))
150 entered_state = cpuidle_enter_state_coupled(dev, drv, 154 entered_state = cpuidle_enter_state_coupled(dev, drv,
151 next_state); 155 next_state);
152 else 156 else
153 entered_state = cpuidle_enter_state(dev, drv, next_state); 157 entered_state = cpuidle_enter_state(dev, drv, next_state);
154 158
159 if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
160 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
161 &dev->cpu);
162
155 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 163 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
156 164
157 /* give the governor an opportunity to reflect on the outcome */ 165 /* give the governor an opportunity to reflect on the outcome */
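
The clockevents_notify() BROADCAST_ENTER/EXIT calls added above mean a driver no longer has to switch to the broadcast timer from its own ->enter() hook; it simply marks the affected state with CPUIDLE_FLAG_TIMER_STOP. A sketch of such a state declaration, assuming ARM; my_deep_enter() stands in for a real power-gating sequence and the latency figures are made up:

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>                /* ARM_CPUIDLE_WFI_STATE; assumes ARM */
#include <asm/proc-fns.h>               /* cpu_do_idle(); assumes ARM */

static int my_deep_enter(struct cpuidle_device *dev,
                         struct cpuidle_driver *drv, int index)
{
        cpu_do_idle();                  /* placeholder for power gating */
        return index;
}

static struct cpuidle_driver my_idle_driver = {
        .name      = "my_idle",
        .owner     = THIS_MODULE,
        .states[0] = ARM_CPUIDLE_WFI_STATE,
        .states[1] = {
                .enter            = my_deep_enter,
                .exit_latency     = 300,        /* us, made-up figure */
                .target_residency = 1000,       /* us, made-up figure */
                .flags            = CPUIDLE_FLAG_TIME_VALID |
                                    CPUIDLE_FLAG_TIMER_STOP,
                .name             = "DEEP",
                .desc             = "power gate, local timer stops",
        },
        .state_count = 2,
};
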
@@ -222,37 +230,6 @@ void cpuidle_resume(void)
222 mutex_unlock(&cpuidle_lock); 230 mutex_unlock(&cpuidle_lock);
223} 231}
224 232
225/**
226 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
227 * @dev: pointer to a valid cpuidle_device object
228 * @drv: pointer to a valid cpuidle_driver object
229 * @index: index of the target cpuidle state.
230 */
231int cpuidle_wrap_enter(struct cpuidle_device *dev,
232 struct cpuidle_driver *drv, int index,
233 int (*enter)(struct cpuidle_device *dev,
234 struct cpuidle_driver *drv, int index))
235{
236 ktime_t time_start, time_end;
237 s64 diff;
238
239 time_start = ktime_get();
240
241 index = enter(dev, drv, index);
242
243 time_end = ktime_get();
244
245 local_irq_enable();
246
247 diff = ktime_to_us(ktime_sub(time_end, time_start));
248 if (diff > INT_MAX)
249 diff = INT_MAX;
250
251 dev->last_residency = (int) diff;
252
253 return index;
254}
255
256#ifdef CONFIG_ARCH_HAS_CPU_RELAX 233#ifdef CONFIG_ARCH_HAS_CPU_RELAX
257static int poll_idle(struct cpuidle_device *dev, 234static int poll_idle(struct cpuidle_device *dev,
258 struct cpuidle_driver *drv, int index) 235 struct cpuidle_driver *drv, int index)
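
With cpuidle_wrap_enter() removed here and en_core_tk_irqen dropped from the drivers, residency accounting and interrupt re-enabling are no longer opt-in: cpuidle_enter_state(), in the first hunk of this file, now always performs them. Condensed into a fragment, the bookkeeping the core wraps around every ->enter() callback amounts to:

        /* condensed paraphrase of the cpuidle_enter_state() hunk above */
        ktime_t start = ktime_get(), end;
        int entered_state = drv->states[index].enter(dev, drv, index);

        end = ktime_get();
        local_irq_enable();
        /* residency in microseconds, clamped to fit in an int */
        dev->last_residency = (int)min_t(s64,
                        ktime_to_us(ktime_sub(end, start)), INT_MAX);
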
@@ -324,9 +301,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
324 return ret; 301 return ret;
325 } 302 }
326 303
327 cpuidle_enter_ops = drv->en_core_tk_irqen ?
328 cpuidle_enter_tk : cpuidle_enter;
329
330 poll_idle_init(drv); 304 poll_idle_init(drv);
331 305
332 ret = cpuidle_add_device_sysfs(dev); 306 ret = cpuidle_add_device_sysfs(dev);
@@ -480,6 +454,77 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
480 454
481EXPORT_SYMBOL_GPL(cpuidle_unregister_device); 455EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
482 456
457/**
458 * cpuidle_unregister: unregister a driver and the devices. This function
459 * can be used only if the driver has been previously registered through
460 * the cpuidle_register function.
461 *
462 * @drv: a valid pointer to a struct cpuidle_driver
463 */
464void cpuidle_unregister(struct cpuidle_driver *drv)
465{
466 int cpu;
467 struct cpuidle_device *device;
468
469 for_each_possible_cpu(cpu) {
470 device = &per_cpu(cpuidle_dev, cpu);
471 cpuidle_unregister_device(device);
472 }
473
474 cpuidle_unregister_driver(drv);
475}
476EXPORT_SYMBOL_GPL(cpuidle_unregister);
477
478/**
479 * cpuidle_register: registers the driver and the cpu devices with the
480 * coupled_cpus passed as parameter. This function is used for all common
481 * initialization pattern there are in the arch specific drivers. The
482 * devices is globally defined in this file.
483 *
484 * @drv : a valid pointer to a struct cpuidle_driver
485 * @coupled_cpus: a cpumask for the coupled states
486 *
487 * Returns 0 on success, < 0 otherwise
488 */
489int cpuidle_register(struct cpuidle_driver *drv,
490 const struct cpumask *const coupled_cpus)
491{
492 int ret, cpu;
493 struct cpuidle_device *device;
494
495 ret = cpuidle_register_driver(drv);
496 if (ret) {
497 pr_err("failed to register cpuidle driver\n");
498 return ret;
499 }
500
501 for_each_possible_cpu(cpu) {
502 device = &per_cpu(cpuidle_dev, cpu);
503 device->cpu = cpu;
504
505#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
506 /*
507 * On multiplatform for ARM, the coupled idle states could
508 * enabled in the kernel even if the cpuidle driver does not
509 * use it. Note, coupled_cpus is a struct copy.
510 */
511 if (coupled_cpus)
512 device->coupled_cpus = *coupled_cpus;
513#endif
514 ret = cpuidle_register_device(device);
515 if (!ret)
516 continue;
517
518 pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
519
520 cpuidle_unregister(drv);
521 break;
522 }
523
524 return ret;
525}
526EXPORT_SYMBOL_GPL(cpuidle_register);
527
483#ifdef CONFIG_SMP 528#ifdef CONFIG_SMP
484 529
485static void smp_callback(void *v) 530static void smp_callback(void *v)
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 422c7b69ba7c..8dfaaae94444 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -11,6 +11,8 @@
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/cpuidle.h> 13#include <linux/cpuidle.h>
14#include <linux/cpumask.h>
15#include <linux/clockchips.h>
14 16
15#include "cpuidle.h" 17#include "cpuidle.h"
16 18
@@ -19,9 +21,28 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
19static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); 21static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
20static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); 22static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
21 23
22static void __cpuidle_driver_init(struct cpuidle_driver *drv) 24static void cpuidle_setup_broadcast_timer(void *arg)
23{ 25{
26 int cpu = smp_processor_id();
27 clockevents_notify((long)(arg), &cpu);
28}
29
30static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
31{
32 int i;
33
24 drv->refcnt = 0; 34 drv->refcnt = 0;
35
36 for (i = drv->state_count - 1; i >= 0 ; i--) {
37
38 if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
39 continue;
40
41 drv->bctimer = 1;
42 on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
43 (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
44 break;
45 }
25} 46}
26 47
27static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) 48static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
@@ -35,7 +56,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
35 if (__cpuidle_get_cpu_driver(cpu)) 56 if (__cpuidle_get_cpu_driver(cpu))
36 return -EBUSY; 57 return -EBUSY;
37 58
38 __cpuidle_driver_init(drv); 59 __cpuidle_driver_init(drv, cpu);
39 60
40 __cpuidle_set_cpu_driver(drv, cpu); 61 __cpuidle_set_cpu_driver(drv, cpu);
41 62
@@ -49,6 +70,12 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
49 70
50 if (!WARN_ON(drv->refcnt > 0)) 71 if (!WARN_ON(drv->refcnt > 0))
51 __cpuidle_set_cpu_driver(NULL, cpu); 72 __cpuidle_set_cpu_driver(NULL, cpu);
73
74 if (drv->bctimer) {
75 drv->bctimer = 0;
76 on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
77 (void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
78 }
52} 79}
53 80
54#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS 81#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4d338740f2cb..a8117e614009 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -350,11 +350,11 @@ static void intel_didl_outputs(struct drm_device *dev)
350 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 350 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
351 return; 351 return;
352 352
353 if (acpi_is_video_device(acpi_dev)) 353 if (acpi_is_video_device(handle))
354 acpi_video_bus = acpi_dev; 354 acpi_video_bus = acpi_dev;
355 else { 355 else {
356 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { 356 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
357 if (acpi_is_video_device(acpi_cdev)) { 357 if (acpi_is_video_device(acpi_cdev->handle)) {
358 acpi_video_bus = acpi_cdev; 358 acpi_video_bus = acpi_cdev;
359 break; 359 break;
360 } 360 }
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1a38dd7dfe4e..0e8fab1913df 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -71,7 +71,6 @@
71static struct cpuidle_driver intel_idle_driver = { 71static struct cpuidle_driver intel_idle_driver = {
72 .name = "intel_idle", 72 .name = "intel_idle",
73 .owner = THIS_MODULE, 73 .owner = THIS_MODULE,
74 .en_core_tk_irqen = 1,
75}; 74};
76/* intel_idle.max_cstate=0 disables driver */ 75/* intel_idle.max_cstate=0 disables driver */
77static int max_cstate = CPUIDLE_STATE_MAX - 1; 76static int max_cstate = CPUIDLE_STATE_MAX - 1;
@@ -339,7 +338,6 @@ static int intel_idle(struct cpuidle_device *dev,
339 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 338 if (!(lapic_timer_reliable_states & (1 << (cstate))))
340 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 339 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
341 340
342 stop_critical_timings();
343 if (!need_resched()) { 341 if (!need_resched()) {
344 342
345 __monitor((void *)&current_thread_info()->flags, 0, 0); 343 __monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -348,8 +346,6 @@ static int intel_idle(struct cpuidle_device *dev,
348 __mwait(eax, ecx); 346 __mwait(eax, ecx);
349 } 347 }
350 348
351 start_critical_timings();
352
353 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 349 if (!(lapic_timer_reliable_states & (1 << (cstate))))
354 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); 350 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
355 351
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 14d4dced1def..d544e3aaf761 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4121,7 +4121,7 @@ static int sony_pic_enable(struct acpi_device *device,
4121 resource->res3.data.irq.sharable = ACPI_SHARED; 4121 resource->res3.data.irq.sharable = ACPI_SHARED;
4122 4122
4123 resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; 4123 resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
4124 4124 resource->res4.length = sizeof(struct acpi_resource);
4125 } 4125 }
4126 /* setup Type 2/3 resources */ 4126 /* setup Type 2/3 resources */
4127 else { 4127 else {
@@ -4140,6 +4140,7 @@ static int sony_pic_enable(struct acpi_device *device,
4140 resource->res2.data.irq.sharable = ACPI_SHARED; 4140 resource->res2.data.irq.sharable = ACPI_SHARED;
4141 4141
4142 resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; 4142 resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
4143 resource->res3.length = sizeof(struct acpi_resource);
4143 } 4144 }
4144 4145
4145 /* Attempt to set the resource */ 4146 /* Attempt to set the resource */
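
This hunk and the pnpacpi one below both start filling in the length of the terminating ACPI_RESOURCE_TYPE_END_TAG descriptor, presumably so resource templates handed to ACPICA end in a fully initialized descriptor. The idiom in isolation; my_terminate_template is an illustrative helper, not kernel API:

#include <linux/acpi.h>

/* 'end' points at the last descriptor slot in an acpi_resource buffer */
static void my_terminate_template(struct acpi_resource *end)
{
        end->type   = ACPI_RESOURCE_TYPE_END_TAG;
        end->length = sizeof(struct acpi_resource);
}
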
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 918d5f044865..cf88f9b62445 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -379,10 +379,6 @@ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
379 *type = (tag >> 3) & 0x0f; 379 *type = (tag >> 3) & 0x0f;
380 *size = tag & 0x07; 380 *size = tag & 0x07;
381 } 381 }
382#if 0
383 printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type,
384 *size);
385#endif
386 if (*type == 0xff && *size == 0xffff) /* probably invalid data */ 382 if (*type == 0xff && *size == 0xffff) /* probably invalid data */
387 return -1; 383 return -1;
388 return 0; 384 return 0;
@@ -813,13 +809,6 @@ static int __init isapnp_build_device_list(void)
813 if (!card) 809 if (!card)
814 continue; 810 continue;
815 811
816#if 0
817 dev_info(&card->dev,
818 "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
819 header[0], header[1], header[2], header[3], header[4],
820 header[5], header[6], header[7], header[8]);
821 dev_info(&card->dev, "checksum = %#x\n", checksum);
822#endif
823 INIT_LIST_HEAD(&card->devices); 812 INIT_LIST_HEAD(&card->devices);
824 card->serial = 813 card->serial =
825 (header[7] << 24) | (header[6] << 16) | (header[5] << 8) | 814 (header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
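
The deleted #if 0 blocks above were dead code; if that vendor-header dump were ever wanted again, the %*ph hex-buffer format specifiers (the same family the pnpbios change below switches to) reduce it to one line. A hedged sketch reusing the surrounding function's header, checksum and card variables:

        /* hypothetical one-line replacement for the removed dump */
        dev_dbg(&card->dev, "vendor header %9phC, checksum %#x\n",
                header, checksum);
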
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index b8f4ea7b27fc..9847ab163829 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -634,6 +634,7 @@ int pnpacpi_build_resource_template(struct pnp_dev *dev,
634 } 634 }
635 /* resource will pointer the end resource now */ 635 /* resource will pointer the end resource now */
636 resource->type = ACPI_RESOURCE_TYPE_END_TAG; 636 resource->type = ACPI_RESOURCE_TYPE_END_TAG;
637 resource->length = sizeof(struct acpi_resource);
637 638
638 return 0; 639 return 0;
639} 640}
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 63ddb0173456..1c03ee822e50 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -185,10 +185,9 @@ static int pnp_devices_proc_show(struct seq_file *m, void *v)
185 185
186 if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node)) 186 if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
187 break; 187 break;
188 seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", 188 seq_printf(m, "%02x\t%08x\t%3phC\t%04x\n",
189 node->handle, node->eisa_id, 189 node->handle, node->eisa_id,
190 node->type_code[0], node->type_code[1], 190 node->type_code, node->flags);
191 node->type_code[2], node->flags);
192 if (nodenum <= thisnodenum) { 191 if (nodenum <= thisnodenum) {
193 printk(KERN_ERR 192 printk(KERN_ERR
194 "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", 193 "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",