author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-07 13:13:52 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-07 13:13:52 -0500
commit     3c00303206c3a1ccd86579efdc90bc35f140962e (patch)
tree       66170c84b5ddaeb102aea3530517a26657b6ea29 /arch
parent     83dbb15e9cd78a3619e3db36777e2f81d09b2914 (diff)
parent     efb90582c575084723cc14302c1300cb26c7e01f (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
  cpuidle: Single/Global registration of idle states
  cpuidle: Split cpuidle_state structure and move per-cpu statistics fields
  cpuidle: Remove CPUIDLE_FLAG_IGNORE and dev->prepare()
  cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
  ACPI: Fix CONFIG_ACPI_DOCK=n compiler warning
  ACPI: Export FADT pm_profile integer value to userspace
  thermal: Prevent polling from happening during system suspend
  ACPI: Drop ACPI_NO_HARDWARE_INIT
  ACPI atomicio: Convert width in bits to bytes in __acpi_ioremap_fast()
  PNPACPI: Simplify disabled resource registration
  ACPI: Fix possible recursive locking in hwregs.c
  ACPI: use kstrdup()
  mrst pmu: update comment
  tools/power turbostat: less verbose debugging
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-at91/cpuidle.c             41
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c          51
-rw-r--r--  arch/arm/mach-exynos/cpuidle.c           30
-rw-r--r--  arch/arm/mach-kirkwood/cpuidle.c         42
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c       133
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c    28
-rw-r--r--  arch/x86/platform/mrst/pmu.c              2
7 files changed, 196 insertions, 131 deletions
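For orientation before the per-file diffs: the cpuidle patches in this pull convert every platform driver from the old ->enter() callback, which took a struct cpuidle_state pointer and returned the measured residency, to a new one that takes the cpuidle_driver plus a state index, stores the residency in dev->last_residency, and returns the index it entered; the idle states themselves move into the cpuidle_driver, and cpuidle_register_driver() is now called only after they are filled in. The snippet below is a minimal, illustrative sketch of that pattern, not code from this tree (the function name my_enter_idle is made up; the cpuidle fields and helpers are the ones visible in the diffs that follow).

/* Sketch only: the converted ->enter() shape used by the drivers below. */
static int my_enter_idle(struct cpuidle_device *dev,
                         struct cpuidle_driver *drv,
                         int index)
{
        struct timeval before, after;
        int idle_time;

        local_irq_disable();
        do_gettimeofday(&before);
        if (index == 0)
                cpu_do_idle();          /* e.g. plain WFI for state 0 */
        /* deeper states would do their platform-specific work here */
        do_gettimeofday(&after);
        local_irq_enable();

        idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                        (after.tv_usec - before.tv_usec);

        dev->last_residency = idle_time;        /* was the return value before */
        return index;                           /* now return the state entered */
}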
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index f474272c0ea..a851e6c9842 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -34,7 +34,8 @@ static struct cpuidle_driver at91_idle_driver = {
 
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
-                        struct cpuidle_state *state)
+                        struct cpuidle_driver *drv,
+                        int index)
 {
         struct timeval before, after;
         int idle_time;
@@ -42,10 +43,10 @@ static int at91_enter_idle(struct cpuidle_device *dev,
 
         local_irq_disable();
         do_gettimeofday(&before);
-        if (state == &dev->states[0])
+        if (index == 0)
                 /* Wait for interrupt state */
                 cpu_do_idle();
-        else if (state == &dev->states[1]) {
+        else if (index == 1) {
                 asm("b 1f; .align 5; 1:");
                 asm("mcr p15, 0, r0, c7, c10, 4");      /* drain write buffer */
                 saved_lpr = sdram_selfrefresh_enable();
@@ -56,34 +57,38 @@ static int at91_enter_idle(struct cpuidle_device *dev,
         local_irq_enable();
         idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                         (after.tv_usec - before.tv_usec);
-        return idle_time;
+
+        dev->last_residency = idle_time;
+        return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int at91_init_cpuidle(void)
 {
         struct cpuidle_device *device;
-
-        cpuidle_register_driver(&at91_idle_driver);
+        struct cpuidle_driver *driver = &at91_idle_driver;
 
         device = &per_cpu(at91_cpuidle_device, smp_processor_id());
         device->state_count = AT91_MAX_STATES;
+        driver->state_count = AT91_MAX_STATES;
 
         /* Wait for interrupt state */
-        device->states[0].enter = at91_enter_idle;
-        device->states[0].exit_latency = 1;
-        device->states[0].target_residency = 10000;
-        device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[0].name, "WFI");
-        strcpy(device->states[0].desc, "Wait for interrupt");
+        driver->states[0].enter = at91_enter_idle;
+        driver->states[0].exit_latency = 1;
+        driver->states[0].target_residency = 10000;
+        driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[0].name, "WFI");
+        strcpy(driver->states[0].desc, "Wait for interrupt");
 
         /* Wait for interrupt and RAM self refresh state */
-        device->states[1].enter = at91_enter_idle;
-        device->states[1].exit_latency = 10;
-        device->states[1].target_residency = 10000;
-        device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[1].name, "RAM_SR");
-        strcpy(device->states[1].desc, "WFI and RAM Self Refresh");
+        driver->states[1].enter = at91_enter_idle;
+        driver->states[1].exit_latency = 10;
+        driver->states[1].target_residency = 10000;
+        driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[1].name, "RAM_SR");
+        strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
+
+        cpuidle_register_driver(&at91_idle_driver);
 
         if (cpuidle_register_device(device)) {
                 printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 60d2f4871af..a30c7c5a6d8 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -79,9 +79,11 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
-                                struct cpuidle_state *state)
+                                struct cpuidle_driver *drv,
+                                int index)
 {
-        struct davinci_ops *ops = cpuidle_get_statedata(state);
+        struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+        struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
         struct timeval before, after;
         int idle_time;
 
@@ -99,13 +101,17 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
         local_irq_enable();
         idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                         (after.tv_usec - before.tv_usec);
-        return idle_time;
+
+        dev->last_residency = idle_time;
+
+        return index;
 }
 
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
         int ret;
         struct cpuidle_device *device;
+        struct cpuidle_driver *driver = &davinci_idle_driver;
         struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
 
         device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -117,32 +123,33 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 
         ddr2_reg_base = pdata->ddr2_ctlr_base;
 
-        ret = cpuidle_register_driver(&davinci_idle_driver);
-        if (ret) {
-                dev_err(&pdev->dev, "failed to register driver\n");
-                return ret;
-        }
-
         /* Wait for interrupt state */
-        device->states[0].enter = davinci_enter_idle;
-        device->states[0].exit_latency = 1;
-        device->states[0].target_residency = 10000;
-        device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[0].name, "WFI");
-        strcpy(device->states[0].desc, "Wait for interrupt");
+        driver->states[0].enter = davinci_enter_idle;
+        driver->states[0].exit_latency = 1;
+        driver->states[0].target_residency = 10000;
+        driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[0].name, "WFI");
+        strcpy(driver->states[0].desc, "Wait for interrupt");
 
         /* Wait for interrupt and DDR self refresh state */
-        device->states[1].enter = davinci_enter_idle;
-        device->states[1].exit_latency = 10;
-        device->states[1].target_residency = 10000;
-        device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[1].name, "DDR SR");
-        strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
+        driver->states[1].enter = davinci_enter_idle;
+        driver->states[1].exit_latency = 10;
+        driver->states[1].target_residency = 10000;
+        driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[1].name, "DDR SR");
+        strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
         if (pdata->ddr2_pdown)
                 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-        cpuidle_set_statedata(&device->states[1], &davinci_states[1]);
+        cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
 
         device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+        driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+
+        ret = cpuidle_register_driver(&davinci_idle_driver);
+        if (ret) {
+                dev_err(&pdev->dev, "failed to register driver\n");
+                return ret;
+        }
 
         ret = cpuidle_register_device(device);
         if (ret) {
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index bf7e96f2793..35f6502144a 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -16,7 +16,8 @@
 #include <asm/proc-fns.h>
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-                              struct cpuidle_state *state);
+                              struct cpuidle_driver *drv,
+                              int index);
 
 static struct cpuidle_state exynos4_cpuidle_set[] = {
         [0] = {
@@ -37,7 +38,8 @@ static struct cpuidle_driver exynos4_idle_driver = {
 };
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-                              struct cpuidle_state *state)
+                              struct cpuidle_driver *drv,
+                              int index)
 {
         struct timeval before, after;
         int idle_time;
@@ -52,29 +54,31 @@ static int exynos4_enter_idle(struct cpuidle_device *dev,
         idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                     (after.tv_usec - before.tv_usec);
 
-        return idle_time;
+        dev->last_residency = idle_time;
+        return index;
 }
 
 static int __init exynos4_init_cpuidle(void)
 {
         int i, max_cpuidle_state, cpu_id;
         struct cpuidle_device *device;
-
+        struct cpuidle_driver *drv = &exynos4_idle_driver;
+
+        /* Setup cpuidle driver */
+        drv->state_count = (sizeof(exynos4_cpuidle_set) /
+                                sizeof(struct cpuidle_state));
+        max_cpuidle_state = drv->state_count;
+        for (i = 0; i < max_cpuidle_state; i++) {
+                memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
+                                sizeof(struct cpuidle_state));
+        }
         cpuidle_register_driver(&exynos4_idle_driver);
 
         for_each_cpu(cpu_id, cpu_online_mask) {
                 device = &per_cpu(exynos4_cpuidle_device, cpu_id);
                 device->cpu = cpu_id;
 
-                device->state_count = (sizeof(exynos4_cpuidle_set) /
-                                        sizeof(struct cpuidle_state));
-
-                max_cpuidle_state = device->state_count;
-
-                for (i = 0; i < max_cpuidle_state; i++) {
-                        memcpy(&device->states[i], &exynos4_cpuidle_set[i],
-                                sizeof(struct cpuidle_state));
-                }
+                device->state_count = drv->state_count;
 
                 if (cpuidle_register_device(device)) {
                         printk(KERN_ERR "CPUidle register device failed\n,");
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 864e569f684..7088180b018 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -33,17 +33,18 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
 
 /* Actual code that puts the SoC in different idle states */
 static int kirkwood_enter_idle(struct cpuidle_device *dev,
-                               struct cpuidle_state *state)
+                               struct cpuidle_driver *drv,
+                               int index)
 {
         struct timeval before, after;
         int idle_time;
 
         local_irq_disable();
         do_gettimeofday(&before);
-        if (state == &dev->states[0])
+        if (index == 0)
                 /* Wait for interrupt state */
                 cpu_do_idle();
-        else if (state == &dev->states[1]) {
+        else if (index == 1) {
                 /*
                  * Following write will put DDR in self refresh.
                  * Note that we have 256 cycles before DDR puts it
@@ -58,35 +59,40 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
         local_irq_enable();
         idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
                         (after.tv_usec - before.tv_usec);
-        return idle_time;
+
+        /* Update last residency */
+        dev->last_residency = idle_time;
+
+        return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_init_cpuidle(void)
 {
         struct cpuidle_device *device;
-
-        cpuidle_register_driver(&kirkwood_idle_driver);
+        struct cpuidle_driver *driver = &kirkwood_idle_driver;
 
         device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
         device->state_count = KIRKWOOD_MAX_STATES;
+        driver->state_count = KIRKWOOD_MAX_STATES;
 
         /* Wait for interrupt state */
-        device->states[0].enter = kirkwood_enter_idle;
-        device->states[0].exit_latency = 1;
-        device->states[0].target_residency = 10000;
-        device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[0].name, "WFI");
-        strcpy(device->states[0].desc, "Wait for interrupt");
+        driver->states[0].enter = kirkwood_enter_idle;
+        driver->states[0].exit_latency = 1;
+        driver->states[0].target_residency = 10000;
+        driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[0].name, "WFI");
+        strcpy(driver->states[0].desc, "Wait for interrupt");
 
         /* Wait for interrupt and DDR self refresh state */
-        device->states[1].enter = kirkwood_enter_idle;
-        device->states[1].exit_latency = 10;
-        device->states[1].target_residency = 10000;
-        device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-        strcpy(device->states[1].name, "DDR SR");
-        strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
+        driver->states[1].enter = kirkwood_enter_idle;
+        driver->states[1].exit_latency = 10;
+        driver->states[1].target_residency = 10000;
+        driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+        strcpy(driver->states[1].name, "DDR SR");
+        strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
 
+        cpuidle_register_driver(&kirkwood_idle_driver);
         if (cpuidle_register_device(device)) {
                 printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
                 return -EIO;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 4bf6e6e8b10..1fe35c24fba 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -88,17 +88,21 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
 /**
  * omap3_enter_idle - Programs OMAP3 to enter the specified state
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
  *
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
 static int omap3_enter_idle(struct cpuidle_device *dev,
-                        struct cpuidle_state *state)
+                        struct cpuidle_driver *drv,
+                        int index)
 {
-        struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
+        struct omap3_idle_statedata *cx =
+                        cpuidle_get_statedata(&dev->states_usage[index]);
         struct timespec ts_preidle, ts_postidle, ts_idle;
         u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
+        int idle_time;
 
         /* Used to keep track of the total time in idle */
         getnstimeofday(&ts_preidle);
@@ -113,7 +117,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
                 goto return_sleep_time;
 
         /* Deny idle for C1 */
-        if (state == &dev->states[0]) {
+        if (index == 0) {
                 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
         }
@@ -122,7 +126,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
         omap_sram_idle();
 
         /* Re-allow idle for C1 */
-        if (state == &dev->states[0]) {
+        if (index == 0) {
                 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
         }
@@ -134,28 +138,38 @@ return_sleep_time:
         local_irq_enable();
         local_fiq_enable();
 
-        return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
+        idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
+                                                                USEC_PER_SEC;
+
+        /* Update cpuidle counters */
+        dev->last_residency = idle_time;
+
+        return index;
 }
 
 /**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected C-state
+ * @drv: cpuidle driver
+ * @index: Index of currently selected c-state
  *
- * If the current state is valid, it is returned back to the caller.
- * Else, this function searches for a lower c-state which is still
- * valid.
+ * If the state corresponding to index is valid, index is returned back
+ * to the caller. Else, this function searches for a lower c-state which is
+ * still valid (as defined in omap3_power_states[]) and returns its index.
  *
  * A state is valid if the 'valid' field is enabled and
  * if it satisfies the enable_off_mode condition.
  */
-static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-                                              struct cpuidle_state *curr)
+static int next_valid_state(struct cpuidle_device *dev,
+                        struct cpuidle_driver *drv,
+                                                int index)
 {
-        struct cpuidle_state *next = NULL;
-        struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
+        struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
+        struct cpuidle_state *curr = &drv->states[index];
+        struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
         u32 mpu_deepest_state = PWRDM_POWER_RET;
         u32 core_deepest_state = PWRDM_POWER_RET;
+        int next_index = -1;
 
         if (enable_off_mode) {
                 mpu_deepest_state = PWRDM_POWER_OFF;
@@ -172,20 +186,20 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
         if ((cx->valid) &&
             (cx->mpu_state >= mpu_deepest_state) &&
             (cx->core_state >= core_deepest_state)) {
-                return curr;
+                return index;
         } else {
                 int idx = OMAP3_NUM_STATES - 1;
 
                 /* Reach the current state starting at highest C-state */
                 for (; idx >= 0; idx--) {
-                        if (&dev->states[idx] == curr) {
-                                next = &dev->states[idx];
+                        if (&drv->states[idx] == curr) {
+                                next_index = idx;
                                 break;
                         }
                 }
 
                 /* Should never hit this condition */
-                WARN_ON(next == NULL);
+                WARN_ON(next_index == -1);
 
                 /*
                  * Drop to next valid state.
@@ -193,41 +207,44 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
                  */
                 idx--;
                 for (; idx >= 0; idx--) {
-                        cx = cpuidle_get_statedata(&dev->states[idx]);
+                        cx = cpuidle_get_statedata(&dev->states_usage[idx]);
                         if ((cx->valid) &&
                             (cx->mpu_state >= mpu_deepest_state) &&
                             (cx->core_state >= core_deepest_state)) {
-                                next = &dev->states[idx];
+                                next_index = idx;
                                 break;
                         }
                 }
                 /*
                  * C1 is always valid.
-                 * So, no need to check for 'next==NULL' outside this loop.
+                 * So, no need to check for 'next_index == -1' outside
+                 * this loop.
                  */
         }
 
-        return next;
+        return next_index;
 }
 
 /**
  * omap3_enter_idle_bm - Checks for any bus activity
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: array index of target state to be programmed
  *
  * This function checks for any pending activity and then programs
 * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
-                                struct cpuidle_state *state)
+                                struct cpuidle_driver *drv,
+                                int index)
 {
-        struct cpuidle_state *new_state;
+        int new_state_idx;
         u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
         struct omap3_idle_statedata *cx;
         int ret;
 
         if (!omap3_can_sleep()) {
-                new_state = dev->safe_state;
+                new_state_idx = drv->safe_state_index;
                 goto select_state;
         }
 
@@ -237,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
          */
         cam_state = pwrdm_read_pwrst(cam_pd);
         if (cam_state == PWRDM_POWER_ON) {
-                new_state = dev->safe_state;
+                new_state_idx = drv->safe_state_index;
                 goto select_state;
         }
 
@@ -253,7 +270,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
          * Prevent PER off if CORE is not in retention or off as this
          * would disable PER wakeups completely.
          */
-        cx = cpuidle_get_statedata(state);
+        cx = cpuidle_get_statedata(&dev->states_usage[index]);
         core_next_state = cx->core_state;
         per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
         if ((per_next_state == PWRDM_POWER_OFF) &&
@@ -264,11 +281,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
         if (per_next_state != per_saved_state)
                 pwrdm_set_next_pwrst(per_pd, per_next_state);
 
-        new_state = next_valid_state(dev, state);
+        new_state_idx = next_valid_state(dev, drv, index);
 
 select_state:
-        dev->last_state = new_state;
-        ret = omap3_enter_idle(dev, new_state);
+        ret = omap3_enter_idle(dev, drv, new_state_idx);
 
         /* Restore original PER state if it was modified */
         if (per_next_state != per_saved_state)
@@ -301,22 +317,31 @@ struct cpuidle_driver omap3_idle_driver = {
         .owner = THIS_MODULE,
 };
 
-/* Helper to fill the C-state common data and register the driver_data */
-static inline struct omap3_idle_statedata *_fill_cstate(
-                                        struct cpuidle_device *dev,
+/* Helper to fill the C-state common data*/
+static inline void _fill_cstate(struct cpuidle_driver *drv,
                                         int idx, const char *descr)
 {
-        struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
-        struct cpuidle_state *state = &dev->states[idx];
+        struct cpuidle_state *state = &drv->states[idx];
 
         state->exit_latency = cpuidle_params_table[idx].exit_latency;
         state->target_residency = cpuidle_params_table[idx].target_residency;
         state->flags = CPUIDLE_FLAG_TIME_VALID;
         state->enter = omap3_enter_idle_bm;
-        cx->valid = cpuidle_params_table[idx].valid;
         sprintf(state->name, "C%d", idx + 1);
         strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
-        cpuidle_set_statedata(state, cx);
+
+}
+
+/* Helper to register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate_usage(
+                                        struct cpuidle_device *dev,
+                                        int idx)
+{
+        struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+        struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
+
+        cx->valid = cpuidle_params_table[idx].valid;
+        cpuidle_set_statedata(state_usage, cx);
 
         return cx;
 }
@@ -330,6 +355,7 @@ static inline struct omap3_idle_statedata *_fill_cstate(
 int __init omap3_idle_init(void)
 {
         struct cpuidle_device *dev;
+        struct cpuidle_driver *drv = &omap3_idle_driver;
         struct omap3_idle_statedata *cx;
 
         mpu_pd = pwrdm_lookup("mpu_pwrdm");
@@ -337,44 +363,52 @@ int __init omap3_idle_init(void)
         per_pd = pwrdm_lookup("per_pwrdm");
         cam_pd = pwrdm_lookup("cam_pwrdm");
 
-        cpuidle_register_driver(&omap3_idle_driver);
+
+        drv->safe_state_index = -1;
         dev = &per_cpu(omap3_idle_dev, smp_processor_id());
 
         /* C1 . MPU WFI + Core active */
-        cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
-        (&dev->states[0])->enter = omap3_enter_idle;
-        dev->safe_state = &dev->states[0];
+        _fill_cstate(drv, 0, "MPU ON + CORE ON");
+        (&drv->states[0])->enter = omap3_enter_idle;
+        drv->safe_state_index = 0;
+        cx = _fill_cstate_usage(dev, 0);
         cx->valid = 1;  /* C1 is always valid */
         cx->mpu_state = PWRDM_POWER_ON;
         cx->core_state = PWRDM_POWER_ON;
 
         /* C2 . MPU WFI + Core inactive */
-        cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+        _fill_cstate(drv, 1, "MPU ON + CORE ON");
+        cx = _fill_cstate_usage(dev, 1);
         cx->mpu_state = PWRDM_POWER_ON;
         cx->core_state = PWRDM_POWER_ON;
 
         /* C3 . MPU CSWR + Core inactive */
-        cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+        _fill_cstate(drv, 2, "MPU RET + CORE ON");
+        cx = _fill_cstate_usage(dev, 2);
         cx->mpu_state = PWRDM_POWER_RET;
         cx->core_state = PWRDM_POWER_ON;
 
         /* C4 . MPU OFF + Core inactive */
-        cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+        _fill_cstate(drv, 3, "MPU OFF + CORE ON");
+        cx = _fill_cstate_usage(dev, 3);
         cx->mpu_state = PWRDM_POWER_OFF;
         cx->core_state = PWRDM_POWER_ON;
 
         /* C5 . MPU RET + Core RET */
-        cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+        _fill_cstate(drv, 4, "MPU RET + CORE RET");
+        cx = _fill_cstate_usage(dev, 4);
         cx->mpu_state = PWRDM_POWER_RET;
         cx->core_state = PWRDM_POWER_RET;
 
         /* C6 . MPU OFF + Core RET */
-        cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+        _fill_cstate(drv, 5, "MPU OFF + CORE RET");
+        cx = _fill_cstate_usage(dev, 5);
         cx->mpu_state = PWRDM_POWER_OFF;
         cx->core_state = PWRDM_POWER_RET;
 
         /* C7 . MPU OFF + Core OFF */
-        cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+        _fill_cstate(drv, 6, "MPU OFF + CORE OFF");
+        cx = _fill_cstate_usage(dev, 6);
         /*
          * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
          * enable OFF mode in a stable form for previous revisions.
@@ -388,6 +422,9 @@ int __init omap3_idle_init(void)
         cx->mpu_state = PWRDM_POWER_OFF;
         cx->core_state = PWRDM_POWER_OFF;
 
+        drv->state_count = OMAP3_NUM_STATES;
+        cpuidle_register_driver(&omap3_idle_driver);
+
         dev->state_count = OMAP3_NUM_STATES;
         if (cpuidle_register_device(dev)) {
                 printk(KERN_ERR "%s: CPUidle register device failed\n",
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 7d98f909a8a..1cc257c9b1e 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -26,11 +26,12 @@ static unsigned long cpuidle_mode[] = {
 };
 
 static int cpuidle_sleep_enter(struct cpuidle_device *dev,
-                               struct cpuidle_state *state)
+                               struct cpuidle_driver *drv,
+                               int index)
 {
         unsigned long allowed_mode = arch_hwblk_sleep_mode();
         ktime_t before, after;
-        int requested_state = state - &dev->states[0];
+        int requested_state = index;
         int allowed_state;
         int k;
 
@@ -47,11 +48,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
          */
         k = min_t(int, allowed_state, requested_state);
 
-        dev->last_state = &dev->states[k];
         before = ktime_get();
         sh_mobile_call_standby(cpuidle_mode[k]);
         after = ktime_get();
-        return ktime_to_ns(ktime_sub(after, before)) >> 10;
+
+        dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
+
+        return k;
 }
 
 static struct cpuidle_device cpuidle_dev;
@@ -63,19 +66,19 @@ static struct cpuidle_driver cpuidle_driver = {
 void sh_mobile_setup_cpuidle(void)
 {
         struct cpuidle_device *dev = &cpuidle_dev;
+        struct cpuidle_driver *drv = &cpuidle_driver;
         struct cpuidle_state *state;
         int i;
 
-        cpuidle_register_driver(&cpuidle_driver);
 
         for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-                dev->states[i].name[0] = '\0';
-                dev->states[i].desc[0] = '\0';
+                drv->states[i].name[0] = '\0';
+                drv->states[i].desc[0] = '\0';
         }
 
         i = CPUIDLE_DRIVER_STATE_START;
 
-        state = &dev->states[i++];
+        state = &drv->states[i++];
         snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
         strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
         state->exit_latency = 1;
@@ -85,10 +88,10 @@ void sh_mobile_setup_cpuidle(void)
         state->flags |= CPUIDLE_FLAG_TIME_VALID;
         state->enter = cpuidle_sleep_enter;
 
-        dev->safe_state = state;
+        drv->safe_state_index = i-1;
 
         if (sh_mobile_sleep_supported & SUSP_SH_SF) {
-                state = &dev->states[i++];
+                state = &drv->states[i++];
                 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
                 strncpy(state->desc, "SuperH Sleep Mode [SF]",
                         CPUIDLE_DESC_LEN);
@@ -101,7 +104,7 @@ void sh_mobile_setup_cpuidle(void)
         }
 
         if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
-                state = &dev->states[i++];
+                state = &drv->states[i++];
                 snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
                 strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
                         CPUIDLE_DESC_LEN);
@@ -113,7 +116,10 @@ void sh_mobile_setup_cpuidle(void)
                 state->enter = cpuidle_sleep_enter;
         }
 
+        drv->state_count = i;
         dev->state_count = i;
 
+        cpuidle_register_driver(&cpuidle_driver);
+
         cpuidle_register_device(dev);
 }
diff --git a/arch/x86/platform/mrst/pmu.c b/arch/x86/platform/mrst/pmu.c
index 9281da7d91b..c0ac06da57a 100644
--- a/arch/x86/platform/mrst/pmu.c
+++ b/arch/x86/platform/mrst/pmu.c
@@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = {
 /* 24 */ { 0x4110, 0 }, /* Lincroft */
 };
 
-/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */
+/* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */
 static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
 static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
                                    0x0804, 0x0805, 0x080f, 0};