| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-07 13:13:52 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-07 13:13:52 -0500 |
| commit | 3c00303206c3a1ccd86579efdc90bc35f140962e (patch) | |
| tree | 66170c84b5ddaeb102aea3530517a26657b6ea29 | |
| parent | 83dbb15e9cd78a3619e3db36777e2f81d09b2914 (diff) | |
| parent | efb90582c575084723cc14302c1300cb26c7e01f (diff) | |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
cpuidle: Single/Global registration of idle states
cpuidle: Split cpuidle_state structure and move per-cpu statistics fields
cpuidle: Remove CPUIDLE_FLAG_IGNORE and dev->prepare()
cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
ACPI: Fix CONFIG_ACPI_DOCK=n compiler warning
ACPI: Export FADT pm_profile integer value to userspace
thermal: Prevent polling from happening during system suspend
ACPI: Drop ACPI_NO_HARDWARE_INIT
ACPI atomicio: Convert width in bits to bytes in __acpi_ioremap_fast()
PNPACPI: Simplify disabled resource registration
ACPI: Fix possible recursive locking in hwregs.c
ACPI: use kstrdup()
mrst pmu: update comment
tools/power turbostat: less verbose debugging
28 files changed, 729 insertions, 412 deletions
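The common thread in the cpuidle diffs below is the new ->enter() callback contract: the callback now receives the cpuidle_driver and a state index instead of a cpuidle_state pointer, records the measured residency in dev->last_residency itself, and returns the index of the state actually entered (or a negative errno). A minimal sketch of that contract, using a hypothetical driver name and an ARM-style WFI body purely for illustration:

```c
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <asm/proc-fns.h>

/* Hypothetical enter routine following the post-merge callback signature. */
static int example_enter_idle(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index)
{
	ktime_t before, after;

	local_irq_disable();
	before = ktime_get();

	cpu_do_idle();		/* wait for interrupt; platform-specific */

	after = ktime_get();
	local_irq_enable();

	/* The driver, not the core, now reports how long it actually idled. */
	dev->last_residency = (int)ktime_to_us(ktime_sub(after, before));

	return index;		/* index of the state that was entered */
}
```

The state table and state_count likewise move from the per-CPU cpuidle_device to the single cpuidle_driver structure, which is what the repeated device-> to driver-> conversions in the hunks below implement.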
diff --git a/Documentation/ABI/stable/sysfs-acpi-pmprofile b/Documentation/ABI/stable/sysfs-acpi-pmprofile
new file mode 100644
index 000000000000..964c7a8afb26
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-acpi-pmprofile
| @@ -0,0 +1,22 @@ | |||
| 1 | What: /sys/firmware/acpi/pm_profile | ||
| 2 | Date: 03-Nov-2011 | ||
| 3 | KernelVersion: v3.2 | ||
| 4 | Contact: linux-acpi@vger.kernel.org | ||
| 5 | Description: The ACPI pm_profile sysfs interface exports the platform | ||
| 6 | power management (and performance) requirement expectations | ||
| 7 | as provided by BIOS. The integer value is directly passed as | ||
| 8 | retrieved from the FADT ACPI table. | ||
| 9 | Values: For possible values see ACPI specification: | ||
| 10 | 5.2.9 Fixed ACPI Description Table (FADT) | ||
| 11 | Field: Preferred_PM_Profile | ||
| 12 | |||
| 13 | Currently these values are defined by spec: | ||
| 14 | 0 Unspecified | ||
| 15 | 1 Desktop | ||
| 16 | 2 Mobile | ||
| 17 | 3 Workstation | ||
| 18 | 4 Enterprise Server | ||
| 19 | 5 SOHO Server | ||
| 20 | 6 Appliance PC | ||
| 21 | 7 Performance Server | ||
| 22 | >7 Reserved | ||
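Assuming the attribute behaves as documented above, userspace can obtain the profile with a plain file read; the following sketch (hypothetical, not part of this commit) prints the raw FADT value:

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/pm_profile", "r");
	int profile;

	if (!f) {
		perror("/sys/firmware/acpi/pm_profile");
		return 1;
	}
	if (fscanf(f, "%d", &profile) != 1) {
		fprintf(stderr, "unexpected pm_profile contents\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	/* e.g. a value of 1 would mean "Desktop" per the table above */
	printf("Preferred_PM_Profile = %d\n", profile);
	return 0;
}
```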
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index f474272c0eac..a851e6c98421 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
| @@ -34,7 +34,8 @@ static struct cpuidle_driver at91_idle_driver = { | |||
| 34 | 34 | ||
| 35 | /* Actual code that puts the SoC in different idle states */ | 35 | /* Actual code that puts the SoC in different idle states */ |
| 36 | static int at91_enter_idle(struct cpuidle_device *dev, | 36 | static int at91_enter_idle(struct cpuidle_device *dev, |
| 37 | struct cpuidle_state *state) | 37 | struct cpuidle_driver *drv, |
| 38 | int index) | ||
| 38 | { | 39 | { |
| 39 | struct timeval before, after; | 40 | struct timeval before, after; |
| 40 | int idle_time; | 41 | int idle_time; |
| @@ -42,10 +43,10 @@ static int at91_enter_idle(struct cpuidle_device *dev, | |||
| 42 | 43 | ||
| 43 | local_irq_disable(); | 44 | local_irq_disable(); |
| 44 | do_gettimeofday(&before); | 45 | do_gettimeofday(&before); |
| 45 | if (state == &dev->states[0]) | 46 | if (index == 0) |
| 46 | /* Wait for interrupt state */ | 47 | /* Wait for interrupt state */ |
| 47 | cpu_do_idle(); | 48 | cpu_do_idle(); |
| 48 | else if (state == &dev->states[1]) { | 49 | else if (index == 1) { |
| 49 | asm("b 1f; .align 5; 1:"); | 50 | asm("b 1f; .align 5; 1:"); |
| 50 | asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */ | 51 | asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */ |
| 51 | saved_lpr = sdram_selfrefresh_enable(); | 52 | saved_lpr = sdram_selfrefresh_enable(); |
| @@ -56,34 +57,38 @@ static int at91_enter_idle(struct cpuidle_device *dev, | |||
| 56 | local_irq_enable(); | 57 | local_irq_enable(); |
| 57 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + | 58 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + |
| 58 | (after.tv_usec - before.tv_usec); | 59 | (after.tv_usec - before.tv_usec); |
| 59 | return idle_time; | 60 | |
| 61 | dev->last_residency = idle_time; | ||
| 62 | return index; | ||
| 60 | } | 63 | } |
| 61 | 64 | ||
| 62 | /* Initialize CPU idle by registering the idle states */ | 65 | /* Initialize CPU idle by registering the idle states */ |
| 63 | static int at91_init_cpuidle(void) | 66 | static int at91_init_cpuidle(void) |
| 64 | { | 67 | { |
| 65 | struct cpuidle_device *device; | 68 | struct cpuidle_device *device; |
| 66 | 69 | struct cpuidle_driver *driver = &at91_idle_driver; | |
| 67 | cpuidle_register_driver(&at91_idle_driver); | ||
| 68 | 70 | ||
| 69 | device = &per_cpu(at91_cpuidle_device, smp_processor_id()); | 71 | device = &per_cpu(at91_cpuidle_device, smp_processor_id()); |
| 70 | device->state_count = AT91_MAX_STATES; | 72 | device->state_count = AT91_MAX_STATES; |
| 73 | driver->state_count = AT91_MAX_STATES; | ||
| 71 | 74 | ||
| 72 | /* Wait for interrupt state */ | 75 | /* Wait for interrupt state */ |
| 73 | device->states[0].enter = at91_enter_idle; | 76 | driver->states[0].enter = at91_enter_idle; |
| 74 | device->states[0].exit_latency = 1; | 77 | driver->states[0].exit_latency = 1; |
| 75 | device->states[0].target_residency = 10000; | 78 | driver->states[0].target_residency = 10000; |
| 76 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 79 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
| 77 | strcpy(device->states[0].name, "WFI"); | 80 | strcpy(driver->states[0].name, "WFI"); |
| 78 | strcpy(device->states[0].desc, "Wait for interrupt"); | 81 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
| 79 | 82 | ||
| 80 | /* Wait for interrupt and RAM self refresh state */ | 83 | /* Wait for interrupt and RAM self refresh state */ |
| 81 | device->states[1].enter = at91_enter_idle; | 84 | driver->states[1].enter = at91_enter_idle; |
| 82 | device->states[1].exit_latency = 10; | 85 | driver->states[1].exit_latency = 10; |
| 83 | device->states[1].target_residency = 10000; | 86 | driver->states[1].target_residency = 10000; |
| 84 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 87 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
| 85 | strcpy(device->states[1].name, "RAM_SR"); | 88 | strcpy(driver->states[1].name, "RAM_SR"); |
| 86 | strcpy(device->states[1].desc, "WFI and RAM Self Refresh"); | 89 | strcpy(driver->states[1].desc, "WFI and RAM Self Refresh"); |
| 90 | |||
| 91 | cpuidle_register_driver(&at91_idle_driver); | ||
| 87 | 92 | ||
| 88 | if (cpuidle_register_device(device)) { | 93 | if (cpuidle_register_device(device)) { |
| 89 | printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); | 94 | printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); |
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 60d2f4871afa..a30c7c5a6d83 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
| @@ -79,9 +79,11 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { | |||
| 79 | 79 | ||
| 80 | /* Actual code that puts the SoC in different idle states */ | 80 | /* Actual code that puts the SoC in different idle states */ |
| 81 | static int davinci_enter_idle(struct cpuidle_device *dev, | 81 | static int davinci_enter_idle(struct cpuidle_device *dev, |
| 82 | struct cpuidle_state *state) | 82 | struct cpuidle_driver *drv, |
| 83 | int index) | ||
| 83 | { | 84 | { |
| 84 | struct davinci_ops *ops = cpuidle_get_statedata(state); | 85 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
| 86 | struct davinci_ops *ops = cpuidle_get_statedata(state_usage); | ||
| 85 | struct timeval before, after; | 87 | struct timeval before, after; |
| 86 | int idle_time; | 88 | int idle_time; |
| 87 | 89 | ||
| @@ -99,13 +101,17 @@ static int davinci_enter_idle(struct cpuidle_device *dev, | |||
| 99 | local_irq_enable(); | 101 | local_irq_enable(); |
| 100 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + | 102 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + |
| 101 | (after.tv_usec - before.tv_usec); | 103 | (after.tv_usec - before.tv_usec); |
| 102 | return idle_time; | 104 | |
| 105 | dev->last_residency = idle_time; | ||
| 106 | |||
| 107 | return index; | ||
| 103 | } | 108 | } |
| 104 | 109 | ||
| 105 | static int __init davinci_cpuidle_probe(struct platform_device *pdev) | 110 | static int __init davinci_cpuidle_probe(struct platform_device *pdev) |
| 106 | { | 111 | { |
| 107 | int ret; | 112 | int ret; |
| 108 | struct cpuidle_device *device; | 113 | struct cpuidle_device *device; |
| 114 | struct cpuidle_driver *driver = &davinci_idle_driver; | ||
| 109 | struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; | 115 | struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; |
| 110 | 116 | ||
| 111 | device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); | 117 | device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); |
| @@ -117,32 +123,33 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) | |||
| 117 | 123 | ||
| 118 | ddr2_reg_base = pdata->ddr2_ctlr_base; | 124 | ddr2_reg_base = pdata->ddr2_ctlr_base; |
| 119 | 125 | ||
| 120 | ret = cpuidle_register_driver(&davinci_idle_driver); | ||
| 121 | if (ret) { | ||
| 122 | dev_err(&pdev->dev, "failed to register driver\n"); | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* Wait for interrupt state */ | 126 | /* Wait for interrupt state */ |
| 127 | device->states[0].enter = davinci_enter_idle; | 127 | driver->states[0].enter = davinci_enter_idle; |
| 128 | device->states[0].exit_latency = 1; | 128 | driver->states[0].exit_latency = 1; |
| 129 | device->states[0].target_residency = 10000; | 129 | driver->states[0].target_residency = 10000; |
| 130 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 130 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
| 131 | strcpy(device->states[0].name, "WFI"); | 131 | strcpy(driver->states[0].name, "WFI"); |
| 132 | strcpy(device->states[0].desc, "Wait for interrupt"); | 132 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
| 133 | 133 | ||
| 134 | /* Wait for interrupt and DDR self refresh state */ | 134 | /* Wait for interrupt and DDR self refresh state */ |
| 135 | device->states[1].enter = davinci_enter_idle; | 135 | driver->states[1].enter = davinci_enter_idle; |
| 136 | device->states[1].exit_latency = 10; | 136 | driver->states[1].exit_latency = 10; |
| 137 | device->states[1].target_residency = 10000; | 137 | driver->states[1].target_residency = 10000; |
| 138 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 138 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
| 139 | strcpy(device->states[1].name, "DDR SR"); | 139 | strcpy(driver->states[1].name, "DDR SR"); |
| 140 | strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); | 140 | strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); |
| 141 | if (pdata->ddr2_pdown) | 141 | if (pdata->ddr2_pdown) |
| 142 | davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; | 142 | davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; |
| 143 | cpuidle_set_statedata(&device->states[1], &davinci_states[1]); | 143 | cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); |
| 144 | 144 | ||
| 145 | device->state_count = DAVINCI_CPUIDLE_MAX_STATES; | 145 | device->state_count = DAVINCI_CPUIDLE_MAX_STATES; |
| 146 | driver->state_count = DAVINCI_CPUIDLE_MAX_STATES; | ||
| 147 | |||
| 148 | ret = cpuidle_register_driver(&davinci_idle_driver); | ||
| 149 | if (ret) { | ||
| 150 | dev_err(&pdev->dev, "failed to register driver\n"); | ||
| 151 | return ret; | ||
| 152 | } | ||
| 146 | 153 | ||
| 147 | ret = cpuidle_register_device(device); | 154 | ret = cpuidle_register_device(device); |
| 148 | if (ret) { | 155 | if (ret) { |
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index bf7e96f2793a..35f6502144ae 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
| @@ -16,7 +16,8 @@ | |||
| 16 | #include <asm/proc-fns.h> | 16 | #include <asm/proc-fns.h> |
| 17 | 17 | ||
| 18 | static int exynos4_enter_idle(struct cpuidle_device *dev, | 18 | static int exynos4_enter_idle(struct cpuidle_device *dev, |
| 19 | struct cpuidle_state *state); | 19 | struct cpuidle_driver *drv, |
| 20 | int index); | ||
| 20 | 21 | ||
| 21 | static struct cpuidle_state exynos4_cpuidle_set[] = { | 22 | static struct cpuidle_state exynos4_cpuidle_set[] = { |
| 22 | [0] = { | 23 | [0] = { |
| @@ -37,7 +38,8 @@ static struct cpuidle_driver exynos4_idle_driver = { | |||
| 37 | }; | 38 | }; |
| 38 | 39 | ||
| 39 | static int exynos4_enter_idle(struct cpuidle_device *dev, | 40 | static int exynos4_enter_idle(struct cpuidle_device *dev, |
| 40 | struct cpuidle_state *state) | 41 | struct cpuidle_driver *drv, |
| 42 | int index) | ||
| 41 | { | 43 | { |
| 42 | struct timeval before, after; | 44 | struct timeval before, after; |
| 43 | int idle_time; | 45 | int idle_time; |
| @@ -52,29 +54,31 @@ static int exynos4_enter_idle(struct cpuidle_device *dev, | |||
| 52 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + | 54 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + |
| 53 | (after.tv_usec - before.tv_usec); | 55 | (after.tv_usec - before.tv_usec); |
| 54 | 56 | ||
| 55 | return idle_time; | 57 | dev->last_residency = idle_time; |
| 58 | return index; | ||
| 56 | } | 59 | } |
| 57 | 60 | ||
| 58 | static int __init exynos4_init_cpuidle(void) | 61 | static int __init exynos4_init_cpuidle(void) |
| 59 | { | 62 | { |
| 60 | int i, max_cpuidle_state, cpu_id; | 63 | int i, max_cpuidle_state, cpu_id; |
| 61 | struct cpuidle_device *device; | 64 | struct cpuidle_device *device; |
| 62 | 65 | struct cpuidle_driver *drv = &exynos4_idle_driver; | |
| 66 | |||
| 67 | /* Setup cpuidle driver */ | ||
| 68 | drv->state_count = (sizeof(exynos4_cpuidle_set) / | ||
| 69 | sizeof(struct cpuidle_state)); | ||
| 70 | max_cpuidle_state = drv->state_count; | ||
| 71 | for (i = 0; i < max_cpuidle_state; i++) { | ||
| 72 | memcpy(&drv->states[i], &exynos4_cpuidle_set[i], | ||
| 73 | sizeof(struct cpuidle_state)); | ||
| 74 | } | ||
| 63 | cpuidle_register_driver(&exynos4_idle_driver); | 75 | cpuidle_register_driver(&exynos4_idle_driver); |
| 64 | 76 | ||
| 65 | for_each_cpu(cpu_id, cpu_online_mask) { | 77 | for_each_cpu(cpu_id, cpu_online_mask) { |
| 66 | device = &per_cpu(exynos4_cpuidle_device, cpu_id); | 78 | device = &per_cpu(exynos4_cpuidle_device, cpu_id); |
| 67 | device->cpu = cpu_id; | 79 | device->cpu = cpu_id; |
| 68 | 80 | ||
| 69 | device->state_count = (sizeof(exynos4_cpuidle_set) / | 81 | device->state_count = drv->state_count; |
| 70 | sizeof(struct cpuidle_state)); | ||
| 71 | |||
| 72 | max_cpuidle_state = device->state_count; | ||
| 73 | |||
| 74 | for (i = 0; i < max_cpuidle_state; i++) { | ||
| 75 | memcpy(&device->states[i], &exynos4_cpuidle_set[i], | ||
| 76 | sizeof(struct cpuidle_state)); | ||
| 77 | } | ||
| 78 | 82 | ||
| 79 | if (cpuidle_register_device(device)) { | 83 | if (cpuidle_register_device(device)) { |
| 80 | printk(KERN_ERR "CPUidle register device failed\n,"); | 84 | printk(KERN_ERR "CPUidle register device failed\n,"); |
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 864e569f684e..7088180b018b 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
| @@ -33,17 +33,18 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); | |||
| 33 | 33 | ||
| 34 | /* Actual code that puts the SoC in different idle states */ | 34 | /* Actual code that puts the SoC in different idle states */ |
| 35 | static int kirkwood_enter_idle(struct cpuidle_device *dev, | 35 | static int kirkwood_enter_idle(struct cpuidle_device *dev, |
| 36 | struct cpuidle_state *state) | 36 | struct cpuidle_driver *drv, |
| 37 | int index) | ||
| 37 | { | 38 | { |
| 38 | struct timeval before, after; | 39 | struct timeval before, after; |
| 39 | int idle_time; | 40 | int idle_time; |
| 40 | 41 | ||
| 41 | local_irq_disable(); | 42 | local_irq_disable(); |
| 42 | do_gettimeofday(&before); | 43 | do_gettimeofday(&before); |
| 43 | if (state == &dev->states[0]) | 44 | if (index == 0) |
| 44 | /* Wait for interrupt state */ | 45 | /* Wait for interrupt state */ |
| 45 | cpu_do_idle(); | 46 | cpu_do_idle(); |
| 46 | else if (state == &dev->states[1]) { | 47 | else if (index == 1) { |
| 47 | /* | 48 | /* |
| 48 | * Following write will put DDR in self refresh. | 49 | * Following write will put DDR in self refresh. |
| 49 | * Note that we have 256 cycles before DDR puts it | 50 | * Note that we have 256 cycles before DDR puts it |
| @@ -58,35 +59,40 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev, | |||
| 58 | local_irq_enable(); | 59 | local_irq_enable(); |
| 59 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + | 60 | idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + |
| 60 | (after.tv_usec - before.tv_usec); | 61 | (after.tv_usec - before.tv_usec); |
| 61 | return idle_time; | 62 | |
| 63 | /* Update last residency */ | ||
| 64 | dev->last_residency = idle_time; | ||
| 65 | |||
| 66 | return index; | ||
| 62 | } | 67 | } |
| 63 | 68 | ||
| 64 | /* Initialize CPU idle by registering the idle states */ | 69 | /* Initialize CPU idle by registering the idle states */ |
| 65 | static int kirkwood_init_cpuidle(void) | 70 | static int kirkwood_init_cpuidle(void) |
| 66 | { | 71 | { |
| 67 | struct cpuidle_device *device; | 72 | struct cpuidle_device *device; |
| 68 | 73 | struct cpuidle_driver *driver = &kirkwood_idle_driver; | |
| 69 | cpuidle_register_driver(&kirkwood_idle_driver); | ||
| 70 | 74 | ||
| 71 | device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); | 75 | device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); |
| 72 | device->state_count = KIRKWOOD_MAX_STATES; | 76 | device->state_count = KIRKWOOD_MAX_STATES; |
| 77 | driver->state_count = KIRKWOOD_MAX_STATES; | ||
| 73 | 78 | ||
| 74 | /* Wait for interrupt state */ | 79 | /* Wait for interrupt state */ |
| 75 | device->states[0].enter = kirkwood_enter_idle; | 80 | driver->states[0].enter = kirkwood_enter_idle; |
| 76 | device->states[0].exit_latency = 1; | 81 | driver->states[0].exit_latency = 1; |
| 77 | device->states[0].target_residency = 10000; | 82 | driver->states[0].target_residency = 10000; |
| 78 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 83 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
| 79 | strcpy(device->states[0].name, "WFI"); | 84 | strcpy(driver->states[0].name, "WFI"); |
| 80 | strcpy(device->states[0].desc, "Wait for interrupt"); | 85 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
| 81 | 86 | ||
| 82 | /* Wait for interrupt and DDR self refresh state */ | 87 | /* Wait for interrupt and DDR self refresh state */ |
| 83 | device->states[1].enter = kirkwood_enter_idle; | 88 | driver->states[1].enter = kirkwood_enter_idle; |
| 84 | device->states[1].exit_latency = 10; | 89 | driver->states[1].exit_latency = 10; |
| 85 | device->states[1].target_residency = 10000; | 90 | driver->states[1].target_residency = 10000; |
| 86 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 91 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
| 87 | strcpy(device->states[1].name, "DDR SR"); | 92 | strcpy(driver->states[1].name, "DDR SR"); |
| 88 | strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); | 93 | strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); |
| 89 | 94 | ||
| 95 | cpuidle_register_driver(&kirkwood_idle_driver); | ||
| 90 | if (cpuidle_register_device(device)) { | 96 | if (cpuidle_register_device(device)) { |
| 91 | printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); | 97 | printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); |
| 92 | return -EIO; | 98 | return -EIO; |
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 4bf6e6e8b100..1fe35c24fba2 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
| @@ -88,17 +88,21 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm, | |||
| 88 | /** | 88 | /** |
| 89 | * omap3_enter_idle - Programs OMAP3 to enter the specified state | 89 | * omap3_enter_idle - Programs OMAP3 to enter the specified state |
| 90 | * @dev: cpuidle device | 90 | * @dev: cpuidle device |
| 91 | * @state: The target state to be programmed | 91 | * @drv: cpuidle driver |
| 92 | * @index: the index of state to be entered | ||
| 92 | * | 93 | * |
| 93 | * Called from the CPUidle framework to program the device to the | 94 | * Called from the CPUidle framework to program the device to the |
| 94 | * specified target state selected by the governor. | 95 | * specified target state selected by the governor. |
| 95 | */ | 96 | */ |
| 96 | static int omap3_enter_idle(struct cpuidle_device *dev, | 97 | static int omap3_enter_idle(struct cpuidle_device *dev, |
| 97 | struct cpuidle_state *state) | 98 | struct cpuidle_driver *drv, |
| 99 | int index) | ||
| 98 | { | 100 | { |
| 99 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(state); | 101 | struct omap3_idle_statedata *cx = |
| 102 | cpuidle_get_statedata(&dev->states_usage[index]); | ||
| 100 | struct timespec ts_preidle, ts_postidle, ts_idle; | 103 | struct timespec ts_preidle, ts_postidle, ts_idle; |
| 101 | u32 mpu_state = cx->mpu_state, core_state = cx->core_state; | 104 | u32 mpu_state = cx->mpu_state, core_state = cx->core_state; |
| 105 | int idle_time; | ||
| 102 | 106 | ||
| 103 | /* Used to keep track of the total time in idle */ | 107 | /* Used to keep track of the total time in idle */ |
| 104 | getnstimeofday(&ts_preidle); | 108 | getnstimeofday(&ts_preidle); |
| @@ -113,7 +117,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, | |||
| 113 | goto return_sleep_time; | 117 | goto return_sleep_time; |
| 114 | 118 | ||
| 115 | /* Deny idle for C1 */ | 119 | /* Deny idle for C1 */ |
| 116 | if (state == &dev->states[0]) { | 120 | if (index == 0) { |
| 117 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); | 121 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); |
| 118 | pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); | 122 | pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); |
| 119 | } | 123 | } |
| @@ -122,7 +126,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, | |||
| 122 | omap_sram_idle(); | 126 | omap_sram_idle(); |
| 123 | 127 | ||
| 124 | /* Re-allow idle for C1 */ | 128 | /* Re-allow idle for C1 */ |
| 125 | if (state == &dev->states[0]) { | 129 | if (index == 0) { |
| 126 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); | 130 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); |
| 127 | pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); | 131 | pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); |
| 128 | } | 132 | } |
| @@ -134,28 +138,38 @@ return_sleep_time: | |||
| 134 | local_irq_enable(); | 138 | local_irq_enable(); |
| 135 | local_fiq_enable(); | 139 | local_fiq_enable(); |
| 136 | 140 | ||
| 137 | return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; | 141 | idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \ |
| 142 | USEC_PER_SEC; | ||
| 143 | |||
| 144 | /* Update cpuidle counters */ | ||
| 145 | dev->last_residency = idle_time; | ||
| 146 | |||
| 147 | return index; | ||
| 138 | } | 148 | } |
| 139 | 149 | ||
| 140 | /** | 150 | /** |
| 141 | * next_valid_state - Find next valid C-state | 151 | * next_valid_state - Find next valid C-state |
| 142 | * @dev: cpuidle device | 152 | * @dev: cpuidle device |
| 143 | * @state: Currently selected C-state | 153 | * @drv: cpuidle driver |
| 154 | * @index: Index of currently selected c-state | ||
| 144 | * | 155 | * |
| 145 | * If the current state is valid, it is returned back to the caller. | 156 | * If the state corresponding to index is valid, index is returned back |
| 146 | * Else, this function searches for a lower c-state which is still | 157 | * to the caller. Else, this function searches for a lower c-state which is |
| 147 | * valid. | 158 | * still valid (as defined in omap3_power_states[]) and returns its index. |
| 148 | * | 159 | * |
| 149 | * A state is valid if the 'valid' field is enabled and | 160 | * A state is valid if the 'valid' field is enabled and |
| 150 | * if it satisfies the enable_off_mode condition. | 161 | * if it satisfies the enable_off_mode condition. |
| 151 | */ | 162 | */ |
| 152 | static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | 163 | static int next_valid_state(struct cpuidle_device *dev, |
| 153 | struct cpuidle_state *curr) | 164 | struct cpuidle_driver *drv, |
| 165 | int index) | ||
| 154 | { | 166 | { |
| 155 | struct cpuidle_state *next = NULL; | 167 | struct cpuidle_state_usage *curr_usage = &dev->states_usage[index]; |
| 156 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr); | 168 | struct cpuidle_state *curr = &drv->states[index]; |
| 169 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage); | ||
| 157 | u32 mpu_deepest_state = PWRDM_POWER_RET; | 170 | u32 mpu_deepest_state = PWRDM_POWER_RET; |
| 158 | u32 core_deepest_state = PWRDM_POWER_RET; | 171 | u32 core_deepest_state = PWRDM_POWER_RET; |
| 172 | int next_index = -1; | ||
| 159 | 173 | ||
| 160 | if (enable_off_mode) { | 174 | if (enable_off_mode) { |
| 161 | mpu_deepest_state = PWRDM_POWER_OFF; | 175 | mpu_deepest_state = PWRDM_POWER_OFF; |
| @@ -172,20 +186,20 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | |||
| 172 | if ((cx->valid) && | 186 | if ((cx->valid) && |
| 173 | (cx->mpu_state >= mpu_deepest_state) && | 187 | (cx->mpu_state >= mpu_deepest_state) && |
| 174 | (cx->core_state >= core_deepest_state)) { | 188 | (cx->core_state >= core_deepest_state)) { |
| 175 | return curr; | 189 | return index; |
| 176 | } else { | 190 | } else { |
| 177 | int idx = OMAP3_NUM_STATES - 1; | 191 | int idx = OMAP3_NUM_STATES - 1; |
| 178 | 192 | ||
| 179 | /* Reach the current state starting at highest C-state */ | 193 | /* Reach the current state starting at highest C-state */ |
| 180 | for (; idx >= 0; idx--) { | 194 | for (; idx >= 0; idx--) { |
| 181 | if (&dev->states[idx] == curr) { | 195 | if (&drv->states[idx] == curr) { |
| 182 | next = &dev->states[idx]; | 196 | next_index = idx; |
| 183 | break; | 197 | break; |
| 184 | } | 198 | } |
| 185 | } | 199 | } |
| 186 | 200 | ||
| 187 | /* Should never hit this condition */ | 201 | /* Should never hit this condition */ |
| 188 | WARN_ON(next == NULL); | 202 | WARN_ON(next_index == -1); |
| 189 | 203 | ||
| 190 | /* | 204 | /* |
| 191 | * Drop to next valid state. | 205 | * Drop to next valid state. |
| @@ -193,41 +207,44 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | |||
| 193 | */ | 207 | */ |
| 194 | idx--; | 208 | idx--; |
| 195 | for (; idx >= 0; idx--) { | 209 | for (; idx >= 0; idx--) { |
| 196 | cx = cpuidle_get_statedata(&dev->states[idx]); | 210 | cx = cpuidle_get_statedata(&dev->states_usage[idx]); |
| 197 | if ((cx->valid) && | 211 | if ((cx->valid) && |
| 198 | (cx->mpu_state >= mpu_deepest_state) && | 212 | (cx->mpu_state >= mpu_deepest_state) && |
| 199 | (cx->core_state >= core_deepest_state)) { | 213 | (cx->core_state >= core_deepest_state)) { |
| 200 | next = &dev->states[idx]; | 214 | next_index = idx; |
| 201 | break; | 215 | break; |
| 202 | } | 216 | } |
| 203 | } | 217 | } |
| 204 | /* | 218 | /* |
| 205 | * C1 is always valid. | 219 | * C1 is always valid. |
| 206 | * So, no need to check for 'next==NULL' outside this loop. | 220 | * So, no need to check for 'next_index == -1' outside |
| 221 | * this loop. | ||
| 207 | */ | 222 | */ |
| 208 | } | 223 | } |
| 209 | 224 | ||
| 210 | return next; | 225 | return next_index; |
| 211 | } | 226 | } |
| 212 | 227 | ||
| 213 | /** | 228 | /** |
| 214 | * omap3_enter_idle_bm - Checks for any bus activity | 229 | * omap3_enter_idle_bm - Checks for any bus activity |
| 215 | * @dev: cpuidle device | 230 | * @dev: cpuidle device |
| 216 | * @state: The target state to be programmed | 231 | * @drv: cpuidle driver |
| 232 | * @index: array index of target state to be programmed | ||
| 217 | * | 233 | * |
| 218 | * This function checks for any pending activity and then programs | 234 | * This function checks for any pending activity and then programs |
| 219 | * the device to the specified or a safer state. | 235 | * the device to the specified or a safer state. |
| 220 | */ | 236 | */ |
| 221 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, | 237 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, |
| 222 | struct cpuidle_state *state) | 238 | struct cpuidle_driver *drv, |
| 239 | int index) | ||
| 223 | { | 240 | { |
| 224 | struct cpuidle_state *new_state; | 241 | int new_state_idx; |
| 225 | u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; | 242 | u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; |
| 226 | struct omap3_idle_statedata *cx; | 243 | struct omap3_idle_statedata *cx; |
| 227 | int ret; | 244 | int ret; |
| 228 | 245 | ||
| 229 | if (!omap3_can_sleep()) { | 246 | if (!omap3_can_sleep()) { |
| 230 | new_state = dev->safe_state; | 247 | new_state_idx = drv->safe_state_index; |
| 231 | goto select_state; | 248 | goto select_state; |
| 232 | } | 249 | } |
| 233 | 250 | ||
| @@ -237,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
| 237 | */ | 254 | */ |
| 238 | cam_state = pwrdm_read_pwrst(cam_pd); | 255 | cam_state = pwrdm_read_pwrst(cam_pd); |
| 239 | if (cam_state == PWRDM_POWER_ON) { | 256 | if (cam_state == PWRDM_POWER_ON) { |
| 240 | new_state = dev->safe_state; | 257 | new_state_idx = drv->safe_state_index; |
| 241 | goto select_state; | 258 | goto select_state; |
| 242 | } | 259 | } |
| 243 | 260 | ||
| @@ -253,7 +270,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
| 253 | * Prevent PER off if CORE is not in retention or off as this | 270 | * Prevent PER off if CORE is not in retention or off as this |
| 254 | * would disable PER wakeups completely. | 271 | * would disable PER wakeups completely. |
| 255 | */ | 272 | */ |
| 256 | cx = cpuidle_get_statedata(state); | 273 | cx = cpuidle_get_statedata(&dev->states_usage[index]); |
| 257 | core_next_state = cx->core_state; | 274 | core_next_state = cx->core_state; |
| 258 | per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); | 275 | per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); |
| 259 | if ((per_next_state == PWRDM_POWER_OFF) && | 276 | if ((per_next_state == PWRDM_POWER_OFF) && |
| @@ -264,11 +281,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
| 264 | if (per_next_state != per_saved_state) | 281 | if (per_next_state != per_saved_state) |
| 265 | pwrdm_set_next_pwrst(per_pd, per_next_state); | 282 | pwrdm_set_next_pwrst(per_pd, per_next_state); |
| 266 | 283 | ||
| 267 | new_state = next_valid_state(dev, state); | 284 | new_state_idx = next_valid_state(dev, drv, index); |
| 268 | 285 | ||
| 269 | select_state: | 286 | select_state: |
| 270 | dev->last_state = new_state; | 287 | ret = omap3_enter_idle(dev, drv, new_state_idx); |
| 271 | ret = omap3_enter_idle(dev, new_state); | ||
| 272 | 288 | ||
| 273 | /* Restore original PER state if it was modified */ | 289 | /* Restore original PER state if it was modified */ |
| 274 | if (per_next_state != per_saved_state) | 290 | if (per_next_state != per_saved_state) |
| @@ -301,22 +317,31 @@ struct cpuidle_driver omap3_idle_driver = { | |||
| 301 | .owner = THIS_MODULE, | 317 | .owner = THIS_MODULE, |
| 302 | }; | 318 | }; |
| 303 | 319 | ||
| 304 | /* Helper to fill the C-state common data and register the driver_data */ | 320 | /* Helper to fill the C-state common data*/ |
| 305 | static inline struct omap3_idle_statedata *_fill_cstate( | 321 | static inline void _fill_cstate(struct cpuidle_driver *drv, |
| 306 | struct cpuidle_device *dev, | ||
| 307 | int idx, const char *descr) | 322 | int idx, const char *descr) |
| 308 | { | 323 | { |
| 309 | struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; | 324 | struct cpuidle_state *state = &drv->states[idx]; |
| 310 | struct cpuidle_state *state = &dev->states[idx]; | ||
| 311 | 325 | ||
| 312 | state->exit_latency = cpuidle_params_table[idx].exit_latency; | 326 | state->exit_latency = cpuidle_params_table[idx].exit_latency; |
| 313 | state->target_residency = cpuidle_params_table[idx].target_residency; | 327 | state->target_residency = cpuidle_params_table[idx].target_residency; |
| 314 | state->flags = CPUIDLE_FLAG_TIME_VALID; | 328 | state->flags = CPUIDLE_FLAG_TIME_VALID; |
| 315 | state->enter = omap3_enter_idle_bm; | 329 | state->enter = omap3_enter_idle_bm; |
| 316 | cx->valid = cpuidle_params_table[idx].valid; | ||
| 317 | sprintf(state->name, "C%d", idx + 1); | 330 | sprintf(state->name, "C%d", idx + 1); |
| 318 | strncpy(state->desc, descr, CPUIDLE_DESC_LEN); | 331 | strncpy(state->desc, descr, CPUIDLE_DESC_LEN); |
| 319 | cpuidle_set_statedata(state, cx); | 332 | |
| 333 | } | ||
| 334 | |||
| 335 | /* Helper to register the driver_data */ | ||
| 336 | static inline struct omap3_idle_statedata *_fill_cstate_usage( | ||
| 337 | struct cpuidle_device *dev, | ||
| 338 | int idx) | ||
| 339 | { | ||
| 340 | struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; | ||
| 341 | struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; | ||
| 342 | |||
| 343 | cx->valid = cpuidle_params_table[idx].valid; | ||
| 344 | cpuidle_set_statedata(state_usage, cx); | ||
| 320 | 345 | ||
| 321 | return cx; | 346 | return cx; |
| 322 | } | 347 | } |
| @@ -330,6 +355,7 @@ static inline struct omap3_idle_statedata *_fill_cstate( | |||
| 330 | int __init omap3_idle_init(void) | 355 | int __init omap3_idle_init(void) |
| 331 | { | 356 | { |
| 332 | struct cpuidle_device *dev; | 357 | struct cpuidle_device *dev; |
| 358 | struct cpuidle_driver *drv = &omap3_idle_driver; | ||
| 333 | struct omap3_idle_statedata *cx; | 359 | struct omap3_idle_statedata *cx; |
| 334 | 360 | ||
| 335 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); | 361 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); |
| @@ -337,44 +363,52 @@ int __init omap3_idle_init(void) | |||
| 337 | per_pd = pwrdm_lookup("per_pwrdm"); | 363 | per_pd = pwrdm_lookup("per_pwrdm"); |
| 338 | cam_pd = pwrdm_lookup("cam_pwrdm"); | 364 | cam_pd = pwrdm_lookup("cam_pwrdm"); |
| 339 | 365 | ||
| 340 | cpuidle_register_driver(&omap3_idle_driver); | 366 | |
| 367 | drv->safe_state_index = -1; | ||
| 341 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); | 368 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); |
| 342 | 369 | ||
| 343 | /* C1 . MPU WFI + Core active */ | 370 | /* C1 . MPU WFI + Core active */ |
| 344 | cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); | 371 | _fill_cstate(drv, 0, "MPU ON + CORE ON"); |
| 345 | (&dev->states[0])->enter = omap3_enter_idle; | 372 | (&drv->states[0])->enter = omap3_enter_idle; |
| 346 | dev->safe_state = &dev->states[0]; | 373 | drv->safe_state_index = 0; |
| 374 | cx = _fill_cstate_usage(dev, 0); | ||
| 347 | cx->valid = 1; /* C1 is always valid */ | 375 | cx->valid = 1; /* C1 is always valid */ |
| 348 | cx->mpu_state = PWRDM_POWER_ON; | 376 | cx->mpu_state = PWRDM_POWER_ON; |
| 349 | cx->core_state = PWRDM_POWER_ON; | 377 | cx->core_state = PWRDM_POWER_ON; |
| 350 | 378 | ||
| 351 | /* C2 . MPU WFI + Core inactive */ | 379 | /* C2 . MPU WFI + Core inactive */ |
| 352 | cx = _fill_cstate(dev, 1, "MPU ON + CORE ON"); | 380 | _fill_cstate(drv, 1, "MPU ON + CORE ON"); |
| 381 | cx = _fill_cstate_usage(dev, 1); | ||
| 353 | cx->mpu_state = PWRDM_POWER_ON; | 382 | cx->mpu_state = PWRDM_POWER_ON; |
| 354 | cx->core_state = PWRDM_POWER_ON; | 383 | cx->core_state = PWRDM_POWER_ON; |
| 355 | 384 | ||
| 356 | /* C3 . MPU CSWR + Core inactive */ | 385 | /* C3 . MPU CSWR + Core inactive */ |
| 357 | cx = _fill_cstate(dev, 2, "MPU RET + CORE ON"); | 386 | _fill_cstate(drv, 2, "MPU RET + CORE ON"); |
| 387 | cx = _fill_cstate_usage(dev, 2); | ||
| 358 | cx->mpu_state = PWRDM_POWER_RET; | 388 | cx->mpu_state = PWRDM_POWER_RET; |
| 359 | cx->core_state = PWRDM_POWER_ON; | 389 | cx->core_state = PWRDM_POWER_ON; |
| 360 | 390 | ||
| 361 | /* C4 . MPU OFF + Core inactive */ | 391 | /* C4 . MPU OFF + Core inactive */ |
| 362 | cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON"); | 392 | _fill_cstate(drv, 3, "MPU OFF + CORE ON"); |
| 393 | cx = _fill_cstate_usage(dev, 3); | ||
| 363 | cx->mpu_state = PWRDM_POWER_OFF; | 394 | cx->mpu_state = PWRDM_POWER_OFF; |
| 364 | cx->core_state = PWRDM_POWER_ON; | 395 | cx->core_state = PWRDM_POWER_ON; |
| 365 | 396 | ||
| 366 | /* C5 . MPU RET + Core RET */ | 397 | /* C5 . MPU RET + Core RET */ |
| 367 | cx = _fill_cstate(dev, 4, "MPU RET + CORE RET"); | 398 | _fill_cstate(drv, 4, "MPU RET + CORE RET"); |
| 399 | cx = _fill_cstate_usage(dev, 4); | ||
| 368 | cx->mpu_state = PWRDM_POWER_RET; | 400 | cx->mpu_state = PWRDM_POWER_RET; |
| 369 | cx->core_state = PWRDM_POWER_RET; | 401 | cx->core_state = PWRDM_POWER_RET; |
| 370 | 402 | ||
| 371 | /* C6 . MPU OFF + Core RET */ | 403 | /* C6 . MPU OFF + Core RET */ |
| 372 | cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET"); | 404 | _fill_cstate(drv, 5, "MPU OFF + CORE RET"); |
| 405 | cx = _fill_cstate_usage(dev, 5); | ||
| 373 | cx->mpu_state = PWRDM_POWER_OFF; | 406 | cx->mpu_state = PWRDM_POWER_OFF; |
| 374 | cx->core_state = PWRDM_POWER_RET; | 407 | cx->core_state = PWRDM_POWER_RET; |
| 375 | 408 | ||
| 376 | /* C7 . MPU OFF + Core OFF */ | 409 | /* C7 . MPU OFF + Core OFF */ |
| 377 | cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF"); | 410 | _fill_cstate(drv, 6, "MPU OFF + CORE OFF"); |
| 411 | cx = _fill_cstate_usage(dev, 6); | ||
| 378 | /* | 412 | /* |
| 379 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot | 413 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot |
| 380 | * enable OFF mode in a stable form for previous revisions. | 414 | * enable OFF mode in a stable form for previous revisions. |
| @@ -388,6 +422,9 @@ int __init omap3_idle_init(void) | |||
| 388 | cx->mpu_state = PWRDM_POWER_OFF; | 422 | cx->mpu_state = PWRDM_POWER_OFF; |
| 389 | cx->core_state = PWRDM_POWER_OFF; | 423 | cx->core_state = PWRDM_POWER_OFF; |
| 390 | 424 | ||
| 425 | drv->state_count = OMAP3_NUM_STATES; | ||
| 426 | cpuidle_register_driver(&omap3_idle_driver); | ||
| 427 | |||
| 391 | dev->state_count = OMAP3_NUM_STATES; | 428 | dev->state_count = OMAP3_NUM_STATES; |
| 392 | if (cpuidle_register_device(dev)) { | 429 | if (cpuidle_register_device(dev)) { |
| 393 | printk(KERN_ERR "%s: CPUidle register device failed\n", | 430 | printk(KERN_ERR "%s: CPUidle register device failed\n", |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 7d98f909a8ac..1cc257c9b1e3 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
| @@ -26,11 +26,12 @@ static unsigned long cpuidle_mode[] = { | |||
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | static int cpuidle_sleep_enter(struct cpuidle_device *dev, | 28 | static int cpuidle_sleep_enter(struct cpuidle_device *dev, |
| 29 | struct cpuidle_state *state) | 29 | struct cpuidle_driver *drv, |
| 30 | int index) | ||
| 30 | { | 31 | { |
| 31 | unsigned long allowed_mode = arch_hwblk_sleep_mode(); | 32 | unsigned long allowed_mode = arch_hwblk_sleep_mode(); |
| 32 | ktime_t before, after; | 33 | ktime_t before, after; |
| 33 | int requested_state = state - &dev->states[0]; | 34 | int requested_state = index; |
| 34 | int allowed_state; | 35 | int allowed_state; |
| 35 | int k; | 36 | int k; |
| 36 | 37 | ||
| @@ -47,11 +48,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev, | |||
| 47 | */ | 48 | */ |
| 48 | k = min_t(int, allowed_state, requested_state); | 49 | k = min_t(int, allowed_state, requested_state); |
| 49 | 50 | ||
| 50 | dev->last_state = &dev->states[k]; | ||
| 51 | before = ktime_get(); | 51 | before = ktime_get(); |
| 52 | sh_mobile_call_standby(cpuidle_mode[k]); | 52 | sh_mobile_call_standby(cpuidle_mode[k]); |
| 53 | after = ktime_get(); | 53 | after = ktime_get(); |
| 54 | return ktime_to_ns(ktime_sub(after, before)) >> 10; | 54 | |
| 55 | dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10; | ||
| 56 | |||
| 57 | return k; | ||
| 55 | } | 58 | } |
| 56 | 59 | ||
| 57 | static struct cpuidle_device cpuidle_dev; | 60 | static struct cpuidle_device cpuidle_dev; |
| @@ -63,19 +66,19 @@ static struct cpuidle_driver cpuidle_driver = { | |||
| 63 | void sh_mobile_setup_cpuidle(void) | 66 | void sh_mobile_setup_cpuidle(void) |
| 64 | { | 67 | { |
| 65 | struct cpuidle_device *dev = &cpuidle_dev; | 68 | struct cpuidle_device *dev = &cpuidle_dev; |
| 69 | struct cpuidle_driver *drv = &cpuidle_driver; | ||
| 66 | struct cpuidle_state *state; | 70 | struct cpuidle_state *state; |
| 67 | int i; | 71 | int i; |
| 68 | 72 | ||
| 69 | cpuidle_register_driver(&cpuidle_driver); | ||
| 70 | 73 | ||
| 71 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | 74 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { |
| 72 | dev->states[i].name[0] = '\0'; | 75 | drv->states[i].name[0] = '\0'; |
| 73 | dev->states[i].desc[0] = '\0'; | 76 | drv->states[i].desc[0] = '\0'; |
| 74 | } | 77 | } |
| 75 | 78 | ||
| 76 | i = CPUIDLE_DRIVER_STATE_START; | 79 | i = CPUIDLE_DRIVER_STATE_START; |
| 77 | 80 | ||
| 78 | state = &dev->states[i++]; | 81 | state = &drv->states[i++]; |
| 79 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); | 82 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); |
| 80 | strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); | 83 | strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); |
| 81 | state->exit_latency = 1; | 84 | state->exit_latency = 1; |
| @@ -85,10 +88,10 @@ void sh_mobile_setup_cpuidle(void) | |||
| 85 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 88 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
| 86 | state->enter = cpuidle_sleep_enter; | 89 | state->enter = cpuidle_sleep_enter; |
| 87 | 90 | ||
| 88 | dev->safe_state = state; | 91 | drv->safe_state_index = i-1; |
| 89 | 92 | ||
| 90 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { | 93 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { |
| 91 | state = &dev->states[i++]; | 94 | state = &drv->states[i++]; |
| 92 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); | 95 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); |
| 93 | strncpy(state->desc, "SuperH Sleep Mode [SF]", | 96 | strncpy(state->desc, "SuperH Sleep Mode [SF]", |
| 94 | CPUIDLE_DESC_LEN); | 97 | CPUIDLE_DESC_LEN); |
| @@ -101,7 +104,7 @@ void sh_mobile_setup_cpuidle(void) | |||
| 101 | } | 104 | } |
| 102 | 105 | ||
| 103 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { | 106 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { |
| 104 | state = &dev->states[i++]; | 107 | state = &drv->states[i++]; |
| 105 | snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); | 108 | snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); |
| 106 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", | 109 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", |
| 107 | CPUIDLE_DESC_LEN); | 110 | CPUIDLE_DESC_LEN); |
| @@ -113,7 +116,10 @@ void sh_mobile_setup_cpuidle(void) | |||
| 113 | state->enter = cpuidle_sleep_enter; | 116 | state->enter = cpuidle_sleep_enter; |
| 114 | } | 117 | } |
| 115 | 118 | ||
| 119 | drv->state_count = i; | ||
| 116 | dev->state_count = i; | 120 | dev->state_count = i; |
| 117 | 121 | ||
| 122 | cpuidle_register_driver(&cpuidle_driver); | ||
| 123 | |||
| 118 | cpuidle_register_device(dev); | 124 | cpuidle_register_device(dev); |
| 119 | } | 125 | } |
diff --git a/arch/x86/platform/mrst/pmu.c b/arch/x86/platform/mrst/pmu.c
index 9281da7d91bd..c0ac06da57ac 100644
--- a/arch/x86/platform/mrst/pmu.c
+++ b/arch/x86/platform/mrst/pmu.c
| @@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = { | |||
| 70 | /* 24 */ { 0x4110, 0 }, /* Lincroft */ | 70 | /* 24 */ { 0x4110, 0 }, /* Lincroft */ |
| 71 | }; | 71 | }; |
| 72 | 72 | ||
| 73 | /* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */ | 73 | /* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */ |
| 74 | static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0}; | 74 | static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0}; |
| 75 | static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803, | 75 | static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803, |
| 76 | 0x0804, 0x0805, 0x080f, 0}; | 76 | 0x0804, 0x0805, 0x080f, 0}; |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 55accb7018bb..cc70f3fdcdd1 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
| @@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void) | |||
| 269 | 269 | ||
| 270 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, | 270 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, |
| 271 | ACPI_BITMASK_ALL_FIXED_STATUS); | 271 | ACPI_BITMASK_ALL_FIXED_STATUS); |
| 272 | if (ACPI_FAILURE(status)) { | 272 | |
| 273 | goto unlock_and_exit; | 273 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); |
| 274 | } | 274 | |
| 275 | if (ACPI_FAILURE(status)) | ||
| 276 | goto exit; | ||
| 275 | 277 | ||
| 276 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ | 278 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ |
| 277 | 279 | ||
| 278 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); | 280 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); |
| 279 | 281 | ||
| 280 | unlock_and_exit: | 282 | exit: |
| 281 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); | ||
| 282 | return_ACPI_STATUS(status); | 283 | return_ACPI_STATUS(status); |
| 283 | } | 284 | } |
| 284 | 285 | ||
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 04ae1c88c03c..cfc0cc10af39 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
| @@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, | |||
| 76 | { | 76 | { |
| 77 | struct acpi_iomap *map; | 77 | struct acpi_iomap *map; |
| 78 | 78 | ||
| 79 | map = __acpi_find_iomap(paddr, size); | 79 | map = __acpi_find_iomap(paddr, size/8); |
| 80 | if (map) | 80 | if (map) |
| 81 | return map->vaddr + (paddr - map->paddr); | 81 | return map->vaddr + (paddr - map->paddr); |
| 82 | else | 82 | else |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 437ddbf0c49a..9ecec98bc76e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
| @@ -911,10 +911,7 @@ void __init acpi_early_init(void) | |||
| 911 | } | 911 | } |
| 912 | #endif | 912 | #endif |
| 913 | 913 | ||
| 914 | status = | 914 | status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); |
| 915 | acpi_enable_subsystem(~ | ||
| 916 | (ACPI_NO_HARDWARE_INIT | | ||
| 917 | ACPI_NO_ACPI_ENABLE)); | ||
| 918 | if (ACPI_FAILURE(status)) { | 915 | if (ACPI_FAILURE(status)) { |
| 919 | printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); | 916 | printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); |
| 920 | goto error0; | 917 | goto error0; |
| @@ -935,8 +932,7 @@ static int __init acpi_bus_init(void) | |||
| 935 | 932 | ||
| 936 | acpi_os_initialize1(); | 933 | acpi_os_initialize1(); |
| 937 | 934 | ||
| 938 | status = | 935 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); |
| 939 | acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE); | ||
| 940 | if (ACPI_FAILURE(status)) { | 936 | if (ACPI_FAILURE(status)) { |
| 941 | printk(KERN_ERR PREFIX | 937 | printk(KERN_ERR PREFIX |
| 942 | "Unable to start the ACPI Interpreter\n"); | 938 | "Unable to start the ACPI Interpreter\n"); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1ba6040..9d7bc9f6b6cc 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
| @@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
| 426 | 426 | ||
| 427 | if (action == CPU_ONLINE && pr) { | 427 | if (action == CPU_ONLINE && pr) { |
| 428 | acpi_processor_ppc_has_changed(pr, 0); | 428 | acpi_processor_ppc_has_changed(pr, 0); |
| 429 | acpi_processor_cst_has_changed(pr); | 429 | acpi_processor_hotplug(pr); |
| 430 | acpi_processor_reevaluate_tstate(pr, action); | 430 | acpi_processor_reevaluate_tstate(pr, action); |
| 431 | acpi_processor_tstate_has_changed(pr); | 431 | acpi_processor_tstate_has_changed(pr); |
| 432 | } | 432 | } |
| @@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
| 503 | acpi_processor_get_throttling_info(pr); | 503 | acpi_processor_get_throttling_info(pr); |
| 504 | acpi_processor_get_limit_info(pr); | 504 | acpi_processor_get_limit_info(pr); |
| 505 | 505 | ||
| 506 | 506 | if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) | |
| 507 | if (cpuidle_get_driver() == &acpi_idle_driver) | ||
| 508 | acpi_processor_power_init(pr, device); | 507 | acpi_processor_power_init(pr, device); |
| 509 | 508 | ||
| 510 | pr->cdev = thermal_cooling_device_register("Processor", device, | 509 | pr->cdev = thermal_cooling_device_register("Processor", device, |
| @@ -800,17 +799,9 @@ static int __init acpi_processor_init(void) | |||
| 800 | 799 | ||
| 801 | memset(&errata, 0, sizeof(errata)); | 800 | memset(&errata, 0, sizeof(errata)); |
| 802 | 801 | ||
| 803 | if (!cpuidle_register_driver(&acpi_idle_driver)) { | ||
| 804 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
| 805 | acpi_idle_driver.name); | ||
| 806 | } else { | ||
| 807 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", | ||
| 808 | cpuidle_get_driver()->name); | ||
| 809 | } | ||
| 810 | |||
| 811 | result = acpi_bus_register_driver(&acpi_processor_driver); | 802 | result = acpi_bus_register_driver(&acpi_processor_driver); |
| 812 | if (result < 0) | 803 | if (result < 0) |
| 813 | goto out_cpuidle; | 804 | return result; |
| 814 | 805 | ||
| 815 | acpi_processor_install_hotplug_notify(); | 806 | acpi_processor_install_hotplug_notify(); |
| 816 | 807 | ||
| @@ -821,11 +812,6 @@ static int __init acpi_processor_init(void) | |||
| 821 | acpi_processor_throttling_init(); | 812 | acpi_processor_throttling_init(); |
| 822 | 813 | ||
| 823 | return 0; | 814 | return 0; |
| 824 | |||
| 825 | out_cpuidle: | ||
| 826 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
| 827 | |||
| 828 | return result; | ||
| 829 | } | 815 | } |
| 830 | 816 | ||
| 831 | static void __exit acpi_processor_exit(void) | 817 | static void __exit acpi_processor_exit(void) |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 9b88f9828d8c..73b2909dddfe 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
| @@ -741,22 +741,25 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
| 741 | /** | 741 | /** |
| 742 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type | 742 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type |
| 743 | * @dev: the target CPU | 743 | * @dev: the target CPU |
| 744 | * @state: the state data | 744 | * @drv: cpuidle driver containing cpuidle state info |
| 745 | * @index: index of target state | ||
| 745 | * | 746 | * |
| 746 | * This is equivalent to the HALT instruction. | 747 | * This is equivalent to the HALT instruction. |
| 747 | */ | 748 | */ |
| 748 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | 749 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
| 749 | struct cpuidle_state *state) | 750 | struct cpuidle_driver *drv, int index) |
| 750 | { | 751 | { |
| 751 | ktime_t kt1, kt2; | 752 | ktime_t kt1, kt2; |
| 752 | s64 idle_time; | 753 | s64 idle_time; |
| 753 | struct acpi_processor *pr; | 754 | struct acpi_processor *pr; |
| 754 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 755 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
| 756 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
| 755 | 757 | ||
| 756 | pr = __this_cpu_read(processors); | 758 | pr = __this_cpu_read(processors); |
| 759 | dev->last_residency = 0; | ||
| 757 | 760 | ||
| 758 | if (unlikely(!pr)) | 761 | if (unlikely(!pr)) |
| 759 | return 0; | 762 | return -EINVAL; |
| 760 | 763 | ||
| 761 | local_irq_disable(); | 764 | local_irq_disable(); |
| 762 | 765 | ||
| @@ -764,7 +767,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
| 764 | if (acpi_idle_suspend) { | 767 | if (acpi_idle_suspend) { |
| 765 | local_irq_enable(); | 768 | local_irq_enable(); |
| 766 | cpu_relax(); | 769 | cpu_relax(); |
| 767 | return 0; | 770 | return -EINVAL; |
| 768 | } | 771 | } |
| 769 | 772 | ||
| 770 | lapic_timer_state_broadcast(pr, cx, 1); | 773 | lapic_timer_state_broadcast(pr, cx, 1); |
| @@ -773,37 +776,47 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
| 773 | kt2 = ktime_get_real(); | 776 | kt2 = ktime_get_real(); |
| 774 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | 777 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); |
| 775 | 778 | ||
| 779 | /* Update device last_residency*/ | ||
| 780 | dev->last_residency = (int)idle_time; | ||
| 781 | |||
| 776 | local_irq_enable(); | 782 | local_irq_enable(); |
| 777 | cx->usage++; | 783 | cx->usage++; |
| 778 | lapic_timer_state_broadcast(pr, cx, 0); | 784 | lapic_timer_state_broadcast(pr, cx, 0); |
| 779 | 785 | ||
| 780 | return idle_time; | 786 | return index; |
| 781 | } | 787 | } |
| 782 | 788 | ||
| 783 | /** | 789 | /** |
| 784 | * acpi_idle_enter_simple - enters an ACPI state without BM handling | 790 | * acpi_idle_enter_simple - enters an ACPI state without BM handling |
| 785 | * @dev: the target CPU | 791 | * @dev: the target CPU |
| 786 | * @state: the state data | 792 | * @drv: cpuidle driver with cpuidle state information |
| 793 | * @index: the index of suggested state | ||
| 787 | */ | 794 | */ |
| 788 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, | 795 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, |
| 789 | struct cpuidle_state *state) | 796 | struct cpuidle_driver *drv, int index) |
| 790 | { | 797 | { |
| 791 | struct acpi_processor *pr; | 798 | struct acpi_processor *pr; |
| 792 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 799 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
| 800 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
| 793 | ktime_t kt1, kt2; | 801 | ktime_t kt1, kt2; |
| 794 | s64 idle_time_ns; | 802 | s64 idle_time_ns; |
| 795 | s64 idle_time; | 803 | s64 idle_time; |
| 796 | 804 | ||
| 797 | pr = __this_cpu_read(processors); | 805 | pr = __this_cpu_read(processors); |
| 806 | dev->last_residency = 0; | ||
| 798 | 807 | ||
| 799 | if (unlikely(!pr)) | 808 | if (unlikely(!pr)) |
| 800 | return 0; | 809 | return -EINVAL; |
| 801 | |||
| 802 | if (acpi_idle_suspend) | ||
| 803 | return(acpi_idle_enter_c1(dev, state)); | ||
| 804 | 810 | ||
| 805 | local_irq_disable(); | 811 | local_irq_disable(); |
| 806 | 812 | ||
| 813 | if (acpi_idle_suspend) { | ||
| 814 | local_irq_enable(); | ||
| 815 | cpu_relax(); | ||
| 816 | return -EINVAL; | ||
| 817 | } | ||
| 818 | |||
| 819 | |||
| 807 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 820 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
| 808 | current_thread_info()->status &= ~TS_POLLING; | 821 | current_thread_info()->status &= ~TS_POLLING; |
| 809 | /* | 822 | /* |
| @@ -815,7 +828,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 815 | if (unlikely(need_resched())) { | 828 | if (unlikely(need_resched())) { |
| 816 | current_thread_info()->status |= TS_POLLING; | 829 | current_thread_info()->status |= TS_POLLING; |
| 817 | local_irq_enable(); | 830 | local_irq_enable(); |
| 818 | return 0; | 831 | return -EINVAL; |
| 819 | } | 832 | } |
| 820 | } | 833 | } |
| 821 | 834 | ||
| @@ -837,6 +850,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 837 | idle_time = idle_time_ns; | 850 | idle_time = idle_time_ns; |
| 838 | do_div(idle_time, NSEC_PER_USEC); | 851 | do_div(idle_time, NSEC_PER_USEC); |
| 839 | 852 | ||
| 854 | /* Update device last_residency */ | ||
| 854 | dev->last_residency = (int)idle_time; | ||
| 855 | |||
| 840 | /* Tell the scheduler how much we idled: */ | 856 | /* Tell the scheduler how much we idled: */ |
| 841 | sched_clock_idle_wakeup_event(idle_time_ns); | 857 | sched_clock_idle_wakeup_event(idle_time_ns); |
| 842 | 858 | ||
| @@ -848,7 +864,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 848 | 864 | ||
| 849 | lapic_timer_state_broadcast(pr, cx, 0); | 865 | lapic_timer_state_broadcast(pr, cx, 0); |
| 850 | cx->time += idle_time; | 866 | cx->time += idle_time; |
| 851 | return idle_time; | 867 | return index; |
| 852 | } | 868 | } |
| 853 | 869 | ||
| 854 | static int c3_cpu_count; | 870 | static int c3_cpu_count; |
| @@ -857,37 +873,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock); | |||
| 857 | /** | 873 | /** |
| 858 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 874 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
| 859 | * @dev: the target CPU | 875 | * @dev: the target CPU |
| 860 | * @state: the state data | 876 | * @drv: cpuidle driver containing state data |
| 877 | * @index: the index of suggested state | ||
| 861 | * | 878 | * |
| 862 | * If BM is detected, the deepest non-C3 idle state is entered instead. | 879 | * If BM is detected, the deepest non-C3 idle state is entered instead. |
| 863 | */ | 880 | */ |
| 864 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, | 881 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, |
| 865 | struct cpuidle_state *state) | 882 | struct cpuidle_driver *drv, int index) |
| 866 | { | 883 | { |
| 867 | struct acpi_processor *pr; | 884 | struct acpi_processor *pr; |
| 868 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 885 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
| 886 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
| 869 | ktime_t kt1, kt2; | 887 | ktime_t kt1, kt2; |
| 870 | s64 idle_time_ns; | 888 | s64 idle_time_ns; |
| 871 | s64 idle_time; | 889 | s64 idle_time; |
| 872 | 890 | ||
| 873 | 891 | ||
| 874 | pr = __this_cpu_read(processors); | 892 | pr = __this_cpu_read(processors); |
| 893 | dev->last_residency = 0; | ||
| 875 | 894 | ||
| 876 | if (unlikely(!pr)) | 895 | if (unlikely(!pr)) |
| 877 | return 0; | 896 | return -EINVAL; |
| 897 | |||
| 878 | 898 | ||
| 879 | if (acpi_idle_suspend) | 899 | if (acpi_idle_suspend) { |
| 880 | return(acpi_idle_enter_c1(dev, state)); | 900 | cpu_relax(); |
| 901 | return -EINVAL; | ||
| 902 | } | ||
| 881 | 903 | ||
| 882 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { | 904 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { |
| 883 | if (dev->safe_state) { | 905 | if (drv->safe_state_index >= 0) { |
| 884 | dev->last_state = dev->safe_state; | 906 | return drv->states[drv->safe_state_index].enter(dev, |
| 885 | return dev->safe_state->enter(dev, dev->safe_state); | 907 | drv, drv->safe_state_index); |
| 886 | } else { | 908 | } else { |
| 887 | local_irq_disable(); | 909 | local_irq_disable(); |
| 888 | acpi_safe_halt(); | 910 | acpi_safe_halt(); |
| 889 | local_irq_enable(); | 911 | local_irq_enable(); |
| 890 | return 0; | 912 | return -EINVAL; |
| 891 | } | 913 | } |
| 892 | } | 914 | } |
| 893 | 915 | ||
| @@ -904,7 +926,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 904 | if (unlikely(need_resched())) { | 926 | if (unlikely(need_resched())) { |
| 905 | current_thread_info()->status |= TS_POLLING; | 927 | current_thread_info()->status |= TS_POLLING; |
| 906 | local_irq_enable(); | 928 | local_irq_enable(); |
| 907 | return 0; | 929 | return -EINVAL; |
| 908 | } | 930 | } |
| 909 | } | 931 | } |
| 910 | 932 | ||
| @@ -954,6 +976,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 954 | idle_time = idle_time_ns; | 976 | idle_time = idle_time_ns; |
| 955 | do_div(idle_time, NSEC_PER_USEC); | 977 | do_div(idle_time, NSEC_PER_USEC); |
| 956 | 978 | ||
| 979 | /* Update device last_residency */ | ||
| 980 | dev->last_residency = (int)idle_time; | ||
| 981 | |||
| 957 | /* Tell the scheduler how much we idled: */ | 982 | /* Tell the scheduler how much we idled: */ |
| 958 | sched_clock_idle_wakeup_event(idle_time_ns); | 983 | sched_clock_idle_wakeup_event(idle_time_ns); |
| 959 | 984 | ||
| @@ -965,7 +990,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 965 | 990 | ||
| 966 | lapic_timer_state_broadcast(pr, cx, 0); | 991 | lapic_timer_state_broadcast(pr, cx, 0); |
| 967 | cx->time += idle_time; | 992 | cx->time += idle_time; |
| 968 | return idle_time; | 993 | return index; |
| 969 | } | 994 | } |
| 970 | 995 | ||
| 971 | struct cpuidle_driver acpi_idle_driver = { | 996 | struct cpuidle_driver acpi_idle_driver = { |
| @@ -974,14 +999,16 @@ struct cpuidle_driver acpi_idle_driver = { | |||
| 974 | }; | 999 | }; |
| 975 | 1000 | ||
| 976 | /** | 1001 | /** |
| 977 | * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE | 1002 | * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE |
| 1003 | * device i.e. per-cpu data | ||
| 1004 | * | ||
| 978 | * @pr: the ACPI processor | 1005 | * @pr: the ACPI processor |
| 979 | */ | 1006 | */ |
| 980 | static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | 1007 | static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) |
| 981 | { | 1008 | { |
| 982 | int i, count = CPUIDLE_DRIVER_STATE_START; | 1009 | int i, count = CPUIDLE_DRIVER_STATE_START; |
| 983 | struct acpi_processor_cx *cx; | 1010 | struct acpi_processor_cx *cx; |
| 984 | struct cpuidle_state *state; | 1011 | struct cpuidle_state_usage *state_usage; |
| 985 | struct cpuidle_device *dev = &pr->power.dev; | 1012 | struct cpuidle_device *dev = &pr->power.dev; |
| 986 | 1013 | ||
| 987 | if (!pr->flags.power_setup_done) | 1014 | if (!pr->flags.power_setup_done) |
| @@ -992,9 +1019,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 992 | } | 1019 | } |
| 993 | 1020 | ||
| 994 | dev->cpu = pr->id; | 1021 | dev->cpu = pr->id; |
| 1022 | |||
| 1023 | if (max_cstate == 0) | ||
| 1024 | max_cstate = 1; | ||
| 1025 | |||
| 1026 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | ||
| 1027 | cx = &pr->power.states[i]; | ||
| 1028 | state_usage = &dev->states_usage[count]; | ||
| 1029 | |||
| 1030 | if (!cx->valid) | ||
| 1031 | continue; | ||
| 1032 | |||
| 1033 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 1034 | if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && | ||
| 1035 | !pr->flags.has_cst && | ||
| 1036 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | ||
| 1037 | continue; | ||
| 1038 | #endif | ||
| 1039 | |||
| 1040 | cpuidle_set_statedata(state_usage, cx); | ||
| 1041 | |||
| 1042 | count++; | ||
| 1043 | if (count == CPUIDLE_STATE_MAX) | ||
| 1044 | break; | ||
| 1045 | } | ||
| 1046 | |||
| 1047 | dev->state_count = count; | ||
| 1048 | |||
| 1049 | if (!count) | ||
| 1050 | return -EINVAL; | ||
| 1051 | |||
| 1052 | return 0; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | /** | ||
| 1056 | * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle | ||
| 1057 | * global state data i.e. idle routines | ||
| 1058 | * | ||
| 1059 | * @pr: the ACPI processor | ||
| 1060 | */ | ||
| 1061 | static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | ||
| 1062 | { | ||
| 1063 | int i, count = CPUIDLE_DRIVER_STATE_START; | ||
| 1064 | struct acpi_processor_cx *cx; | ||
| 1065 | struct cpuidle_state *state; | ||
| 1066 | struct cpuidle_driver *drv = &acpi_idle_driver; | ||
| 1067 | |||
| 1068 | if (!pr->flags.power_setup_done) | ||
| 1069 | return -EINVAL; | ||
| 1070 | |||
| 1071 | if (pr->flags.power == 0) | ||
| 1072 | return -EINVAL; | ||
| 1073 | |||
| 1074 | drv->safe_state_index = -1; | ||
| 995 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | 1075 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { |
| 996 | dev->states[i].name[0] = '\0'; | 1076 | drv->states[i].name[0] = '\0'; |
| 997 | dev->states[i].desc[0] = '\0'; | 1077 | drv->states[i].desc[0] = '\0'; |
| 998 | } | 1078 | } |
| 999 | 1079 | ||
| 1000 | if (max_cstate == 0) | 1080 | if (max_cstate == 0) |
| @@ -1002,7 +1082,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1002 | 1082 | ||
| 1003 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | 1083 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { |
| 1004 | cx = &pr->power.states[i]; | 1084 | cx = &pr->power.states[i]; |
| 1005 | state = &dev->states[count]; | ||
| 1006 | 1085 | ||
| 1007 | if (!cx->valid) | 1086 | if (!cx->valid) |
| 1008 | continue; | 1087 | continue; |
| @@ -1013,8 +1092,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1013 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 1092 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
| 1014 | continue; | 1093 | continue; |
| 1015 | #endif | 1094 | #endif |
| 1016 | cpuidle_set_statedata(state, cx); | ||
| 1017 | 1095 | ||
| 1096 | state = &drv->states[count]; | ||
| 1018 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); | 1097 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); |
| 1019 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); | 1098 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); |
| 1020 | state->exit_latency = cx->latency; | 1099 | state->exit_latency = cx->latency; |
| @@ -1027,13 +1106,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1027 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1106 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
| 1028 | 1107 | ||
| 1029 | state->enter = acpi_idle_enter_c1; | 1108 | state->enter = acpi_idle_enter_c1; |
| 1030 | dev->safe_state = state; | 1109 | drv->safe_state_index = count; |
| 1031 | break; | 1110 | break; |
| 1032 | 1111 | ||
| 1033 | case ACPI_STATE_C2: | 1112 | case ACPI_STATE_C2: |
| 1034 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1113 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
| 1035 | state->enter = acpi_idle_enter_simple; | 1114 | state->enter = acpi_idle_enter_simple; |
| 1036 | dev->safe_state = state; | 1115 | drv->safe_state_index = count; |
| 1037 | break; | 1116 | break; |
| 1038 | 1117 | ||
| 1039 | case ACPI_STATE_C3: | 1118 | case ACPI_STATE_C3: |
| @@ -1049,7 +1128,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1049 | break; | 1128 | break; |
| 1050 | } | 1129 | } |
| 1051 | 1130 | ||
| 1052 | dev->state_count = count; | 1131 | drv->state_count = count; |
| 1053 | 1132 | ||
| 1054 | if (!count) | 1133 | if (!count) |
| 1055 | return -EINVAL; | 1134 | return -EINVAL; |
| @@ -1057,7 +1136,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
| 1057 | return 0; | 1136 | return 0; |
| 1058 | } | 1137 | } |
| 1059 | 1138 | ||
| 1060 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | 1139 | int acpi_processor_hotplug(struct acpi_processor *pr) |
| 1061 | { | 1140 | { |
| 1062 | int ret = 0; | 1141 | int ret = 0; |
| 1063 | 1142 | ||
| @@ -1078,7 +1157,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
| 1078 | cpuidle_disable_device(&pr->power.dev); | 1157 | cpuidle_disable_device(&pr->power.dev); |
| 1079 | acpi_processor_get_power_info(pr); | 1158 | acpi_processor_get_power_info(pr); |
| 1080 | if (pr->flags.power) { | 1159 | if (pr->flags.power) { |
| 1081 | acpi_processor_setup_cpuidle(pr); | 1160 | acpi_processor_setup_cpuidle_cx(pr); |
| 1082 | ret = cpuidle_enable_device(&pr->power.dev); | 1161 | ret = cpuidle_enable_device(&pr->power.dev); |
| 1083 | } | 1162 | } |
| 1084 | cpuidle_resume_and_unlock(); | 1163 | cpuidle_resume_and_unlock(); |
| @@ -1086,10 +1165,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
| 1086 | return ret; | 1165 | return ret; |
| 1087 | } | 1166 | } |
| 1088 | 1167 | ||
| 1168 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | ||
| 1169 | { | ||
| 1170 | int cpu; | ||
| 1171 | struct acpi_processor *_pr; | ||
| 1172 | |||
| 1173 | if (disabled_by_idle_boot_param()) | ||
| 1174 | return 0; | ||
| 1175 | |||
| 1176 | if (!pr) | ||
| 1177 | return -EINVAL; | ||
| 1178 | |||
| 1179 | if (nocst) | ||
| 1180 | return -ENODEV; | ||
| 1181 | |||
| 1182 | if (!pr->flags.power_setup_done) | ||
| 1183 | return -ENODEV; | ||
| 1184 | |||
| 1185 | /* | ||
| 1186 | * FIXME: Design the ACPI notification to make it once per | ||
| 1187 | * system instead of once per-cpu. This condition is a hack | ||
| 1188 | * to make the code that updates C-States be called once. | ||
| 1189 | */ | ||
| 1190 | |||
| 1191 | if (smp_processor_id() == 0 && | ||
| 1192 | cpuidle_get_driver() == &acpi_idle_driver) { | ||
| 1193 | |||
| 1194 | cpuidle_pause_and_lock(); | ||
| 1195 | /* Protect against cpu-hotplug */ | ||
| 1196 | get_online_cpus(); | ||
| 1197 | |||
| 1198 | /* Disable all cpuidle devices */ | ||
| 1199 | for_each_online_cpu(cpu) { | ||
| 1200 | _pr = per_cpu(processors, cpu); | ||
| 1201 | if (!_pr || !_pr->flags.power_setup_done) | ||
| 1202 | continue; | ||
| 1203 | cpuidle_disable_device(&_pr->power.dev); | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | /* Populate Updated C-state information */ | ||
| 1207 | acpi_processor_setup_cpuidle_states(pr); | ||
| 1208 | |||
| 1209 | /* Enable all cpuidle devices */ | ||
| 1210 | for_each_online_cpu(cpu) { | ||
| 1211 | _pr = per_cpu(processors, cpu); | ||
| 1212 | if (!_pr || !_pr->flags.power_setup_done) | ||
| 1213 | continue; | ||
| 1214 | acpi_processor_get_power_info(_pr); | ||
| 1215 | if (_pr->flags.power) { | ||
| 1216 | acpi_processor_setup_cpuidle_cx(_pr); | ||
| 1217 | cpuidle_enable_device(&_pr->power.dev); | ||
| 1218 | } | ||
| 1219 | } | ||
| 1220 | put_online_cpus(); | ||
| 1221 | cpuidle_resume_and_unlock(); | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | return 0; | ||
| 1225 | } | ||
| 1226 | |||
| 1227 | static int acpi_processor_registered; | ||
| 1228 | |||
| 1089 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | 1229 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, |
| 1090 | struct acpi_device *device) | 1230 | struct acpi_device *device) |
| 1091 | { | 1231 | { |
| 1092 | acpi_status status = 0; | 1232 | acpi_status status = 0; |
| 1233 | int retval; | ||
| 1093 | static int first_run; | 1234 | static int first_run; |
| 1094 | 1235 | ||
| 1095 | if (disabled_by_idle_boot_param()) | 1236 | if (disabled_by_idle_boot_param()) |
| @@ -1126,9 +1267,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | |||
| 1126 | * platforms that only support C1. | 1267 | * platforms that only support C1. |
| 1127 | */ | 1268 | */ |
| 1128 | if (pr->flags.power) { | 1269 | if (pr->flags.power) { |
| 1129 | acpi_processor_setup_cpuidle(pr); | 1270 | /* Register acpi_idle_driver if not already registered */ |
| 1130 | if (cpuidle_register_device(&pr->power.dev)) | 1271 | if (!acpi_processor_registered) { |
| 1131 | return -EIO; | 1272 | acpi_processor_setup_cpuidle_states(pr); |
| 1273 | retval = cpuidle_register_driver(&acpi_idle_driver); | ||
| 1274 | if (retval) | ||
| 1275 | return retval; | ||
| 1276 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
| 1277 | acpi_idle_driver.name); | ||
| 1278 | } | ||
| 1279 | /* Register per-cpu cpuidle_device. Cpuidle driver | ||
| 1280 | * must already be registered before registering device | ||
| 1281 | */ | ||
| 1282 | acpi_processor_setup_cpuidle_cx(pr); | ||
| 1283 | retval = cpuidle_register_device(&pr->power.dev); | ||
| 1284 | if (retval) { | ||
| 1285 | if (acpi_processor_registered == 0) | ||
| 1286 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
| 1287 | return retval; | ||
| 1288 | } | ||
| 1289 | acpi_processor_registered++; | ||
| 1132 | } | 1290 | } |
| 1133 | return 0; | 1291 | return 0; |
| 1134 | } | 1292 | } |
| @@ -1139,8 +1297,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr, | |||
| 1139 | if (disabled_by_idle_boot_param()) | 1297 | if (disabled_by_idle_boot_param()) |
| 1140 | return 0; | 1298 | return 0; |
| 1141 | 1299 | ||
| 1142 | cpuidle_unregister_device(&pr->power.dev); | 1300 | if (pr->flags.power) { |
| 1143 | pr->flags.power_setup_done = 0; | 1301 | cpuidle_unregister_device(&pr->power.dev); |
| 1302 | acpi_processor_registered--; | ||
| 1303 | if (acpi_processor_registered == 0) | ||
| 1304 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
| 1305 | } | ||
| 1144 | 1306 | ||
| 1307 | pr->flags.power_setup_done = 0; | ||
| 1145 | return 0; | 1308 | return 0; |
| 1146 | } | 1309 | } |
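The processor_idle.c hunks above all follow one new calling convention: an enter handler receives the cpuidle driver and a state index instead of a cpuidle_state pointer, fetches its per-cpu data from dev->states_usage[index], writes the measured residency into dev->last_residency itself, and returns the index it actually entered (or a negative errno such as -EINVAL). A minimal sketch of that contract; every name not shown in the diff is hypothetical:

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	ktime_t kt1, kt2;

	dev->last_residency = 0;		/* nothing entered yet */
	kt1 = ktime_get_real();
	/* ... put the CPU into the state described by drv->states[index] ... */
	kt2 = ktime_get_real();

	/* The driver, not the cpuidle core, updates last_residency now. */
	dev->last_residency = (int)ktime_to_us(ktime_sub(kt2, kt1));
	return index;				/* or -EINVAL if the state was not entered */
}

The registration change at the bottom of the file follows the same single/global-registration theme: acpi_idle_driver and its state table are registered once, tracked by the acpi_processor_registered count, while each CPU still registers and unregisters its own cpuidle_device.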
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 449c556274c0..8ab80bafe3f1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1062,13 +1062,12 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id) | |||
| 1062 | if (!id) | 1062 | if (!id) |
| 1063 | return; | 1063 | return; |
| 1064 | 1064 | ||
| 1065 | id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL); | 1065 | id->id = kstrdup(dev_id, GFP_KERNEL); |
| 1066 | if (!id->id) { | 1066 | if (!id->id) { |
| 1067 | kfree(id); | 1067 | kfree(id); |
| 1068 | return; | 1068 | return; |
| 1069 | } | 1069 | } |
| 1070 | 1070 | ||
| 1071 | strcpy(id->id, dev_id); | ||
| 1072 | list_add_tail(&id->list, &device->pnp.ids); | 1071 | list_add_tail(&id->list, &device->pnp.ids); |
| 1073 | } | 1072 | } |
| 1074 | 1073 | ||
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index c538d0ef10ff..9f66181c814e 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
| @@ -706,11 +706,23 @@ static void __exit interrupt_stats_exit(void) | |||
| 706 | return; | 706 | return; |
| 707 | } | 707 | } |
| 708 | 708 | ||
| 709 | static ssize_t | ||
| 710 | acpi_show_profile(struct device *dev, struct device_attribute *attr, | ||
| 711 | char *buf) | ||
| 712 | { | ||
| 713 | return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); | ||
| 714 | } | ||
| 715 | |||
| 716 | static const struct device_attribute pm_profile_attr = | ||
| 717 | __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); | ||
| 718 | |||
| 709 | int __init acpi_sysfs_init(void) | 719 | int __init acpi_sysfs_init(void) |
| 710 | { | 720 | { |
| 711 | int result; | 721 | int result; |
| 712 | 722 | ||
| 713 | result = acpi_tables_sysfs_init(); | 723 | result = acpi_tables_sysfs_init(); |
| 714 | 724 | if (result) | |
| 725 | return result; | ||
| 726 | result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr); | ||
| 715 | return result; | 727 | return result; |
| 716 | } | 728 | } |
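acpi_show_profile() simply formats acpi_gbl_FADT.preferred_profile, so the new read-only attribute can be consumed with an ordinary file read. A small userspace sketch, assuming the attribute lands under /sys/firmware/acpi/ (the acpi_kobj directory):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/pm_profile", "r");
	int profile;

	if (!f)
		return 1;			/* ACPI disabled or kernel without this attribute */
	if (fscanf(f, "%d", &profile) == 1)
		printf("FADT preferred PM profile: %d\n", profile);
	fclose(f);
	return 0;
}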
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index becd6d99203b..06ce2680d00d 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -62,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); | |||
| 62 | int cpuidle_idle_call(void) | 62 | int cpuidle_idle_call(void) |
| 63 | { | 63 | { |
| 64 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 64 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
| 65 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
| 65 | struct cpuidle_state *target_state; | 66 | struct cpuidle_state *target_state; |
| 66 | int next_state; | 67 | int next_state, entered_state; |
| 67 | 68 | ||
| 68 | if (off) | 69 | if (off) |
| 69 | return -ENODEV; | 70 | return -ENODEV; |
| @@ -84,45 +85,36 @@ int cpuidle_idle_call(void) | |||
| 84 | hrtimer_peek_ahead_timers(); | 85 | hrtimer_peek_ahead_timers(); |
| 85 | #endif | 86 | #endif |
| 86 | 87 | ||
| 87 | /* | ||
| 88 | * Call the device's prepare function before calling the | ||
| 89 | * governor's select function. ->prepare gives the device's | ||
| 90 | * cpuidle driver a chance to update any dynamic information | ||
| 91 | * of its cpuidle states for the current idle period, e.g. | ||
| 92 | * state availability, latencies, residencies, etc. | ||
| 93 | */ | ||
| 94 | if (dev->prepare) | ||
| 95 | dev->prepare(dev); | ||
| 96 | |||
| 97 | /* ask the governor for the next state */ | 88 | /* ask the governor for the next state */ |
| 98 | next_state = cpuidle_curr_governor->select(dev); | 89 | next_state = cpuidle_curr_governor->select(drv, dev); |
| 99 | if (need_resched()) { | 90 | if (need_resched()) { |
| 100 | local_irq_enable(); | 91 | local_irq_enable(); |
| 101 | return 0; | 92 | return 0; |
| 102 | } | 93 | } |
| 103 | 94 | ||
| 104 | target_state = &dev->states[next_state]; | 95 | target_state = &drv->states[next_state]; |
| 105 | |||
| 106 | /* enter the state and update stats */ | ||
| 107 | dev->last_state = target_state; | ||
| 108 | 96 | ||
| 109 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); | 97 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); |
| 110 | trace_cpu_idle(next_state, dev->cpu); | 98 | trace_cpu_idle(next_state, dev->cpu); |
| 111 | 99 | ||
| 112 | dev->last_residency = target_state->enter(dev, target_state); | 100 | entered_state = target_state->enter(dev, drv, next_state); |
| 113 | 101 | ||
| 114 | trace_power_end(dev->cpu); | 102 | trace_power_end(dev->cpu); |
| 115 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); | 103 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); |
| 116 | 104 | ||
| 117 | if (dev->last_state) | 105 | if (entered_state >= 0) { |
| 118 | target_state = dev->last_state; | 106 | /* Update cpuidle counters */ |
| 119 | 107 | /* This can be moved to within driver enter routine | |
| 120 | target_state->time += (unsigned long long)dev->last_residency; | 108 | * but that results in multiple copies of same code. |
| 121 | target_state->usage++; | 109 | */ |
| 110 | dev->states_usage[entered_state].time += | ||
| 111 | (unsigned long long)dev->last_residency; | ||
| 112 | dev->states_usage[entered_state].usage++; | ||
| 113 | } | ||
| 122 | 114 | ||
| 123 | /* give the governor an opportunity to reflect on the outcome */ | 115 | /* give the governor an opportunity to reflect on the outcome */ |
| 124 | if (cpuidle_curr_governor->reflect) | 116 | if (cpuidle_curr_governor->reflect) |
| 125 | cpuidle_curr_governor->reflect(dev); | 117 | cpuidle_curr_governor->reflect(dev, entered_state); |
| 126 | 118 | ||
| 127 | return 0; | 119 | return 0; |
| 128 | } | 120 | } |
| @@ -173,11 +165,11 @@ void cpuidle_resume_and_unlock(void) | |||
| 173 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); | 165 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); |
| 174 | 166 | ||
| 175 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX | 167 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX |
| 176 | static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) | 168 | static int poll_idle(struct cpuidle_device *dev, |
| 169 | struct cpuidle_driver *drv, int index) | ||
| 177 | { | 170 | { |
| 178 | ktime_t t1, t2; | 171 | ktime_t t1, t2; |
| 179 | s64 diff; | 172 | s64 diff; |
| 180 | int ret; | ||
| 181 | 173 | ||
| 182 | t1 = ktime_get(); | 174 | t1 = ktime_get(); |
| 183 | local_irq_enable(); | 175 | local_irq_enable(); |
| @@ -189,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) | |||
| 189 | if (diff > INT_MAX) | 181 | if (diff > INT_MAX) |
| 190 | diff = INT_MAX; | 182 | diff = INT_MAX; |
| 191 | 183 | ||
| 192 | ret = (int) diff; | 184 | dev->last_residency = (int) diff; |
| 193 | return ret; | 185 | |
| 186 | return index; | ||
| 194 | } | 187 | } |
| 195 | 188 | ||
| 196 | static void poll_idle_init(struct cpuidle_device *dev) | 189 | static void poll_idle_init(struct cpuidle_driver *drv) |
| 197 | { | 190 | { |
| 198 | struct cpuidle_state *state = &dev->states[0]; | 191 | struct cpuidle_state *state = &drv->states[0]; |
| 199 | |||
| 200 | cpuidle_set_statedata(state, NULL); | ||
| 201 | 192 | ||
| 202 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); | 193 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); |
| 203 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); | 194 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); |
| @@ -208,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev) | |||
| 208 | state->enter = poll_idle; | 199 | state->enter = poll_idle; |
| 209 | } | 200 | } |
| 210 | #else | 201 | #else |
| 211 | static void poll_idle_init(struct cpuidle_device *dev) {} | 202 | static void poll_idle_init(struct cpuidle_driver *drv) {} |
| 212 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ | 203 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ |
| 213 | 204 | ||
| 214 | /** | 205 | /** |
| @@ -235,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
| 235 | return ret; | 226 | return ret; |
| 236 | } | 227 | } |
| 237 | 228 | ||
| 238 | poll_idle_init(dev); | 229 | poll_idle_init(cpuidle_get_driver()); |
| 239 | 230 | ||
| 240 | if ((ret = cpuidle_add_state_sysfs(dev))) | 231 | if ((ret = cpuidle_add_state_sysfs(dev))) |
| 241 | return ret; | 232 | return ret; |
| 242 | 233 | ||
| 243 | if (cpuidle_curr_governor->enable && | 234 | if (cpuidle_curr_governor->enable && |
| 244 | (ret = cpuidle_curr_governor->enable(dev))) | 235 | (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) |
| 245 | goto fail_sysfs; | 236 | goto fail_sysfs; |
| 246 | 237 | ||
| 247 | for (i = 0; i < dev->state_count; i++) { | 238 | for (i = 0; i < dev->state_count; i++) { |
| 248 | dev->states[i].usage = 0; | 239 | dev->states_usage[i].usage = 0; |
| 249 | dev->states[i].time = 0; | 240 | dev->states_usage[i].time = 0; |
| 250 | } | 241 | } |
| 251 | dev->last_residency = 0; | 242 | dev->last_residency = 0; |
| 252 | dev->last_state = NULL; | ||
| 253 | 243 | ||
| 254 | smp_wmb(); | 244 | smp_wmb(); |
| 255 | 245 | ||
| @@ -283,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) | |||
| 283 | dev->enabled = 0; | 273 | dev->enabled = 0; |
| 284 | 274 | ||
| 285 | if (cpuidle_curr_governor->disable) | 275 | if (cpuidle_curr_governor->disable) |
| 286 | cpuidle_curr_governor->disable(dev); | 276 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); |
| 287 | 277 | ||
| 288 | cpuidle_remove_state_sysfs(dev); | 278 | cpuidle_remove_state_sysfs(dev); |
| 289 | enabled_devices--; | 279 | enabled_devices--; |
| @@ -311,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
| 311 | 301 | ||
| 312 | init_completion(&dev->kobj_unregister); | 302 | init_completion(&dev->kobj_unregister); |
| 313 | 303 | ||
| 314 | /* | ||
| 315 | * cpuidle driver should set the dev->power_specified bit | ||
| 316 | * before registering the device if the driver provides | ||
| 317 | * power_usage numbers. | ||
| 318 | * | ||
| 319 | * For those devices whose ->power_specified is not set, | ||
| 320 | * we fill in power_usage with decreasing values as the | ||
| 321 | * cpuidle code has an implicit assumption that state Cn | ||
| 322 | * uses less power than C(n-1). | ||
| 323 | * | ||
| 324 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
| 325 | * an power value of -1. So we use -2, -3, etc, for other | ||
| 326 | * c-states. | ||
| 327 | */ | ||
| 328 | if (!dev->power_specified) { | ||
| 329 | int i; | ||
| 330 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) | ||
| 331 | dev->states[i].power_usage = -1 - i; | ||
| 332 | } | ||
| 333 | |||
| 334 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 304 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
| 335 | list_add(&dev->device_list, &cpuidle_detected_devices); | 305 | list_add(&dev->device_list, &cpuidle_detected_devices); |
| 336 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | 306 | if ((ret = cpuidle_add_sysfs(sys_dev))) { |
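These core hunks show the shape of the split: static state descriptions (name, latencies, the enter hook) now live in the driver's states[] array, while the per-cpu counters and driver_data move to dev->states_usage[]. cpuidle_idle_call() asks the governor for an index, calls the driver's enter hook, and only bumps the usage/time counters when the returned index is non-negative. A rough sketch of the two structures as they are used above; the authoritative definitions are in include/linux/cpuidle.h:

struct cpuidle_state_usage {		/* per-cpu, in struct cpuidle_device */
	void			*driver_data;
	unsigned long long	usage;
	unsigned long long	time;	/* in us */
};

struct cpuidle_state {			/* static, in struct cpuidle_driver */
	char		name[CPUIDLE_NAME_LEN];
	char		desc[CPUIDLE_DESC_LEN];
	unsigned int	flags;
	unsigned int	exit_latency;		/* in us */
	int		power_usage;		/* in mW */
	unsigned int	target_residency;	/* in us */
	int (*enter)(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv, int index);
};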
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 3f7e3cedd133..284d7af5a9c8 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
| @@ -17,6 +17,30 @@ | |||
| 17 | static struct cpuidle_driver *cpuidle_curr_driver; | 17 | static struct cpuidle_driver *cpuidle_curr_driver; |
| 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
| 19 | 19 | ||
| 20 | static void __cpuidle_register_driver(struct cpuidle_driver *drv) | ||
| 21 | { | ||
| 22 | int i; | ||
| 23 | /* | ||
| 24 | * cpuidle driver should set the drv->power_specified bit | ||
| 25 | * before registering if the driver provides | ||
| 26 | * power_usage numbers. | ||
| 27 | * | ||
| 28 | * If power_specified is not set, | ||
| 29 | * we fill in power_usage with decreasing values as the | ||
| 30 | * cpuidle code has an implicit assumption that state Cn | ||
| 31 | * uses less power than C(n-1). | ||
| 32 | * | ||
| 33 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
| 34 | * a power value of -1. So we use -2, -3, etc., for other | ||
| 35 | * c-states. | ||
| 36 | */ | ||
| 37 | if (!drv->power_specified) { | ||
| 38 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) | ||
| 39 | drv->states[i].power_usage = -1 - i; | ||
| 40 | } | ||
| 41 | } | ||
| 42 | |||
| 43 | |||
| 20 | /** | 44 | /** |
| 21 | * cpuidle_register_driver - registers a driver | 45 | * cpuidle_register_driver - registers a driver |
| 22 | * @drv: the driver | 46 | * @drv: the driver |
| @@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
| 34 | spin_unlock(&cpuidle_driver_lock); | 58 | spin_unlock(&cpuidle_driver_lock); |
| 35 | return -EBUSY; | 59 | return -EBUSY; |
| 36 | } | 60 | } |
| 61 | __cpuidle_register_driver(drv); | ||
| 37 | cpuidle_curr_driver = drv; | 62 | cpuidle_curr_driver = drv; |
| 38 | spin_unlock(&cpuidle_driver_lock); | 63 | spin_unlock(&cpuidle_driver_lock); |
| 39 | 64 | ||
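The comment in __cpuidle_register_driver() is easiest to see with numbers: for a driver that leaves power_specified unset and registers, say, four states with CPUIDLE_DRIVER_STATE_START == 1, the loop assigns power_usage = -2, -3, -4 to states 1..3, so deeper states always rank as lower power for the governors (state 0, the poll state, already carries -1 when CONFIG_ARCH_HAS_CPU_RELAX is set). Illustrative only:

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
		drv->states[i].power_usage = -1 - i;	/* -2, -3, -4, ... */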
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 3b8fce20f023..b6a09ea859b1 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
| @@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev, | |||
| 60 | 60 | ||
| 61 | /** | 61 | /** |
| 62 | * ladder_select_state - selects the next state to enter | 62 | * ladder_select_state - selects the next state to enter |
| 63 | * @drv: cpuidle driver | ||
| 63 | * @dev: the CPU | 64 | * @dev: the CPU |
| 64 | */ | 65 | */ |
| 65 | static int ladder_select_state(struct cpuidle_device *dev) | 66 | static int ladder_select_state(struct cpuidle_driver *drv, |
| 67 | struct cpuidle_device *dev) | ||
| 66 | { | 68 | { |
| 67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 69 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); |
| 68 | struct ladder_device_state *last_state; | 70 | struct ladder_device_state *last_state; |
| @@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
| 77 | 79 | ||
| 78 | last_state = &ldev->states[last_idx]; | 80 | last_state = &ldev->states[last_idx]; |
| 79 | 81 | ||
| 80 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | 82 | if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { |
| 81 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | 83 | last_residency = cpuidle_get_last_residency(dev) - \ |
| 84 | drv->states[last_idx].exit_latency; | ||
| 85 | } | ||
| 82 | else | 86 | else |
| 83 | last_residency = last_state->threshold.promotion_time + 1; | 87 | last_residency = last_state->threshold.promotion_time + 1; |
| 84 | 88 | ||
| 85 | /* consider promotion */ | 89 | /* consider promotion */ |
| 86 | if (last_idx < dev->state_count - 1 && | 90 | if (last_idx < drv->state_count - 1 && |
| 87 | last_residency > last_state->threshold.promotion_time && | 91 | last_residency > last_state->threshold.promotion_time && |
| 88 | dev->states[last_idx + 1].exit_latency <= latency_req) { | 92 | drv->states[last_idx + 1].exit_latency <= latency_req) { |
| 89 | last_state->stats.promotion_count++; | 93 | last_state->stats.promotion_count++; |
| 90 | last_state->stats.demotion_count = 0; | 94 | last_state->stats.demotion_count = 0; |
| 91 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | 95 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { |
| @@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
| 96 | 100 | ||
| 97 | /* consider demotion */ | 101 | /* consider demotion */ |
| 98 | if (last_idx > CPUIDLE_DRIVER_STATE_START && | 102 | if (last_idx > CPUIDLE_DRIVER_STATE_START && |
| 99 | dev->states[last_idx].exit_latency > latency_req) { | 103 | drv->states[last_idx].exit_latency > latency_req) { |
| 100 | int i; | 104 | int i; |
| 101 | 105 | ||
| 102 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { | 106 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { |
| 103 | if (dev->states[i].exit_latency <= latency_req) | 107 | if (drv->states[i].exit_latency <= latency_req) |
| 104 | break; | 108 | break; |
| 105 | } | 109 | } |
| 106 | ladder_do_selection(ldev, last_idx, i); | 110 | ladder_do_selection(ldev, last_idx, i); |
| @@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
| 123 | 127 | ||
| 124 | /** | 128 | /** |
| 125 | * ladder_enable_device - setup for the governor | 129 | * ladder_enable_device - setup for the governor |
| 130 | * @drv: cpuidle driver | ||
| 126 | * @dev: the CPU | 131 | * @dev: the CPU |
| 127 | */ | 132 | */ |
| 128 | static int ladder_enable_device(struct cpuidle_device *dev) | 133 | static int ladder_enable_device(struct cpuidle_driver *drv, |
| 134 | struct cpuidle_device *dev) | ||
| 129 | { | 135 | { |
| 130 | int i; | 136 | int i; |
| 131 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | 137 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); |
| @@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
| 134 | 140 | ||
| 135 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; | 141 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; |
| 136 | 142 | ||
| 137 | for (i = 0; i < dev->state_count; i++) { | 143 | for (i = 0; i < drv->state_count; i++) { |
| 138 | state = &dev->states[i]; | 144 | state = &drv->states[i]; |
| 139 | lstate = &ldev->states[i]; | 145 | lstate = &ldev->states[i]; |
| 140 | 146 | ||
| 141 | lstate->stats.promotion_count = 0; | 147 | lstate->stats.promotion_count = 0; |
| @@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
| 144 | lstate->threshold.promotion_count = PROMOTION_COUNT; | 150 | lstate->threshold.promotion_count = PROMOTION_COUNT; |
| 145 | lstate->threshold.demotion_count = DEMOTION_COUNT; | 151 | lstate->threshold.demotion_count = DEMOTION_COUNT; |
| 146 | 152 | ||
| 147 | if (i < dev->state_count - 1) | 153 | if (i < drv->state_count - 1) |
| 148 | lstate->threshold.promotion_time = state->exit_latency; | 154 | lstate->threshold.promotion_time = state->exit_latency; |
| 149 | if (i > 0) | 155 | if (i > 0) |
| 150 | lstate->threshold.demotion_time = state->exit_latency; | 156 | lstate->threshold.demotion_time = state->exit_latency; |
| @@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
| 153 | return 0; | 159 | return 0; |
| 154 | } | 160 | } |
| 155 | 161 | ||
| 162 | /** | ||
| 163 | * ladder_reflect - update the correct last_state_idx | ||
| 164 | * @dev: the CPU | ||
| 165 | * @index: the index of actual state entered | ||
| 166 | */ | ||
| 167 | static void ladder_reflect(struct cpuidle_device *dev, int index) | ||
| 168 | { | ||
| 169 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | ||
| 170 | if (index > 0) | ||
| 171 | ldev->last_state_idx = index; | ||
| 172 | } | ||
| 173 | |||
| 156 | static struct cpuidle_governor ladder_governor = { | 174 | static struct cpuidle_governor ladder_governor = { |
| 157 | .name = "ladder", | 175 | .name = "ladder", |
| 158 | .rating = 10, | 176 | .rating = 10, |
| 159 | .enable = ladder_enable_device, | 177 | .enable = ladder_enable_device, |
| 160 | .select = ladder_select_state, | 178 | .select = ladder_select_state, |
| 179 | .reflect = ladder_reflect, | ||
| 161 | .owner = THIS_MODULE, | 180 | .owner = THIS_MODULE, |
| 162 | }; | 181 | }; |
| 163 | 182 | ||
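Taken together, the ladder changes define the governor-facing side of the new interface: enable() and select() now take the driver so they can read drv->states[], while the new reflect() hook only needs the device and the index that was actually entered. A sketch of the resulting callback shapes, mirroring the ladder_governor definition above (the example names are hypothetical):

static int  example_enable (struct cpuidle_driver *drv, struct cpuidle_device *dev);
static int  example_select (struct cpuidle_driver *drv, struct cpuidle_device *dev);
static void example_reflect(struct cpuidle_device *dev, int index);

static struct cpuidle_governor example_governor = {
	.name    = "example",
	.rating  = 1,
	.enable  = example_enable,
	.select  = example_select,	/* returns an index into drv->states[] */
	.reflect = example_reflect,	/* index actually entered, or negative on failure */
	.owner   = THIS_MODULE,
};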
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 00275244ce2f..ad0952601ae2 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -183,7 +183,7 @@ static inline int performance_multiplier(void) | |||
| 183 | 183 | ||
| 184 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | 184 | static DEFINE_PER_CPU(struct menu_device, menu_devices); |
| 185 | 185 | ||
| 186 | static void menu_update(struct cpuidle_device *dev); | 186 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); |
| 187 | 187 | ||
| 188 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ | 188 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ |
| 189 | static u64 div_round64(u64 dividend, u32 divisor) | 189 | static u64 div_round64(u64 dividend, u32 divisor) |
| @@ -229,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data) | |||
| 229 | 229 | ||
| 230 | /** | 230 | /** |
| 231 | * menu_select - selects the next idle state to enter | 231 | * menu_select - selects the next idle state to enter |
| 232 | * @drv: cpuidle driver containing state data | ||
| 232 | * @dev: the CPU | 233 | * @dev: the CPU |
| 233 | */ | 234 | */ |
| 234 | static int menu_select(struct cpuidle_device *dev) | 235 | static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
| 235 | { | 236 | { |
| 236 | struct menu_device *data = &__get_cpu_var(menu_devices); | 237 | struct menu_device *data = &__get_cpu_var(menu_devices); |
| 237 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); | 238 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); |
| @@ -241,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev) | |||
| 241 | struct timespec t; | 242 | struct timespec t; |
| 242 | 243 | ||
| 243 | if (data->needs_update) { | 244 | if (data->needs_update) { |
| 244 | menu_update(dev); | 245 | menu_update(drv, dev); |
| 245 | data->needs_update = 0; | 246 | data->needs_update = 0; |
| 246 | } | 247 | } |
| 247 | 248 | ||
| @@ -286,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev) | |||
| 286 | * Find the idle state with the lowest power while satisfying | 287 | * Find the idle state with the lowest power while satisfying |
| 287 | * our constraints. | 288 | * our constraints. |
| 288 | */ | 289 | */ |
| 289 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { | 290 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
| 290 | struct cpuidle_state *s = &dev->states[i]; | 291 | struct cpuidle_state *s = &drv->states[i]; |
| 291 | 292 | ||
| 292 | if (s->flags & CPUIDLE_FLAG_IGNORE) | ||
| 293 | continue; | ||
| 294 | if (s->target_residency > data->predicted_us) | 293 | if (s->target_residency > data->predicted_us) |
| 295 | continue; | 294 | continue; |
| 296 | if (s->exit_latency > latency_req) | 295 | if (s->exit_latency > latency_req) |
| @@ -311,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev) | |||
| 311 | /** | 310 | /** |
| 312 | * menu_reflect - records that data structures need update | 311 | * menu_reflect - records that data structures need update |
| 313 | * @dev: the CPU | 312 | * @dev: the CPU |
| 313 | * @index: the index of actual entered state | ||
| 314 | * | 314 | * |
| 315 | * NOTE: it's important to be fast here because this operation will add to | 315 | * NOTE: it's important to be fast here because this operation will add to |
| 316 | * the overall exit latency. | 316 | * the overall exit latency. |
| 317 | */ | 317 | */ |
| 318 | static void menu_reflect(struct cpuidle_device *dev) | 318 | static void menu_reflect(struct cpuidle_device *dev, int index) |
| 319 | { | 319 | { |
| 320 | struct menu_device *data = &__get_cpu_var(menu_devices); | 320 | struct menu_device *data = &__get_cpu_var(menu_devices); |
| 321 | data->needs_update = 1; | 321 | data->last_state_idx = index; |
| 322 | if (index >= 0) | ||
| 323 | data->needs_update = 1; | ||
| 322 | } | 324 | } |
| 323 | 325 | ||
| 324 | /** | 326 | /** |
| 325 | * menu_update - attempts to guess what happened after entry | 327 | * menu_update - attempts to guess what happened after entry |
| 328 | * @drv: cpuidle driver containing state data | ||
| 326 | * @dev: the CPU | 329 | * @dev: the CPU |
| 327 | */ | 330 | */ |
| 328 | static void menu_update(struct cpuidle_device *dev) | 331 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
| 329 | { | 332 | { |
| 330 | struct menu_device *data = &__get_cpu_var(menu_devices); | 333 | struct menu_device *data = &__get_cpu_var(menu_devices); |
| 331 | int last_idx = data->last_state_idx; | 334 | int last_idx = data->last_state_idx; |
| 332 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); | 335 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); |
| 333 | struct cpuidle_state *target = &dev->states[last_idx]; | 336 | struct cpuidle_state *target = &drv->states[last_idx]; |
| 334 | unsigned int measured_us; | 337 | unsigned int measured_us; |
| 335 | u64 new_factor; | 338 | u64 new_factor; |
| 336 | 339 | ||
| @@ -384,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev) | |||
| 384 | 387 | ||
| 385 | /** | 388 | /** |
| 386 | * menu_enable_device - scans a CPU's states and does setup | 389 | * menu_enable_device - scans a CPU's states and does setup |
| 390 | * @drv: cpuidle driver | ||
| 387 | * @dev: the CPU | 391 | * @dev: the CPU |
| 388 | */ | 392 | */ |
| 389 | static int menu_enable_device(struct cpuidle_device *dev) | 393 | static int menu_enable_device(struct cpuidle_driver *drv, |
| 394 | struct cpuidle_device *dev) | ||
| 390 | { | 395 | { |
| 391 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 396 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
| 392 | 397 | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index be7917ec40c9..1e756e160dca 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
| @@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = { | |||
| 216 | 216 | ||
| 217 | struct cpuidle_state_attr { | 217 | struct cpuidle_state_attr { |
| 218 | struct attribute attr; | 218 | struct attribute attr; |
| 219 | ssize_t (*show)(struct cpuidle_state *, char *); | 219 | ssize_t (*show)(struct cpuidle_state *, \ |
| 220 | struct cpuidle_state_usage *, char *); | ||
| 220 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); | 221 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); |
| 221 | }; | 222 | }; |
| 222 | 223 | ||
| @@ -224,19 +225,22 @@ struct cpuidle_state_attr { | |||
| 224 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | 225 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) |
| 225 | 226 | ||
| 226 | #define define_show_state_function(_name) \ | 227 | #define define_show_state_function(_name) \ |
| 227 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 228 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
| 229 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
| 228 | { \ | 230 | { \ |
| 229 | return sprintf(buf, "%u\n", state->_name);\ | 231 | return sprintf(buf, "%u\n", state->_name);\ |
| 230 | } | 232 | } |
| 231 | 233 | ||
| 232 | #define define_show_state_ull_function(_name) \ | 234 | #define define_show_state_ull_function(_name) \ |
| 233 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 235 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
| 236 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
| 234 | { \ | 237 | { \ |
| 235 | return sprintf(buf, "%llu\n", state->_name);\ | 238 | return sprintf(buf, "%llu\n", state_usage->_name);\ |
| 236 | } | 239 | } |
| 237 | 240 | ||
| 238 | #define define_show_state_str_function(_name) \ | 241 | #define define_show_state_str_function(_name) \ |
| 239 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 242 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
| 243 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
| 240 | { \ | 244 | { \ |
| 241 | if (state->_name[0] == '\0')\ | 245 | if (state->_name[0] == '\0')\ |
| 242 | return sprintf(buf, "<null>\n");\ | 246 | return sprintf(buf, "<null>\n");\ |
| @@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = { | |||
| 269 | 273 | ||
| 270 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | 274 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) |
| 271 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | 275 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) |
| 276 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) | ||
| 272 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) | 277 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) |
| 273 | static ssize_t cpuidle_state_show(struct kobject * kobj, | 278 | static ssize_t cpuidle_state_show(struct kobject * kobj, |
| 274 | struct attribute * attr ,char * buf) | 279 | struct attribute * attr ,char * buf) |
| 275 | { | 280 | { |
| 276 | int ret = -EIO; | 281 | int ret = -EIO; |
| 277 | struct cpuidle_state *state = kobj_to_state(kobj); | 282 | struct cpuidle_state *state = kobj_to_state(kobj); |
| 283 | struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); | ||
| 278 | struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); | 284 | struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); |
| 279 | 285 | ||
| 280 | if (cattr->show) | 286 | if (cattr->show) |
| 281 | ret = cattr->show(state, buf); | 287 | ret = cattr->show(state, state_usage, buf); |
| 282 | 288 | ||
| 283 | return ret; | 289 | return ret; |
| 284 | } | 290 | } |
| @@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
| 316 | { | 322 | { |
| 317 | int i, ret = -ENOMEM; | 323 | int i, ret = -ENOMEM; |
| 318 | struct cpuidle_state_kobj *kobj; | 324 | struct cpuidle_state_kobj *kobj; |
| 325 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
| 319 | 326 | ||
| 320 | /* state statistics */ | 327 | /* state statistics */ |
| 321 | for (i = 0; i < device->state_count; i++) { | 328 | for (i = 0; i < device->state_count; i++) { |
| 322 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 329 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
| 323 | if (!kobj) | 330 | if (!kobj) |
| 324 | goto error_state; | 331 | goto error_state; |
| 325 | kobj->state = &device->states[i]; | 332 | kobj->state = &drv->states[i]; |
| 333 | kobj->state_usage = &device->states_usage[i]; | ||
| 326 | init_completion(&kobj->kobj_unregister); | 334 | init_completion(&kobj->kobj_unregister); |
| 327 | 335 | ||
| 328 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, | 336 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, |
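With the statistics moved into cpuidle_state_usage, every show callback now receives both objects: static fields still come from the driver's cpuidle_state, while the per-cpu counters come from the device's cpuidle_state_usage bound to the same kobject. Assuming the usage counter is one of the ull attributes, define_show_state_ull_function(usage) would expand to roughly:

static ssize_t show_state_usage(struct cpuidle_state *state,
				struct cpuidle_state_usage *state_usage, char *buf)
{
	return sprintf(buf, "%llu\n", state_usage->usage);
}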
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 18767f8ab090..5d2f8e13cf0e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -82,7 +82,8 @@ static unsigned int mwait_substates; | |||
| 82 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ | 82 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ |
| 83 | 83 | ||
| 84 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 84 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
| 85 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | 85 | static int intel_idle(struct cpuidle_device *dev, |
| 86 | struct cpuidle_driver *drv, int index); | ||
| 86 | 87 | ||
| 87 | static struct cpuidle_state *cpuidle_state_table; | 88 | static struct cpuidle_state *cpuidle_state_table; |
| 88 | 89 | ||
| @@ -110,7 +111,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 110 | { /* MWAIT C1 */ | 111 | { /* MWAIT C1 */ |
| 111 | .name = "C1-NHM", | 112 | .name = "C1-NHM", |
| 112 | .desc = "MWAIT 0x00", | 113 | .desc = "MWAIT 0x00", |
| 113 | .driver_data = (void *) 0x00, | ||
| 114 | .flags = CPUIDLE_FLAG_TIME_VALID, | 114 | .flags = CPUIDLE_FLAG_TIME_VALID, |
| 115 | .exit_latency = 3, | 115 | .exit_latency = 3, |
| 116 | .target_residency = 6, | 116 | .target_residency = 6, |
| @@ -118,7 +118,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 118 | { /* MWAIT C2 */ | 118 | { /* MWAIT C2 */ |
| 119 | .name = "C3-NHM", | 119 | .name = "C3-NHM", |
| 120 | .desc = "MWAIT 0x10", | 120 | .desc = "MWAIT 0x10", |
| 121 | .driver_data = (void *) 0x10, | ||
| 122 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 121 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 123 | .exit_latency = 20, | 122 | .exit_latency = 20, |
| 124 | .target_residency = 80, | 123 | .target_residency = 80, |
| @@ -126,7 +125,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 126 | { /* MWAIT C3 */ | 125 | { /* MWAIT C3 */ |
| 127 | .name = "C6-NHM", | 126 | .name = "C6-NHM", |
| 128 | .desc = "MWAIT 0x20", | 127 | .desc = "MWAIT 0x20", |
| 129 | .driver_data = (void *) 0x20, | ||
| 130 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 128 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 131 | .exit_latency = 200, | 129 | .exit_latency = 200, |
| 132 | .target_residency = 800, | 130 | .target_residency = 800, |
| @@ -138,7 +136,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 138 | { /* MWAIT C1 */ | 136 | { /* MWAIT C1 */ |
| 139 | .name = "C1-SNB", | 137 | .name = "C1-SNB", |
| 140 | .desc = "MWAIT 0x00", | 138 | .desc = "MWAIT 0x00", |
| 141 | .driver_data = (void *) 0x00, | ||
| 142 | .flags = CPUIDLE_FLAG_TIME_VALID, | 139 | .flags = CPUIDLE_FLAG_TIME_VALID, |
| 143 | .exit_latency = 1, | 140 | .exit_latency = 1, |
| 144 | .target_residency = 1, | 141 | .target_residency = 1, |
| @@ -146,7 +143,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 146 | { /* MWAIT C2 */ | 143 | { /* MWAIT C2 */ |
| 147 | .name = "C3-SNB", | 144 | .name = "C3-SNB", |
| 148 | .desc = "MWAIT 0x10", | 145 | .desc = "MWAIT 0x10", |
| 149 | .driver_data = (void *) 0x10, | ||
| 150 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 146 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 151 | .exit_latency = 80, | 147 | .exit_latency = 80, |
| 152 | .target_residency = 211, | 148 | .target_residency = 211, |
| @@ -154,7 +150,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 154 | { /* MWAIT C3 */ | 150 | { /* MWAIT C3 */ |
| 155 | .name = "C6-SNB", | 151 | .name = "C6-SNB", |
| 156 | .desc = "MWAIT 0x20", | 152 | .desc = "MWAIT 0x20", |
| 157 | .driver_data = (void *) 0x20, | ||
| 158 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 153 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 159 | .exit_latency = 104, | 154 | .exit_latency = 104, |
| 160 | .target_residency = 345, | 155 | .target_residency = 345, |
| @@ -162,7 +157,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 162 | { /* MWAIT C4 */ | 157 | { /* MWAIT C4 */ |
| 163 | .name = "C7-SNB", | 158 | .name = "C7-SNB", |
| 164 | .desc = "MWAIT 0x30", | 159 | .desc = "MWAIT 0x30", |
| 165 | .driver_data = (void *) 0x30, | ||
| 166 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 160 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 167 | .exit_latency = 109, | 161 | .exit_latency = 109, |
| 168 | .target_residency = 345, | 162 | .target_residency = 345, |
| @@ -174,7 +168,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 174 | { /* MWAIT C1 */ | 168 | { /* MWAIT C1 */ |
| 175 | .name = "C1-ATM", | 169 | .name = "C1-ATM", |
| 176 | .desc = "MWAIT 0x00", | 170 | .desc = "MWAIT 0x00", |
| 177 | .driver_data = (void *) 0x00, | ||
| 178 | .flags = CPUIDLE_FLAG_TIME_VALID, | 171 | .flags = CPUIDLE_FLAG_TIME_VALID, |
| 179 | .exit_latency = 1, | 172 | .exit_latency = 1, |
| 180 | .target_residency = 4, | 173 | .target_residency = 4, |
| @@ -182,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 182 | { /* MWAIT C2 */ | 175 | { /* MWAIT C2 */ |
| 183 | .name = "C2-ATM", | 176 | .name = "C2-ATM", |
| 184 | .desc = "MWAIT 0x10", | 177 | .desc = "MWAIT 0x10", |
| 185 | .driver_data = (void *) 0x10, | ||
| 186 | .flags = CPUIDLE_FLAG_TIME_VALID, | 178 | .flags = CPUIDLE_FLAG_TIME_VALID, |
| 187 | .exit_latency = 20, | 179 | .exit_latency = 20, |
| 188 | .target_residency = 80, | 180 | .target_residency = 80, |
| @@ -191,7 +183,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 191 | { /* MWAIT C4 */ | 183 | { /* MWAIT C4 */ |
| 192 | .name = "C4-ATM", | 184 | .name = "C4-ATM", |
| 193 | .desc = "MWAIT 0x30", | 185 | .desc = "MWAIT 0x30", |
| 194 | .driver_data = (void *) 0x30, | ||
| 195 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 186 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 196 | .exit_latency = 100, | 187 | .exit_latency = 100, |
| 197 | .target_residency = 400, | 188 | .target_residency = 400, |
| @@ -200,23 +191,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
| 200 | { /* MWAIT C6 */ | 191 | { /* MWAIT C6 */ |
| 201 | .name = "C6-ATM", | 192 | .name = "C6-ATM", |
| 202 | .desc = "MWAIT 0x52", | 193 | .desc = "MWAIT 0x52", |
| 203 | .driver_data = (void *) 0x52, | ||
| 204 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 194 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
| 205 | .exit_latency = 140, | 195 | .exit_latency = 140, |
| 206 | .target_residency = 560, | 196 | .target_residency = 560, |
| 207 | .enter = &intel_idle }, | 197 | .enter = &intel_idle }, |
| 208 | }; | 198 | }; |
| 209 | 199 | ||
| 200 | static int get_driver_data(int cstate) | ||
| 201 | { | ||
| 202 | int driver_data; | ||
| 203 | switch (cstate) { | ||
| 204 | |||
| 205 | case 1: /* MWAIT C1 */ | ||
| 206 | driver_data = 0x00; | ||
| 207 | break; | ||
| 208 | case 2: /* MWAIT C2 */ | ||
| 209 | driver_data = 0x10; | ||
| 210 | break; | ||
| 211 | case 3: /* MWAIT C3 */ | ||
| 212 | driver_data = 0x20; | ||
| 213 | break; | ||
| 214 | case 4: /* MWAIT C4 */ | ||
| 215 | driver_data = 0x30; | ||
| 216 | break; | ||
| 217 | case 5: /* MWAIT C5 */ | ||
| 218 | driver_data = 0x40; | ||
| 219 | break; | ||
| 220 | case 6: /* MWAIT C6 */ | ||
| 221 | driver_data = 0x52; | ||
| 222 | break; | ||
| 223 | default: | ||
| 224 | driver_data = 0x00; | ||
| 225 | } | ||
| 226 | return driver_data; | ||
| 227 | } | ||
| 228 | |||
| 210 | /** | 229 | /** |
| 211 | * intel_idle | 230 | * intel_idle |
| 212 | * @dev: cpuidle_device | 231 | * @dev: cpuidle_device |
| 213 | * @state: cpuidle state | 232 | * @drv: cpuidle driver |
| 233 | * @index: index of cpuidle state | ||
| 214 | * | 234 | * |
| 215 | */ | 235 | */ |
| 216 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | 236 | static int intel_idle(struct cpuidle_device *dev, |
| 237 | struct cpuidle_driver *drv, int index) | ||
| 217 | { | 238 | { |
| 218 | unsigned long ecx = 1; /* break on interrupt flag */ | 239 | unsigned long ecx = 1; /* break on interrupt flag */ |
| 219 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state); | 240 | struct cpuidle_state *state = &drv->states[index]; |
| 241 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | ||
| 242 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | ||
| 220 | unsigned int cstate; | 243 | unsigned int cstate; |
| 221 | ktime_t kt_before, kt_after; | 244 | ktime_t kt_before, kt_after; |
| 222 | s64 usec_delta; | 245 | s64 usec_delta; |
| @@ -257,7 +280,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | |||
| 257 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 280 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
| 258 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 281 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
| 259 | 282 | ||
| 260 | return usec_delta; | 283 | /* Update cpuidle counters */ |
| 284 | dev->last_residency = (int)usec_delta; | ||
| 285 | |||
| 286 | return index; | ||
| 261 | } | 287 | } |
| 262 | 288 | ||
| 263 | static void __setup_broadcast_timer(void *arg) | 289 | static void __setup_broadcast_timer(void *arg) |
| @@ -398,6 +424,60 @@ static void intel_idle_cpuidle_devices_uninit(void) | |||
| 398 | return; | 424 | return; |
| 399 | } | 425 | } |
| 400 | /* | 426 | /* |
| 427 | * intel_idle_cpuidle_driver_init() | ||
| 428 | * allocate, initialize cpuidle_states | ||
| 429 | */ | ||
| 430 | static int intel_idle_cpuidle_driver_init(void) | ||
| 431 | { | ||
| 432 | int cstate; | ||
| 433 | struct cpuidle_driver *drv = &intel_idle_driver; | ||
| 434 | |||
| 435 | drv->state_count = 1; | ||
| 436 | |||
| 437 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | ||
| 438 | int num_substates; | ||
| 439 | |||
| 440 | if (cstate > max_cstate) { | ||
| 441 | printk(PREFIX "max_cstate %d reached\n", | ||
| 442 | max_cstate); | ||
| 443 | break; | ||
| 444 | } | ||
| 445 | |||
| 446 | /* does the state exist in CPUID.MWAIT? */ | ||
| 447 | num_substates = (mwait_substates >> ((cstate) * 4)) | ||
| 448 | & MWAIT_SUBSTATE_MASK; | ||
| 449 | if (num_substates == 0) | ||
| 450 | continue; | ||
| 451 | /* is the state not enabled? */ | ||
| 452 | if (cpuidle_state_table[cstate].enter == NULL) { | ||
| 453 | /* does the driver not know about the state? */ | ||
| 454 | if (*cpuidle_state_table[cstate].name == '\0') | ||
| 455 | pr_debug(PREFIX "unaware of model 0x%x" | ||
| 456 | " MWAIT %d please" | ||
| 457 | " contact lenb@kernel.org", | ||
| 458 | boot_cpu_data.x86_model, cstate); | ||
| 459 | continue; | ||
| 460 | } | ||
| 461 | |||
| 462 | if ((cstate > 2) && | ||
| 463 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
| 464 | mark_tsc_unstable("TSC halts in idle" | ||
| 465 | " states deeper than C2"); | ||
| 466 | |||
| 467 | drv->states[drv->state_count] = /* structure copy */ | ||
| 468 | cpuidle_state_table[cstate]; | ||
| 469 | |||
| 470 | drv->state_count += 1; | ||
| 471 | } | ||
| 472 | |||
| 473 | if (auto_demotion_disable_flags) | ||
| 474 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
| 475 | |||
| 476 | return 0; | ||
| 477 | } | ||
| 478 | |||
| 479 | |||
| 480 | /* | ||
| 401 | * intel_idle_cpuidle_devices_init() | 481 | * intel_idle_cpuidle_devices_init() |
| 402 | * allocate, initialize, register cpuidle_devices | 482 | * allocate, initialize, register cpuidle_devices |
| 403 | */ | 483 | */ |
| @@ -431,22 +511,11 @@ static int intel_idle_cpuidle_devices_init(void) | |||
| 431 | continue; | 511 | continue; |
| 432 | /* is the state not enabled? */ | 512 | /* is the state not enabled? */ |
| 433 | if (cpuidle_state_table[cstate].enter == NULL) { | 513 | if (cpuidle_state_table[cstate].enter == NULL) { |
| 434 | /* does the driver not know about the state? */ | ||
| 435 | if (*cpuidle_state_table[cstate].name == '\0') | ||
| 436 | pr_debug(PREFIX "unaware of model 0x%x" | ||
| 437 | " MWAIT %d please" | ||
| 438 | " contact lenb@kernel.org", | ||
| 439 | boot_cpu_data.x86_model, cstate); | ||
| 440 | continue; | 514 | continue; |
| 441 | } | 515 | } |
| 442 | 516 | ||
| 443 | if ((cstate > 2) && | 517 | dev->states_usage[dev->state_count].driver_data = |
| 444 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | 518 | (void *)get_driver_data(cstate); |
| 445 | mark_tsc_unstable("TSC halts in idle" | ||
| 446 | " states deeper than C2"); | ||
| 447 | |||
| 448 | dev->states[dev->state_count] = /* structure copy */ | ||
| 449 | cpuidle_state_table[cstate]; | ||
| 450 | 519 | ||
| 451 | dev->state_count += 1; | 520 | dev->state_count += 1; |
| 452 | } | 521 | } |
| @@ -459,8 +528,6 @@ static int intel_idle_cpuidle_devices_init(void) | |||
| 459 | return -EIO; | 528 | return -EIO; |
| 460 | } | 529 | } |
| 461 | } | 530 | } |
| 462 | if (auto_demotion_disable_flags) | ||
| 463 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
| 464 | 531 | ||
| 465 | return 0; | 532 | return 0; |
| 466 | } | 533 | } |
| @@ -478,6 +545,7 @@ static int __init intel_idle_init(void) | |||
| 478 | if (retval) | 545 | if (retval) |
| 479 | return retval; | 546 | return retval; |
| 480 | 547 | ||
| 548 | intel_idle_cpuidle_driver_init(); | ||
| 481 | retval = cpuidle_register_driver(&intel_idle_driver); | 549 | retval = cpuidle_register_driver(&intel_idle_driver); |
| 482 | if (retval) { | 550 | if (retval) { |
| 483 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", | 551 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", |
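The intel_idle hunks above show the shape of the new enter() contract: the callback now receives the driver and a state index, fetches its private MWAIT hint from the per-cpu states_usage array, records the measured residency itself, and returns the index it actually entered. A minimal sketch of that pattern follows; my_enter_idle and the timing details are illustrative, the real code is intel_idle() in the hunk above.

	/*
	 * Sketch of an enter() routine using the new (dev, drv, index)
	 * cpuidle callback signature.  Illustrative names only.
	 */
	static int my_enter_idle(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
	{
		/* per-cpu private data now lives in dev->states_usage[index] */
		struct cpuidle_state_usage *su = &dev->states_usage[index];
		unsigned long hint = (unsigned long)cpuidle_get_statedata(su);
		ktime_t before, after;

		before = ktime_get();
		/* ... enter the low-power state described by 'hint' ... */
		after = ktime_get();

		/* the driver, not the core, now records the residency */
		dev->last_residency = (int)ktime_to_us(ktime_sub(after, before));

		return index;	/* report which state was actually entered */
	}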
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index bbf3edd85beb..5be4a392a3ae 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
| @@ -509,15 +509,12 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, | |||
| 509 | struct acpi_resource_dma *p) | 509 | struct acpi_resource_dma *p) |
| 510 | { | 510 | { |
| 511 | int i; | 511 | int i; |
| 512 | unsigned char map = 0, flags = 0; | 512 | unsigned char map = 0, flags; |
| 513 | |||
| 514 | if (p->channel_count == 0) | ||
| 515 | flags |= IORESOURCE_DISABLED; | ||
| 516 | 513 | ||
| 517 | for (i = 0; i < p->channel_count; i++) | 514 | for (i = 0; i < p->channel_count; i++) |
| 518 | map |= 1 << p->channels[i]; | 515 | map |= 1 << p->channels[i]; |
| 519 | 516 | ||
| 520 | flags |= dma_flags(dev, p->type, p->bus_master, p->transfer); | 517 | flags = dma_flags(dev, p->type, p->bus_master, p->transfer); |
| 521 | pnp_register_dma_resource(dev, option_flags, map, flags); | 518 | pnp_register_dma_resource(dev, option_flags, map, flags); |
| 522 | } | 519 | } |
| 523 | 520 | ||
| @@ -527,17 +524,14 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, | |||
| 527 | { | 524 | { |
| 528 | int i; | 525 | int i; |
| 529 | pnp_irq_mask_t map; | 526 | pnp_irq_mask_t map; |
| 530 | unsigned char flags = 0; | 527 | unsigned char flags; |
| 531 | |||
| 532 | if (p->interrupt_count == 0) | ||
| 533 | flags |= IORESOURCE_DISABLED; | ||
| 534 | 528 | ||
| 535 | bitmap_zero(map.bits, PNP_IRQ_NR); | 529 | bitmap_zero(map.bits, PNP_IRQ_NR); |
| 536 | for (i = 0; i < p->interrupt_count; i++) | 530 | for (i = 0; i < p->interrupt_count; i++) |
| 537 | if (p->interrupts[i]) | 531 | if (p->interrupts[i]) |
| 538 | __set_bit(p->interrupts[i], map.bits); | 532 | __set_bit(p->interrupts[i], map.bits); |
| 539 | 533 | ||
| 540 | flags |= irq_flags(p->triggering, p->polarity, p->sharable); | 534 | flags = irq_flags(p->triggering, p->polarity, p->sharable); |
| 541 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 535 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
| 542 | } | 536 | } |
| 543 | 537 | ||
| @@ -547,10 +541,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
| 547 | { | 541 | { |
| 548 | int i; | 542 | int i; |
| 549 | pnp_irq_mask_t map; | 543 | pnp_irq_mask_t map; |
| 550 | unsigned char flags = 0; | 544 | unsigned char flags; |
| 551 | |||
| 552 | if (p->interrupt_count == 0) | ||
| 553 | flags |= IORESOURCE_DISABLED; | ||
| 554 | 545 | ||
| 555 | bitmap_zero(map.bits, PNP_IRQ_NR); | 546 | bitmap_zero(map.bits, PNP_IRQ_NR); |
| 556 | for (i = 0; i < p->interrupt_count; i++) { | 547 | for (i = 0; i < p->interrupt_count; i++) { |
| @@ -564,7 +555,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
| 564 | } | 555 | } |
| 565 | } | 556 | } |
| 566 | 557 | ||
| 567 | flags |= irq_flags(p->triggering, p->polarity, p->sharable); | 558 | flags = irq_flags(p->triggering, p->polarity, p->sharable); |
| 568 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 559 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
| 569 | } | 560 | } |
| 570 | 561 | ||
| @@ -574,11 +565,8 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, | |||
| 574 | { | 565 | { |
| 575 | unsigned char flags = 0; | 566 | unsigned char flags = 0; |
| 576 | 567 | ||
| 577 | if (io->address_length == 0) | ||
| 578 | flags |= IORESOURCE_DISABLED; | ||
| 579 | |||
| 580 | if (io->io_decode == ACPI_DECODE_16) | 568 | if (io->io_decode == ACPI_DECODE_16) |
| 581 | flags |= IORESOURCE_IO_16BIT_ADDR; | 569 | flags = IORESOURCE_IO_16BIT_ADDR; |
| 582 | pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, | 570 | pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, |
| 583 | io->alignment, io->address_length, flags); | 571 | io->alignment, io->address_length, flags); |
| 584 | } | 572 | } |
| @@ -587,13 +575,8 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, | |||
| 587 | unsigned int option_flags, | 575 | unsigned int option_flags, |
| 588 | struct acpi_resource_fixed_io *io) | 576 | struct acpi_resource_fixed_io *io) |
| 589 | { | 577 | { |
| 590 | unsigned char flags = 0; | ||
| 591 | |||
| 592 | if (io->address_length == 0) | ||
| 593 | flags |= IORESOURCE_DISABLED; | ||
| 594 | |||
| 595 | pnp_register_port_resource(dev, option_flags, io->address, io->address, | 578 | pnp_register_port_resource(dev, option_flags, io->address, io->address, |
| 596 | 0, io->address_length, flags | IORESOURCE_IO_FIXED); | 579 | 0, io->address_length, IORESOURCE_IO_FIXED); |
| 597 | } | 580 | } |
| 598 | 581 | ||
| 599 | static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, | 582 | static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, |
| @@ -602,11 +585,8 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, | |||
| 602 | { | 585 | { |
| 603 | unsigned char flags = 0; | 586 | unsigned char flags = 0; |
| 604 | 587 | ||
| 605 | if (p->address_length == 0) | ||
| 606 | flags |= IORESOURCE_DISABLED; | ||
| 607 | |||
| 608 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 588 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
| 609 | flags |= IORESOURCE_MEM_WRITEABLE; | 589 | flags = IORESOURCE_MEM_WRITEABLE; |
| 610 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, | 590 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, |
| 611 | p->alignment, p->address_length, flags); | 591 | p->alignment, p->address_length, flags); |
| 612 | } | 592 | } |
| @@ -617,11 +597,8 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, | |||
| 617 | { | 597 | { |
| 618 | unsigned char flags = 0; | 598 | unsigned char flags = 0; |
| 619 | 599 | ||
| 620 | if (p->address_length == 0) | ||
| 621 | flags |= IORESOURCE_DISABLED; | ||
| 622 | |||
| 623 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 600 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
| 624 | flags |= IORESOURCE_MEM_WRITEABLE; | 601 | flags = IORESOURCE_MEM_WRITEABLE; |
| 625 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, | 602 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, |
| 626 | p->alignment, p->address_length, flags); | 603 | p->alignment, p->address_length, flags); |
| 627 | } | 604 | } |
| @@ -632,11 +609,8 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, | |||
| 632 | { | 609 | { |
| 633 | unsigned char flags = 0; | 610 | unsigned char flags = 0; |
| 634 | 611 | ||
| 635 | if (p->address_length == 0) | ||
| 636 | flags |= IORESOURCE_DISABLED; | ||
| 637 | |||
| 638 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 612 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
| 639 | flags |= IORESOURCE_MEM_WRITEABLE; | 613 | flags = IORESOURCE_MEM_WRITEABLE; |
| 640 | pnp_register_mem_resource(dev, option_flags, p->address, p->address, | 614 | pnp_register_mem_resource(dev, option_flags, p->address, p->address, |
| 641 | 0, p->address_length, flags); | 615 | 0, p->address_length, flags); |
| 642 | } | 616 | } |
| @@ -656,19 +630,16 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, | |||
| 656 | return; | 630 | return; |
| 657 | } | 631 | } |
| 658 | 632 | ||
| 659 | if (p->address_length == 0) | ||
| 660 | flags |= IORESOURCE_DISABLED; | ||
| 661 | |||
| 662 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 633 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
| 663 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 634 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
| 664 | flags |= IORESOURCE_MEM_WRITEABLE; | 635 | flags = IORESOURCE_MEM_WRITEABLE; |
| 665 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 636 | pnp_register_mem_resource(dev, option_flags, p->minimum, |
| 666 | p->minimum, 0, p->address_length, | 637 | p->minimum, 0, p->address_length, |
| 667 | flags); | 638 | flags); |
| 668 | } else if (p->resource_type == ACPI_IO_RANGE) | 639 | } else if (p->resource_type == ACPI_IO_RANGE) |
| 669 | pnp_register_port_resource(dev, option_flags, p->minimum, | 640 | pnp_register_port_resource(dev, option_flags, p->minimum, |
| 670 | p->minimum, 0, p->address_length, | 641 | p->minimum, 0, p->address_length, |
| 671 | flags | IORESOURCE_IO_FIXED); | 642 | IORESOURCE_IO_FIXED); |
| 672 | } | 643 | } |
| 673 | 644 | ||
| 674 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | 645 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, |
| @@ -678,19 +649,16 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | |||
| 678 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; | 649 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; |
| 679 | unsigned char flags = 0; | 650 | unsigned char flags = 0; |
| 680 | 651 | ||
| 681 | if (p->address_length == 0) | ||
| 682 | flags |= IORESOURCE_DISABLED; | ||
| 683 | |||
| 684 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 652 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
| 685 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 653 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
| 686 | flags |= IORESOURCE_MEM_WRITEABLE; | 654 | flags = IORESOURCE_MEM_WRITEABLE; |
| 687 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 655 | pnp_register_mem_resource(dev, option_flags, p->minimum, |
| 688 | p->minimum, 0, p->address_length, | 656 | p->minimum, 0, p->address_length, |
| 689 | flags); | 657 | flags); |
| 690 | } else if (p->resource_type == ACPI_IO_RANGE) | 658 | } else if (p->resource_type == ACPI_IO_RANGE) |
| 691 | pnp_register_port_resource(dev, option_flags, p->minimum, | 659 | pnp_register_port_resource(dev, option_flags, p->minimum, |
| 692 | p->minimum, 0, p->address_length, | 660 | p->minimum, 0, p->address_length, |
| 693 | flags | IORESOURCE_IO_FIXED); | 661 | IORESOURCE_IO_FIXED); |
| 694 | } | 662 | } |
| 695 | 663 | ||
| 696 | struct acpipnp_parse_option_s { | 664 | struct acpipnp_parse_option_s { |
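Each pnpacpi parse helper above loses its private "zero length or count means IORESOURCE_DISABLED" check, so the flags variables become plain assignments. The idea is that the disabled case is decided once at option registration time rather than in every parser; the helper below is only a hypothetical sketch of that idea, not code from this commit.

	/*
	 * Illustrative only: one central place to mark an empty option as
	 * disabled, replacing the per-parser checks deleted above.
	 */
	static unsigned char pnp_option_flags(unsigned int len, unsigned char flags)
	{
		if (len == 0)
			flags |= IORESOURCE_DISABLED;
		return flags;
	}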
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 708f8e92771a..dd9a5743fa99 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
| @@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, | |||
| 678 | return; | 678 | return; |
| 679 | 679 | ||
| 680 | if (delay > 1000) | 680 | if (delay > 1000) |
| 681 | schedule_delayed_work(&(tz->poll_queue), | 681 | queue_delayed_work(system_freezable_wq, &(tz->poll_queue), |
| 682 | round_jiffies(msecs_to_jiffies(delay))); | 682 | round_jiffies(msecs_to_jiffies(delay))); |
| 683 | else | 683 | else |
| 684 | schedule_delayed_work(&(tz->poll_queue), | 684 | queue_delayed_work(system_freezable_wq, &(tz->poll_queue), |
| 685 | msecs_to_jiffies(delay)); | 685 | msecs_to_jiffies(delay)); |
| 686 | } | 686 | } |
| 687 | 687 | ||
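The thermal change swaps schedule_delayed_work() for queue_delayed_work() on system_freezable_wq, so the polling work is frozen together with user space and cannot fire in the middle of system suspend. A minimal, self-contained sketch of that re-arming pattern, with illustrative names:

	#include <linux/workqueue.h>

	static void my_poll(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);

	static void my_poll(struct work_struct *work)
	{
		/* ... read sensors, act on trip points ... */

		/* re-arm on the freezable workqueue: no polling during suspend */
		queue_delayed_work(system_freezable_wq, &my_poll_work,
				   round_jiffies(msecs_to_jiffies(1000)));
	}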
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index e49c36d38d7e..bb145e4b935e 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h | |||
| @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb) | |||
| 144 | { | 144 | { |
| 145 | } | 145 | } |
| 146 | static inline int register_hotplug_dock_device(acpi_handle handle, | 146 | static inline int register_hotplug_dock_device(acpi_handle handle, |
| 147 | struct acpi_dock_ops *ops, | 147 | const struct acpi_dock_ops *ops, |
| 148 | void *context) | 148 | void *context) |
| 149 | { | 149 | { |
| 150 | return -ENODEV; | 150 | return -ENODEV; |
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index b67231bef632..ed73f6705c86 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
| @@ -470,7 +470,6 @@ typedef u64 acpi_integer; | |||
| 470 | */ | 470 | */ |
| 471 | #define ACPI_FULL_INITIALIZATION 0x00 | 471 | #define ACPI_FULL_INITIALIZATION 0x00 |
| 472 | #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 | 472 | #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 |
| 473 | #define ACPI_NO_HARDWARE_INIT 0x02 | ||
| 474 | #define ACPI_NO_EVENT_INIT 0x04 | 473 | #define ACPI_NO_EVENT_INIT 0x04 |
| 475 | #define ACPI_NO_HANDLER_INIT 0x08 | 474 | #define ACPI_NO_HANDLER_INIT 0x08 |
| 476 | #define ACPI_NO_ACPI_ENABLE 0x10 | 475 | #define ACPI_NO_ACPI_ENABLE 0x10 |
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 67055f180330..610f6fb1bbc2 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -329,6 +329,7 @@ extern void acpi_processor_throttling_init(void); | |||
| 329 | int acpi_processor_power_init(struct acpi_processor *pr, | 329 | int acpi_processor_power_init(struct acpi_processor *pr, |
| 330 | struct acpi_device *device); | 330 | struct acpi_device *device); |
| 331 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); | 331 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); |
| 332 | int acpi_processor_hotplug(struct acpi_processor *pr); | ||
| 332 | int acpi_processor_power_exit(struct acpi_processor *pr, | 333 | int acpi_processor_power_exit(struct acpi_processor *pr, |
| 333 | struct acpi_device *device); | 334 | struct acpi_device *device); |
| 334 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); | 335 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 583baf22cad2..7408af843b8a 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -23,57 +23,62 @@ | |||
| 23 | struct module; | 23 | struct module; |
| 24 | 24 | ||
| 25 | struct cpuidle_device; | 25 | struct cpuidle_device; |
| 26 | struct cpuidle_driver; | ||
| 26 | 27 | ||
| 27 | 28 | ||
| 28 | /**************************** | 29 | /**************************** |
| 29 | * CPUIDLE DEVICE INTERFACE * | 30 | * CPUIDLE DEVICE INTERFACE * |
| 30 | ****************************/ | 31 | ****************************/ |
| 31 | 32 | ||
| 33 | struct cpuidle_state_usage { | ||
| 34 | void *driver_data; | ||
| 35 | |||
| 36 | unsigned long long usage; | ||
| 37 | unsigned long long time; /* in US */ | ||
| 38 | }; | ||
| 39 | |||
| 32 | struct cpuidle_state { | 40 | struct cpuidle_state { |
| 33 | char name[CPUIDLE_NAME_LEN]; | 41 | char name[CPUIDLE_NAME_LEN]; |
| 34 | char desc[CPUIDLE_DESC_LEN]; | 42 | char desc[CPUIDLE_DESC_LEN]; |
| 35 | void *driver_data; | ||
| 36 | 43 | ||
| 37 | unsigned int flags; | 44 | unsigned int flags; |
| 38 | unsigned int exit_latency; /* in US */ | 45 | unsigned int exit_latency; /* in US */ |
| 39 | unsigned int power_usage; /* in mW */ | 46 | unsigned int power_usage; /* in mW */ |
| 40 | unsigned int target_residency; /* in US */ | 47 | unsigned int target_residency; /* in US */ |
| 41 | 48 | ||
| 42 | unsigned long long usage; | ||
| 43 | unsigned long long time; /* in US */ | ||
| 44 | |||
| 45 | int (*enter) (struct cpuidle_device *dev, | 49 | int (*enter) (struct cpuidle_device *dev, |
| 46 | struct cpuidle_state *state); | 50 | struct cpuidle_driver *drv, |
| 51 | int index); | ||
| 47 | }; | 52 | }; |
| 48 | 53 | ||
| 49 | /* Idle State Flags */ | 54 | /* Idle State Flags */ |
| 50 | #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ | 55 | #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ |
| 51 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ | ||
| 52 | 56 | ||
| 53 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | 57 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) |
| 54 | 58 | ||
| 55 | /** | 59 | /** |
| 56 | * cpuidle_get_statedata - retrieves private driver state data | 60 | * cpuidle_get_statedata - retrieves private driver state data |
| 57 | * @state: the state | 61 | * @st_usage: the state usage statistics |
| 58 | */ | 62 | */ |
| 59 | static inline void * cpuidle_get_statedata(struct cpuidle_state *state) | 63 | static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage) |
| 60 | { | 64 | { |
| 61 | return state->driver_data; | 65 | return st_usage->driver_data; |
| 62 | } | 66 | } |
| 63 | 67 | ||
| 64 | /** | 68 | /** |
| 65 | * cpuidle_set_statedata - stores private driver state data | 69 | * cpuidle_set_statedata - stores private driver state data |
| 66 | * @state: the state | 70 | * @st_usage: the state usage statistics |
| 67 | * @data: the private data | 71 | * @data: the private data |
| 68 | */ | 72 | */ |
| 69 | static inline void | 73 | static inline void |
| 70 | cpuidle_set_statedata(struct cpuidle_state *state, void *data) | 74 | cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data) |
| 71 | { | 75 | { |
| 72 | state->driver_data = data; | 76 | st_usage->driver_data = data; |
| 73 | } | 77 | } |
| 74 | 78 | ||
| 75 | struct cpuidle_state_kobj { | 79 | struct cpuidle_state_kobj { |
| 76 | struct cpuidle_state *state; | 80 | struct cpuidle_state *state; |
| 81 | struct cpuidle_state_usage *state_usage; | ||
| 77 | struct completion kobj_unregister; | 82 | struct completion kobj_unregister; |
| 78 | struct kobject kobj; | 83 | struct kobject kobj; |
| 79 | }; | 84 | }; |
| @@ -81,22 +86,17 @@ struct cpuidle_state_kobj { | |||
| 81 | struct cpuidle_device { | 86 | struct cpuidle_device { |
| 82 | unsigned int registered:1; | 87 | unsigned int registered:1; |
| 83 | unsigned int enabled:1; | 88 | unsigned int enabled:1; |
| 84 | unsigned int power_specified:1; | ||
| 85 | unsigned int cpu; | 89 | unsigned int cpu; |
| 86 | 90 | ||
| 87 | int last_residency; | 91 | int last_residency; |
| 88 | int state_count; | 92 | int state_count; |
| 89 | struct cpuidle_state states[CPUIDLE_STATE_MAX]; | 93 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; |
| 90 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | 94 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; |
| 91 | struct cpuidle_state *last_state; | ||
| 92 | 95 | ||
| 93 | struct list_head device_list; | 96 | struct list_head device_list; |
| 94 | struct kobject kobj; | 97 | struct kobject kobj; |
| 95 | struct completion kobj_unregister; | 98 | struct completion kobj_unregister; |
| 96 | void *governor_data; | 99 | void *governor_data; |
| 97 | struct cpuidle_state *safe_state; | ||
| 98 | |||
| 99 | int (*prepare) (struct cpuidle_device *dev); | ||
| 100 | }; | 100 | }; |
| 101 | 101 | ||
| 102 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 102 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
| @@ -120,6 +120,11 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | |||
| 120 | struct cpuidle_driver { | 120 | struct cpuidle_driver { |
| 121 | char name[CPUIDLE_NAME_LEN]; | 121 | char name[CPUIDLE_NAME_LEN]; |
| 122 | struct module *owner; | 122 | struct module *owner; |
| 123 | |||
| 124 | unsigned int power_specified:1; | ||
| 125 | struct cpuidle_state states[CPUIDLE_STATE_MAX]; | ||
| 126 | int state_count; | ||
| 127 | int safe_state_index; | ||
| 123 | }; | 128 | }; |
| 124 | 129 | ||
| 125 | #ifdef CONFIG_CPU_IDLE | 130 | #ifdef CONFIG_CPU_IDLE |
| @@ -166,11 +171,14 @@ struct cpuidle_governor { | |||
| 166 | struct list_head governor_list; | 171 | struct list_head governor_list; |
| 167 | unsigned int rating; | 172 | unsigned int rating; |
| 168 | 173 | ||
| 169 | int (*enable) (struct cpuidle_device *dev); | 174 | int (*enable) (struct cpuidle_driver *drv, |
| 170 | void (*disable) (struct cpuidle_device *dev); | 175 | struct cpuidle_device *dev); |
| 176 | void (*disable) (struct cpuidle_driver *drv, | ||
| 177 | struct cpuidle_device *dev); | ||
| 171 | 178 | ||
| 172 | int (*select) (struct cpuidle_device *dev); | 179 | int (*select) (struct cpuidle_driver *drv, |
| 173 | void (*reflect) (struct cpuidle_device *dev); | 180 | struct cpuidle_device *dev); |
| 181 | void (*reflect) (struct cpuidle_device *dev, int index); | ||
| 174 | 182 | ||
| 175 | struct module *owner; | 183 | struct module *owner; |
| 176 | }; | 184 | }; |
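With this header split, the idle states, state_count and safe_state_index are described once on the (now effectively global) cpuidle_driver, while cpuidle_device keeps only per-cpu bookkeeping in cpuidle_state_usage. A sketch of what a platform driver looks like after the split; my_idle_driver, my_enter and the latency numbers are illustrative, not taken from this commit.

	/* States live on the driver; devices only carry per-cpu usage data. */
	static int my_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index);

	static struct cpuidle_driver my_idle_driver = {
		.name			= "my_idle",
		.owner			= THIS_MODULE,
		.safe_state_index	= 0,
		.state_count		= 2,
		.states = {
			{
				.name			= "C1",
				.desc			= "shallow idle",
				.flags			= CPUIDLE_FLAG_TIME_VALID,
				.exit_latency		= 1,
				.target_residency	= 1,
				.enter			= my_enter,
			},
			{
				.name			= "C2",
				.desc			= "deep idle",
				.flags			= CPUIDLE_FLAG_TIME_VALID,
				.exit_latency		= 100,
				.target_residency	= 400,
				.enter			= my_enter,
			},
		},
	};

	/* Registration order is unchanged: driver first, then one device per CPU:
	 * cpuidle_register_driver(&my_idle_driver);
	 * cpuidle_register_device(per_cpu_dev);
	 */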
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 8b2d37b59c9e..3c6f7808efae 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -162,19 +162,21 @@ void print_header(void) | |||
| 162 | 162 | ||
| 163 | void dump_cnt(struct counters *cnt) | 163 | void dump_cnt(struct counters *cnt) |
| 164 | { | 164 | { |
| 165 | fprintf(stderr, "package: %d ", cnt->pkg); | 165 | if (!cnt) |
| 166 | fprintf(stderr, "core:: %d ", cnt->core); | 166 | return; |
| 167 | fprintf(stderr, "CPU: %d ", cnt->cpu); | 167 | if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg); |
| 168 | fprintf(stderr, "TSC: %016llX\n", cnt->tsc); | 168 | if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core); |
| 169 | fprintf(stderr, "c3: %016llX\n", cnt->c3); | 169 | if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu); |
| 170 | fprintf(stderr, "c6: %016llX\n", cnt->c6); | 170 | if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc); |
| 171 | fprintf(stderr, "c7: %016llX\n", cnt->c7); | 171 | if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3); |
| 172 | fprintf(stderr, "aperf: %016llX\n", cnt->aperf); | 172 | if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6); |
| 173 | fprintf(stderr, "pc2: %016llX\n", cnt->pc2); | 173 | if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7); |
| 174 | fprintf(stderr, "pc3: %016llX\n", cnt->pc3); | 174 | if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf); |
| 175 | fprintf(stderr, "pc6: %016llX\n", cnt->pc6); | 175 | if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2); |
| 176 | fprintf(stderr, "pc7: %016llX\n", cnt->pc7); | 176 | if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3); |
| 177 | fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); | 177 | if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6); |
| 178 | if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7); | ||
| 179 | if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); | ||
| 178 | } | 180 | } |
| 179 | 181 | ||
| 180 | void dump_list(struct counters *cnt) | 182 | void dump_list(struct counters *cnt) |
