Diffstat (limited to 'arch/i386/kernel/acpi/cstate.c')
 arch/i386/kernel/acpi/cstate.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 20563e52c622..2d39f55d29a8 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include <acpi/processor.h>
 #include <asm/acpi.h>
@@ -46,13 +47,13 @@ EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
 
 /* The code below handles cstate entry with monitor-mwait pair on Intel*/
 
-struct cstate_entry_s {
+struct cstate_entry {
 	struct {
 		unsigned int eax;
 		unsigned int ecx;
 	} states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry_s *cpu_cstate_entry;	/* per CPU ptr */
+static struct cstate_entry *cpu_cstate_entry;	/* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
@@ -70,7 +71,7 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
 {
-	struct cstate_entry_s *percpu_entry;
+	struct cstate_entry *percpu_entry;
 	struct cpuinfo_x86 *c = cpu_data + cpu;
 
 	cpumask_t saved_mask;
@@ -135,7 +136,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
-	struct cstate_entry_s *percpu_entry;
+	struct cstate_entry *percpu_entry;
 
 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
 	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
@@ -149,16 +150,14 @@ static int __init ffh_cstate_init(void)
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return -1;
 
-	cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
 	return 0;
 }
 
 static void __exit ffh_cstate_exit(void)
 {
-	if (cpu_cstate_entry) {
-		free_percpu(cpu_cstate_entry);
-		cpu_cstate_entry = NULL;
-	}
+	free_percpu(cpu_cstate_entry);
+	cpu_cstate_entry = NULL;
 }
 
 arch_initcall(ffh_cstate_init);
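
For readers unfamiliar with the per-CPU allocator this patch touches, the sketch below (not part of the patch; the module and symbol names such as example_entry are hypothetical) shows the alloc_percpu() / per_cpu_ptr() / free_percpu() pattern that cstate.c uses for cpu_cstate_entry, written as a stand-alone kernel module.

/* Minimal illustrative sketch of the per-CPU allocation pattern used by
 * cpu_cstate_entry above.  All names here are made up for the example. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct example_entry {
	unsigned int eax;
	unsigned int ecx;
};

static struct example_entry *example_entries;	/* per CPU ptr */

static int __init example_init(void)
{
	struct example_entry *e;
	int cpu;

	/* Allocate one struct example_entry for every possible CPU. */
	example_entries = alloc_percpu(struct example_entry);
	if (!example_entries)
		return -ENOMEM;

	/* Fill in the slot belonging to the CPU we are running on;
	 * get_cpu()/put_cpu() keep us on that CPU while we touch it. */
	cpu = get_cpu();
	e = per_cpu_ptr(example_entries, cpu);
	e->eax = 0;
	e->ecx = 0;
	put_cpu();

	return 0;
}

static void __exit example_exit(void)
{
	/* Mirrors ffh_cstate_exit() as it reads after this patch. */
	free_percpu(example_entries);
	example_entries = NULL;
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");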