40 files changed, 456 insertions, 386 deletions
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 24
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
@@ -200,11 +200,9 @@ SRCARCH := $(ARCH)
 # Additional ARCH settings for x86
 ifeq ($(ARCH),i386)
         SRCARCH := x86
-        K64BIT := n
 endif
 ifeq ($(ARCH),x86_64)
         SRCARCH := x86
-        K64BIT := y
 endif
 
 KCONFIG_CONFIG ?= .config
@@ -341,7 +339,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
-export ARCH SRCARCH K64BIT CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
+export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
@@ -1334,12 +1332,7 @@ else
 ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS)
 endif
 
-# Take care of arch/x86
-ifeq ($(ARCH), $(SRCARCH))
-ALLSOURCE_ARCHS := $(ARCH)
-else
-ALLSOURCE_ARCHS := $(ARCH) $(SRCARCH)
-endif
+ALLSOURCE_ARCHS := $(SRCARCH)
 
 define find-sources
         ( for arch in $(ALLSOURCE_ARCHS) ; do \
@@ -194,8 +194,6 @@ CONFIGURING the kernel:
 "make *config" checks for a file named "all{yes/mod/no/random}.config"
 for symbol values that are to be forced. If this file is not found,
 it checks for a file named "all.config" to contain forced values.
-Finally it checks the environment variable K64BIT and if found, sets
-the config symbol "64BIT" to the value of the K64BIT variable.
 
 NOTES on "make config":
 - having unnecessary drivers will make the kernel bigger, and can
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 5da798282a54..61d9c9d69e6b 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -150,22 +150,45 @@ static void clk_pxa3xx_cken_disable(struct clk *clk)
         local_irq_enable();
 }
 
+static const struct clkops clk_pxa3xx_cken_ops = {
+        .enable = clk_pxa3xx_cken_enable,
+        .disable = clk_pxa3xx_cken_disable,
+};
+
 static const struct clkops clk_pxa3xx_hsio_ops = {
         .enable = clk_pxa3xx_cken_enable,
         .disable = clk_pxa3xx_cken_disable,
         .getrate = clk_pxa3xx_hsio_getrate,
 };
 
+#define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev) \
+        { \
+                .name = _name, \
+                .dev = _dev, \
+                .ops = &clk_pxa3xx_cken_ops, \
+                .rate = _rate, \
+                .cken = CKEN_##_cken, \
+                .delay = _delay, \
+        }
+
+#define PXA3xx_CK(_name, _cken, _ops, _dev) \
+        { \
+                .name = _name, \
+                .dev = _dev, \
+                .ops = _ops, \
+                .cken = CKEN_##_cken, \
+        }
+
 static struct clk pxa3xx_clks[] = {
-        INIT_CK("LCDCLK", LCD, &clk_pxa3xx_hsio_ops, &pxa_device_fb.dev),
-        INIT_CK("CAMCLK", CAMERA, &clk_pxa3xx_hsio_ops, NULL),
+        PXA3xx_CK("LCDCLK", LCD, &clk_pxa3xx_hsio_ops, &pxa_device_fb.dev),
+        PXA3xx_CK("CAMCLK", CAMERA, &clk_pxa3xx_hsio_ops, NULL),
 
-        INIT_CKEN("UARTCLK", FFUART, 14857000, 1, &pxa_device_ffuart.dev),
-        INIT_CKEN("UARTCLK", BTUART, 14857000, 1, &pxa_device_btuart.dev),
-        INIT_CKEN("UARTCLK", STUART, 14857000, 1, NULL),
+        PXA3xx_CKEN("UARTCLK", FFUART, 14857000, 1, &pxa_device_ffuart.dev),
+        PXA3xx_CKEN("UARTCLK", BTUART, 14857000, 1, &pxa_device_btuart.dev),
+        PXA3xx_CKEN("UARTCLK", STUART, 14857000, 1, NULL),
 
-        INIT_CKEN("I2CCLK", I2C, 32842000, 0, &pxa_device_i2c.dev),
-        INIT_CKEN("UDCCLK", UDC, 48000000, 5, &pxa_device_udc.dev),
+        PXA3xx_CKEN("I2CCLK", I2C, 32842000, 0, &pxa_device_i2c.dev),
+        PXA3xx_CKEN("UDCCLK", UDC, 48000000, 5, &pxa_device_udc.dev),
 };
 
 void __init pxa3xx_init_irq(void)
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index cefdf2f9f26e..333a82a3717e 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -322,7 +322,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
         if (off < kern_size &&
             user_size <= (kern_size - off)) {
-                vma->vm_flags |= VM_RESERVED;
                 ret = remap_pfn_range(vma, vma->vm_start,
                                       page_to_pfn(c->vm_pages) + off,
                                       user_size << PAGE_SHIFT,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1eb59971af5d..368864dfe6eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -3,8 +3,8 @@ mainmenu "Linux Kernel Configuration for x86"
 
 # Select 32 or 64 bit
 config 64BIT
-        bool "64-bit kernel"
-        default n
+        bool "64-bit kernel" if ARCH = "x86"
+        default ARCH = "x86_64"
         help
           Say yes to build a 64-bit kernel - formerly known as x86_64
           Say no to build a 32-bit kernel - formerly known as i386
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 289247d974c6..0ca27c7b0e8d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -637,6 +637,38 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
         }
 
         hpet_address = hpet_tbl->address.address;
+
+        /*
+         * Some broken BIOSes advertise HPET at 0x0. We really do not
+         * want to allocate a resource there.
+         */
+        if (!hpet_address) {
+                printk(KERN_WARNING PREFIX
+                       "HPET id: %#x base: %#lx is invalid\n",
+                       hpet_tbl->id, hpet_address);
+                return 0;
+        }
+#ifdef CONFIG_X86_64
+        /*
+         * Some even more broken BIOSes advertise HPET at
+         * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
+         * some noise:
+         */
+        if (hpet_address == 0xfed0000000000000UL) {
+                if (!hpet_force_user) {
+                        printk(KERN_WARNING PREFIX "HPET id: %#x "
+                               "base: 0xfed0000000000000 is bogus\n "
+                               "try hpet=force on the kernel command line to "
+                               "fix it up to 0xfed00000.\n", hpet_tbl->id);
+                        hpet_address = 0;
+                        return 0;
+                }
+                printk(KERN_WARNING PREFIX
+                       "HPET id: %#x base: 0xfed0000000000000 fixed up "
+                       "to 0xfed00000.\n", hpet_tbl->id);
+                hpet_address >>= 32;
+        }
+#endif
         printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
                hpet_tbl->id, hpet_address);
 
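For reference, the 0xfed0000000000000 case handled above is just the correct 32-bit HPET base shifted into the upper half of the 64-bit address field, so shifting it back down by 32 bits recovers 0xfed00000. A minimal stand-alone sketch of that arithmetic (ordinary user-space C, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t hpet_address = 0xfed0000000000000ULL; /* bogus value from a broken BIOS */

        if (hpet_address == 0xfed0000000000000ULL)
                hpet_address >>= 32; /* same fixup as the patch: yields 0xfed00000 */

        printf("fixed up HPET base: %#llx\n", (unsigned long long)hpet_address);
        return 0;
}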
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 9c36a53676b7..99e1ef9939be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -46,7 +46,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 2.00.00"
+#define VERSION "version 2.20.00"
 #include "powernow-k8.h"
 
 /* serialize freq changes */
@@ -73,33 +73,11 @@ static u32 find_khz_freq_from_fid(u32 fid)
         return 1000 * find_freq_from_fid(fid);
 }
 
-/* Return a frequency in MHz, given an input fid and did */
-static u32 find_freq_from_fiddid(u32 fid, u32 did)
+static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
 {
-        if (current_cpu_data.x86 == 0x10)
-                return 100 * (fid + 0x10) >> did;
-        else
-                return 100 * (fid + 0x8) >> did;
-}
-
-static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
-{
-        return 1000 * find_freq_from_fiddid(fid, did);
-}
-
-static u32 find_fid_from_pstate(u32 pstate)
-{
-        u32 hi, lo;
-        rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
-        return lo & HW_PSTATE_FID_MASK;
+        return data[pstate].frequency;
 }
 
-static u32 find_did_from_pstate(u32 pstate)
-{
-        u32 hi, lo;
-        rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
-        return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
-}
 
 /* Return the vco fid for an input fid
  *
@@ -142,9 +120,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
         if (cpu_family == CPU_HW_PSTATE) {
                 rdmsr(MSR_PSTATE_STATUS, lo, hi);
                 i = lo & HW_PSTATE_MASK;
-                rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
-                data->currfid = lo & HW_PSTATE_FID_MASK;
-                data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+                data->currpstate = i;
                 return 0;
         }
         do {
@@ -295,7 +271,7 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid,
 static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
 {
         wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-        data->currfid = find_fid_from_pstate(pstate);
+        data->currpstate = pstate;
         return 0;
 }
 
@@ -845,17 +821,20 @@ err_out:
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
 {
         int i;
+        u32 hi = 0, lo = 0;
+        rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
+        data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
         for (i = 0; i < data->acpi_data.state_count; i++) {
                 u32 index;
                 u32 hi = 0, lo = 0;
-                u32 fid;
-                u32 did;
 
                 index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-                if (index > MAX_HW_PSTATE) {
+                if (index > data->max_hw_pstate) {
                         printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
                         printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
+                        powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+                        continue;
                 }
                 rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
                 if (!(hi & HW_PSTATE_VALID_MASK)) {
@@ -864,22 +843,9 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
                         continue;
                 }
 
-                fid = lo & HW_PSTATE_FID_MASK;
-                did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+                powernow_table[i].index = index;
 
-                dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did);
-
-                powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT);
-
-                powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did);
-
-                if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
-                        printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
-                               powernow_table[i].frequency,
-                               (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
-                        powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
-                        continue;
-                }
+                powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
         }
         return 0;
 }
@@ -1020,22 +986,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
 /* Take a frequency, and issue the hardware pstate transition command */
 static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
 {
-        u32 fid = 0;
-        u32 did = 0;
         u32 pstate = 0;
         int res, i;
         struct cpufreq_freqs freqs;
 
         dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
 
-        /* get fid did for hardware pstate transition */
+        /* get MSR index for hardware pstate transition */
         pstate = index & HW_PSTATE_MASK;
-        if (pstate > MAX_HW_PSTATE)
+        if (pstate > data->max_hw_pstate)
                 return 0;
-        fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT;
-        did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT;
-        freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid);
-        freqs.new = find_khz_freq_from_fiddid(fid, did);
+        freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
         for_each_cpu_mask(i, *(data->available_cores)) {
                 freqs.cpu = i;
@@ -1043,9 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
         }
 
         res = transition_pstate(data, pstate);
-        data->currfid = find_fid_from_pstate(pstate);
-        data->currdid = find_did_from_pstate(pstate);
-        freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
         for_each_cpu_mask(i, *(data->available_cores)) {
                 freqs.cpu = i;
@@ -1090,10 +1050,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
         if (query_current_values_with_pending_wait(data))
                 goto err_out;
 
-        if (cpu_family == CPU_HW_PSTATE)
-                dprintk("targ: curr fid 0x%x, did 0x%x\n",
-                        data->currfid, data->currdid);
-        else {
+        if (cpu_family != CPU_HW_PSTATE) {
                 dprintk("targ: curr fid 0x%x, vid 0x%x\n",
                         data->currfid, data->currvid);
 
@@ -1124,7 +1081,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
         mutex_unlock(&fidvid_mutex);
 
         if (cpu_family == CPU_HW_PSTATE)
-                pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+                pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
         else
                 pol->cur = find_khz_freq_from_fid(data->currfid);
         ret = 0;
@@ -1223,7 +1180,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                 + (3 * (1 << data->irt) * 10)) * 1000;
 
         if (cpu_family == CPU_HW_PSTATE)
-                pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+                pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
         else
                 pol->cur = find_khz_freq_from_fid(data->currfid);
         dprintk("policy current frequency %d kHz\n", pol->cur);
@@ -1240,8 +1197,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
         if (cpu_family == CPU_HW_PSTATE)
-                dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
-                        data->currfid, data->currdid);
+                dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
         else
                 dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
                         data->currfid, data->currvid);
@@ -1297,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
                 goto out;
 
         if (cpu_family == CPU_HW_PSTATE)
-                khz = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+                khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
         else
                 khz = find_khz_freq_from_fid(data->currfid);
 
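The pstate rework above boils down to caching only the hardware P-state number (currpstate) and treating the cpufreq frequency table as the single source of frequencies. A small stand-alone sketch of that lookup, with made-up table values (not kernel code):

#include <stdio.h>
#include <stdint.h>

struct freq_entry {
        uint32_t index;     /* hardware pstate number */
        uint32_t frequency; /* kHz, taken from the ACPI _PSS core_frequency * 1000 */
};

/* mirrors find_khz_freq_from_pstate(): a plain table lookup */
static uint32_t khz_from_pstate(const struct freq_entry *table, uint32_t pstate)
{
        return table[pstate].frequency;
}

int main(void)
{
        struct freq_entry table[] = {
                { 0, 2200000 }, /* example P0: 2.2 GHz */
                { 1, 1800000 }, /* example P1: 1.8 GHz */
                { 2, 1000000 }, /* example P2: 1.0 GHz */
        };
        uint32_t currpstate = 1; /* what the driver now tracks instead of fid/did */

        printf("current frequency: %u kHz\n", khz_from_pstate(table, currpstate));
        return 0;
}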
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 7c4f6e0faed4..afd2b520d35c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -10,6 +10,7 @@ struct powernow_k8_data {
 
         u32 numps;  /* number of p-states */
         u32 batps;  /* number of p-states supported on battery */
+        u32 max_hw_pstate; /* maximum legal hardware pstate */
 
         /* these values are constant when the PSB is used to determine
          * vid/fid pairings, but are modified during the ->target() call
@@ -21,8 +22,8 @@ struct powernow_k8_data {
         u32 plllock; /* pll lock time, units 1 us */
         u32 exttype; /* extended interface = 1 */
 
-        /* keep track of the current fid / vid or did */
-        u32 currvid, currfid, currdid;
+        /* keep track of the current fid / vid or pstate */
+        u32 currvid, currfid, currpstate;
 
         /* the powernow_table includes all frequency and vid/fid pairings:
          * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -87,23 +88,14 @@ struct powernow_k8_data {
 
 /* Hardware Pstate _PSS and MSR definitions */
 #define USE_HW_PSTATE 0x00000080
-#define HW_PSTATE_FID_MASK 0x0000003f
-#define HW_PSTATE_DID_MASK 0x000001c0
-#define HW_PSTATE_DID_SHIFT 6
 #define HW_PSTATE_MASK 0x00000007
 #define HW_PSTATE_VALID_MASK 0x80000000
-#define HW_FID_INDEX_SHIFT 8
-#define HW_FID_INDEX_MASK 0x0000ff00
-#define HW_DID_INDEX_SHIFT 16
-#define HW_DID_INDEX_MASK 0x00ff0000
-#define HW_WATTS_MASK 0xff
-#define HW_PWR_DVR_MASK 0x300
-#define HW_PWR_DVR_SHIFT 8
-#define HW_PWR_MAX_MULT 3
-#define MAX_HW_PSTATE 8 /* hw pstate supports up to 8 */
+#define HW_PSTATE_MAX_MASK 0x000000f0
+#define HW_PSTATE_MAX_SHIFT 4
 #define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
 #define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
 #define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
+#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
 
 /* define the two driver architectures */
 #define CPU_OPTERON 0
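To illustrate the new header definitions: fill_powernow_table_pstate() above reads MSR_PSTATE_CUR_LIMIT and masks out the maximum legal P-state with HW_PSTATE_MAX_MASK/HW_PSTATE_MAX_SHIFT, replacing the old hard-coded MAX_HW_PSTATE limit of 8. A tiny sketch of that field extraction with an example register value (not kernel code):

#include <stdio.h>

#define HW_PSTATE_MAX_MASK  0x000000f0
#define HW_PSTATE_MAX_SHIFT 4

/* extract the maximum-pstate field from the register word the driver reads via rdmsr() */
static unsigned int max_hw_pstate(unsigned int msr_word)
{
        return (msr_word & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
}

int main(void)
{
        unsigned int msr_word = 0x52; /* example value only */

        printf("max legal hw pstate: %u\n", max_hw_pstate(msr_word)); /* prints 5 */
        return 0;
}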
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 447b351f1f2a..4b21d29fb5aa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -810,7 +810,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
         int err;
         int i;
 
-        if (!mce_available(&cpu_data(cpu)))
+        if (!mce_available(&boot_cpu_data))
                 return -EIO;
 
         memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 066f8c6af4df..3900e46d66db 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -89,8 +89,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         int fpu_exception;
 
 #ifdef CONFIG_SMP
-        if (!cpu_online(n))
-                return 0;
         n = c->cpu_index;
 #endif
         seq_printf(m, "processor\t: %d\n"
@@ -177,14 +175,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
         if (*pos == 0) /* just in case, cpu 0 is not the first */
-                *pos = first_cpu(cpu_possible_map);
-        if ((*pos) < NR_CPUS && cpu_possible(*pos))
+                *pos = first_cpu(cpu_online_map);
+        if ((*pos) < NR_CPUS && cpu_online(*pos))
                 return &cpu_data(*pos);
         return NULL;
 }
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        *pos = next_cpu(*pos, cpu_possible_map);
+        *pos = next_cpu(*pos, cpu_online_map);
         return c_start(m, pos);
 }
 static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index f35c6eb33da9..6bb80ea5f4ee 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -962,7 +962,7 @@ static int EISA_ELCR(unsigned int irq)
 #define default_MCA_trigger(idx) (1)
 #define default_MCA_polarity(idx) (0)
 
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
         int bus = mp_irqs[idx].mpc_srcbus;
         int polarity;
@@ -2830,6 +2830,25 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
         return 0;
 }
 
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+        int i;
+
+        if (skip_ioapic_setup)
+                return -1;
+
+        for (i = 0; i < mp_irq_entries; i++)
+                if (mp_irqs[i].mpc_irqtype == mp_INT &&
+                    mp_irqs[i].mpc_srcbusirq == bus_irq)
+                        break;
+        if (i >= mp_irq_entries)
+                return -1;
+
+        *trigger = irq_trigger(i);
+        *polarity = irq_polarity(i);
+        return 0;
+}
+
 #endif /* CONFIG_ACPI */
 
 static int __init parse_disable_timer_pin_1(char *arg)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 953328b55a30..435a8c9b55f8 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -546,7 +546,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 #define default_PCI_trigger(idx) (1)
 #define default_PCI_polarity(idx) (1)
 
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
         int bus = mp_irqs[idx].mpc_srcbus;
         int polarity;
@@ -2222,8 +2222,27 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
         return 0;
 }
 
-#endif /* CONFIG_ACPI */
 
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+        int i;
+
+        if (skip_ioapic_setup)
+                return -1;
+
+        for (i = 0; i < mp_irq_entries; i++)
+                if (mp_irqs[i].mpc_irqtype == mp_INT &&
+                    mp_irqs[i].mpc_srcbusirq == bus_irq)
+                        break;
+        if (i >= mp_irq_entries)
+                return -1;
+
+        *trigger = irq_trigger(i);
+        *polarity = irq_polarity(i);
+        return 0;
+}
+
+#endif /* CONFIG_ACPI */
 
 /*
  * This function currently is only a helper for the i386 smp boot process where
@@ -2260,3 +2279,4 @@ void __init setup_ioapic_dest(void)
         }
 }
 #endif
+
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 1a07bbea7be3..f452726c0fe2 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -39,6 +39,7 @@ struct device_fixup {
 static struct device_fixup fixups_table[] = {
 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
+{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
 };
 
 /*
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 238633d3d09a..30d94d1d5f5f 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -892,7 +892,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_SMP
         c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-        c->cpu_index = 0;
 #endif
 }
 
@@ -1078,8 +1077,6 @@
 
 
 #ifdef CONFIG_SMP
-        if (!cpu_online(c->cpu_index))
-                return 0;
         cpu = c->cpu_index;
 #endif
 
@@ -1171,15 +1168,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
         if (*pos == 0) /* just in case, cpu 0 is not the first */
-                *pos = first_cpu(cpu_possible_map);
-        if ((*pos) < NR_CPUS && cpu_possible(*pos))
+                *pos = first_cpu(cpu_online_map);
+        if ((*pos) < NR_CPUS && cpu_online(*pos))
                 return &cpu_data(*pos);
         return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        *pos = next_cpu(*pos, cpu_possible_map);
+        *pos = next_cpu(*pos, cpu_online_map);
         return c_start(m, pos);
 }
 
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index c821edc32216..368b1942b39a 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -82,18 +82,15 @@ static int set_rtc_mmss(unsigned long nowtime)
         int retval = 0;
         int real_seconds, real_minutes, cmos_minutes;
         unsigned char control, freq_select;
+        unsigned long flags;
 
         /*
-         * IRQs are disabled when we're called from the timer interrupt,
-         * no need for spin_lock_irqsave()
+         * set_rtc_mmss is called when irqs are enabled, so disable irqs here
          */
-
-        spin_lock(&rtc_lock);
-
+        spin_lock_irqsave(&rtc_lock, flags);
         /*
          * Tell the clock it's being set and stop it.
          */
-
         control = CMOS_READ(RTC_CONTROL);
         CMOS_WRITE(control | RTC_SET, RTC_CONTROL);
 
@@ -138,7 +135,7 @@ static int set_rtc_mmss(unsigned long nowtime)
         CMOS_WRITE(control, RTC_CONTROL);
         CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
 
-        spin_unlock(&rtc_lock);
+        spin_unlock_irqrestore(&rtc_lock, flags);
 
         return retval;
 }
@@ -164,21 +161,27 @@ unsigned long read_persistent_clock(void)
         unsigned century = 0;
 
         spin_lock_irqsave(&rtc_lock, flags);
+        /*
+         * if UIP is clear, then we have >= 244 microseconds before RTC
+         * registers will be updated. Spec sheet says that this is the
+         * reliable way to read RTC - registers invalid (off bus) during update
+         */
+        while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+                cpu_relax();
 
-        do {
-                sec = CMOS_READ(RTC_SECONDS);
-                min = CMOS_READ(RTC_MINUTES);
-                hour = CMOS_READ(RTC_HOURS);
-                day = CMOS_READ(RTC_DAY_OF_MONTH);
-                mon = CMOS_READ(RTC_MONTH);
-                year = CMOS_READ(RTC_YEAR);
+
+        /* now read all RTC registers while stable with interrupts disabled */
+        sec = CMOS_READ(RTC_SECONDS);
+        min = CMOS_READ(RTC_MINUTES);
+        hour = CMOS_READ(RTC_HOURS);
+        day = CMOS_READ(RTC_DAY_OF_MONTH);
+        mon = CMOS_READ(RTC_MONTH);
+        year = CMOS_READ(RTC_YEAR);
 #ifdef CONFIG_ACPI
         if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
             acpi_gbl_FADT.century)
                 century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
-        } while (sec != CMOS_READ(RTC_SECONDS));
-
         spin_unlock_irqrestore(&rtc_lock, flags);
 
         /*
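The read_persistent_clock() change above replaces the old read-until-the-seconds-stop-changing loop with a wait for the RTC update-in-progress (UIP) bit to clear, after which the registers stay stable for roughly 244 microseconds. A rough user-space style sketch of the same pattern, with a stubbed-out CMOS accessor (the real code uses the kernel's CMOS_READ with rtc_lock held and interrupts off):

#include <stdint.h>

#define RTC_SECONDS     0x00
#define RTC_MINUTES     0x02
#define RTC_FREQ_SELECT 0x0a /* status register A */
#define RTC_UIP         0x80 /* update-in-progress bit */

/* stub accessor for illustration; the real one reads I/O ports 0x70/0x71 */
static uint8_t cmos_read(uint8_t reg)
{
        (void)reg;
        return 0;
}

static void read_rtc(uint8_t *sec, uint8_t *min)
{
        /* UIP clear => at least ~244 us before the next register update */
        while (cmos_read(RTC_FREQ_SELECT) & RTC_UIP)
                ; /* spin; the kernel uses cpu_relax() here */

        /* now read everything once while the registers are stable */
        *sec = cmos_read(RTC_SECONDS);
        *min = cmos_read(RTC_MINUTES);
}

int main(void)
{
        uint8_t sec, min;

        read_rtc(&sec, &min);
        return 0;
}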
diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
index 26a2d4c54b68..2132ca652df1 100644
--- a/arch/x86/mach-voyager/voyager_cat.c
+++ b/arch/x86/mach-voyager/voyager_cat.c
@@ -568,7 +568,7 @@ static voyager_module_t *voyager_initial_module;
  * boot cpu *after* all memory initialisation has been done (so we can
  * use kmalloc) but before smp initialisation, so we can probe the SMP
  * configuration and pick up necessary information. */
-void
+void __init
 voyager_cat_init(void)
 {
         voyager_module_t **modpp = &voyager_initial_module;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 69371434b0cf..88124dd35406 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1900,7 +1900,7 @@ voyager_smp_prepare_cpus(unsigned int max_cpus)
         smp_boot_cpus();
 }
 
-static void __devinit voyager_smp_prepare_boot_cpu(void)
+static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
         init_gdt(smp_processor_id());
         switch_to_new_gdt();
@@ -1911,7 +1911,7 @@ static void __devinit voyager_smp_prepare_boot_cpu(void)
         cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-static int __devinit
+static int __cpuinit
 voyager_cpu_up(unsigned int cpu)
 {
         /* This only works at boot for x86. See "rewrite" above. */
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 2d88f7c6d6ac..7e35078673a4 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -77,6 +77,9 @@ count_resource(struct acpi_resource *acpi_res, void *data)
         struct acpi_resource_address64 addr;
         acpi_status status;
 
+        if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+                return AE_OK;
+
         status = resource_to_addr(acpi_res, &addr);
         if (ACPI_SUCCESS(status))
                 info->res_num++;
@@ -93,6 +96,9 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
         unsigned long flags;
         struct resource *root;
 
+        if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+                return AE_OK;
+
         status = resource_to_addr(acpi_res, &addr);
         if (!ACPI_SUCCESS(status))
                 return AE_OK;
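The two guards added above cap info->res_num at PCI_BUS_NUM_RESOURCES so the ACPI resource walk cannot index past the fixed-size resource array. A self-contained sketch of the same pattern, with a made-up limit and structure rather than the kernel's types:

#include <stdio.h>

#define MAX_RES 4 /* stands in for PCI_BUS_NUM_RESOURCES */

struct info {
        int res_num;
        int res[MAX_RES];
};

/* drop entries beyond the table size instead of writing out of bounds */
static void add_resource(struct info *info, int value)
{
        if (info->res_num >= MAX_RES)
                return;
        info->res[info->res_num++] = value;
}

int main(void)
{
        struct info info = { 0 };

        for (int v = 0; v < 10; v++)
                add_resource(&info, v);

        printf("stored %d of 10 resources\n", info.res_num); /* stored 4 of 10 */
        return 0;
}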
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 91f6e85d0fc2..3b1ae1abfba9 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
         unsigned int dummy, p;
-        unsigned long j = 0;
 
-        /* Fast cache - only recompute value once per jiffies and avoid
-           relatively costly rdtscp/cpuid otherwise.
-           This works because the scheduler usually keeps the process
-           on the same CPU and this syscall doesn't guarantee its
-           results anyways.
-           We do this here because otherwise user space would do it on
-           its own in a likely inferior way (no access to jiffies).
-           If you don't like it pass NULL. */
-        if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-                p = tcache->blob[1];
-        } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+        if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
                 /* Load per CPU data from RDTSCP */
                 rdtscp(dummy, dummy, p);
         } else {
                 /* Load per CPU data from GDT */
                 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
         }
-        if (tcache) {
-                tcache->blob[0] = j;
-                tcache->blob[1] = p;
-        }
         if (cpu)
                 *cpu = p & 0xfff;
         if (node)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 087a7028ae84..b9f923ef173d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -50,7 +50,6 @@ config ACPI_SLEEP
 config ACPI_PROCFS
         bool "Deprecated /proc/acpi files"
         depends on PROC_FS
-        default y
         ---help---
           For backwards compatibility, this option allows
           deprecated /proc/acpi/ files to exist, even when
@@ -61,7 +60,6 @@ config ACPI_PROCFS
           /proc/acpi/info (/sys/modules/acpi/parameters/acpica_version)
           /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT)
           /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
-          /proc/acpi/battery (/sys/class/power_supply)
           /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
           /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
 
@@ -69,7 +67,21 @@ config ACPI_PROCFS
           and functions which do not yet exist in /sys.
 
           Say N to delete /proc/acpi/ files that have moved to /sys/
-
+config ACPI_PROCFS_POWER
+        bool "Deprecated power /proc/acpi folders"
+        depends on PROC_FS
+        default y
+        ---help---
+          For backwards compatibility, this option allows
+          deprecated power /proc/acpi/ folders to exist, even when
+          they have been replaced by functions in /sys.
+          The deprecated folders (and their replacements) include:
+          /proc/acpi/battery/* (/sys/class/power_supply/*)
+          /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
+          This option has no effect on /proc/acpi/ folders
+          and functions, which do not yet exist in /sys
+
+          Say N to delete power /proc/acpi/ folders that have moved to /sys/
 config ACPI_PROC_EVENT
         bool "Deprecated /proc/acpi/event support"
         depends on PROC_FS
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 54e3ab0e5fc0..456446f90077 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -58,6 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
 obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
 obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
-obj-y += cm_sbs.o
+obj-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 obj-$(CONFIG_ACPI_SBS) += sbs.o
 obj-$(CONFIG_ACPI_SBS) += sbshc.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 30238f6ff232..76ed4f52bebd 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -27,7 +27,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #endif
@@ -51,7 +51,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 extern struct proc_dir_entry *acpi_lock_ac_dir(void);
 extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
 static int acpi_ac_open_fs(struct inode *inode, struct file *file);
@@ -86,7 +86,7 @@ struct acpi_ac {
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 static const struct file_operations acpi_ac_fops = {
         .open = acpi_ac_open_fs,
         .read = seq_read,
@@ -136,7 +136,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
         return 0;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -275,7 +275,7 @@ static int acpi_ac_add(struct acpi_device *device)
         if (result)
                 goto end;
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         result = acpi_ac_add_fs(device);
 #endif
         if (result)
@@ -300,7 +300,7 @@ static int acpi_ac_add(struct acpi_device *device)
 
       end:
         if (result) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
                 acpi_ac_remove_fs(device);
 #endif
                 kfree(ac);
@@ -339,7 +339,7 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
                                             ACPI_ALL_NOTIFY, acpi_ac_notify);
         if (ac->charger.dev)
                 power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_ac_remove_fs(device);
 #endif
 
@@ -355,7 +355,7 @@ static int __init acpi_ac_init(void)
         if (acpi_disabled)
                 return -ENODEV;
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_ac_dir = acpi_lock_ac_dir();
         if (!acpi_ac_dir)
                 return -ENODEV;
@@ -363,7 +363,7 @@ static int __init acpi_ac_init(void)
 
         result = acpi_bus_register_driver(&acpi_ac_driver);
         if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
                 acpi_unlock_ac_dir(acpi_ac_dir);
 #endif
                 return -ENODEV;
@@ -377,7 +377,7 @@ static void __exit acpi_ac_exit(void)
 
         acpi_bus_unregister_driver(&acpi_ac_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_unlock_ac_dir(acpi_ac_dir);
 #endif
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 192c244f6190..7d6be23eff89 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -31,7 +31,7 @@
 #include <linux/types.h>
 #include <linux/jiffies.h>
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <asm/uaccess.h>
@@ -63,7 +63,7 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 extern struct proc_dir_entry *acpi_lock_battery_dir(void);
 extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 
@@ -153,6 +153,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
                         val->intval = POWER_SUPPLY_STATUS_CHARGING;
                 else if (battery->state == 0)
                         val->intval = POWER_SUPPLY_STATUS_FULL;
+                else
+                        val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
                 break;
         case POWER_SUPPLY_PROP_PRESENT:
                 val->intval = acpi_battery_present(battery);
@@ -221,7 +223,7 @@ static enum power_supply_property energy_battery_props[] = {
         POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 inline char *acpi_battery_units(struct acpi_battery *battery)
 {
         return (battery->power_unit)?"mA":"mW";
@@ -479,7 +481,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
 
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
 static struct proc_dir_entry *acpi_battery_dir;
 
 static int acpi_battery_print_info(struct seq_file *seq, int result)
@@ -786,7 +788,7 @@ static int acpi_battery_add(struct acpi_device *device)
         acpi_driver_data(device) = battery;
         mutex_init(&battery->lock);
         acpi_battery_update(battery);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         result = acpi_battery_add_fs(device);
         if (result)
                 goto end;
@@ -804,7 +806,7 @@ static int acpi_battery_add(struct acpi_device *device)
                device->status.battery_present ? "present" : "absent");
       end:
         if (result) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
                 acpi_battery_remove_fs(device);
 #endif
                 kfree(battery);
@@ -823,7 +825,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
         status = acpi_remove_notify_handler(device->handle,
                                             ACPI_ALL_NOTIFY,
                                             acpi_battery_notify);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_battery_remove_fs(device);
 #endif
         sysfs_remove_battery(battery);
@@ -859,13 +861,13 @@ static int __init acpi_battery_init(void)
 {
         if (acpi_disabled)
                 return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_battery_dir = acpi_lock_battery_dir();
         if (!acpi_battery_dir)
                 return -ENODEV;
 #endif
         if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
                 acpi_unlock_battery_dir(acpi_battery_dir);
 #endif
                 return -ENODEV;
@@ -876,7 +878,7 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
         acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS
+#ifdef CONFIG_ACPI_PROCFS_POWER
         acpi_unlock_battery_dir(acpi_battery_dir);
 #endif
 }
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 06b78e5e33a1..d6ddb547f2d9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -75,7 +75,8 @@ enum {
         EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
         EC_FLAGS_QUERY_PENDING, /* Query is pending */
         EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
-        EC_FLAGS_ONLY_IBF_GPE, /* Expect GPE only for IBF = 0 event */
+        EC_FLAGS_NO_ADDRESS_GPE, /* Expect GPE only for non-address event */
+        EC_FLAGS_ADDRESS, /* Address is being written */
 };
 
 static int acpi_ec_remove(struct acpi_device *device, int type);
@@ -166,38 +167,45 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
 
 static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 {
+        int ret = 0;
+        if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
+                     test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
+                force_poll = 1;
         if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
             likely(!force_poll)) {
                 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
                                        msecs_to_jiffies(ACPI_EC_DELAY)))
-                        return 0;
+                        goto end;
                 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                 if (acpi_ec_check_status(ec, event)) {
-                        if (event == ACPI_EC_EVENT_OBF_1) {
-                                /* miss OBF = 1 GPE, don't expect it anymore */
-                                printk(KERN_INFO PREFIX "missing OBF_1 confirmation,"
-                                       "switching to degraded mode.\n");
-                                set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags);
+                        if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
+                                /* miss address GPE, don't expect it anymore */
+                                printk(KERN_INFO PREFIX "missing address confirmation,"
+                                       "don't expect it any longer.\n");
+                                set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
                         } else {
                                 /* missing GPEs, switch back to poll mode */
-                                printk(KERN_INFO PREFIX "missing IBF_1 confirmations,"
+                                printk(KERN_INFO PREFIX "missing confirmations,"
                                        "switch off interrupt mode.\n");
                                 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
                         }
-                        return 0;
+                        goto end;
                 }
         } else {
                 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
                 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                 while (time_before(jiffies, delay)) {
                         if (acpi_ec_check_status(ec, event))
-                                return 0;
+                                goto end;
                 }
         }
         printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
                " status = %d, expect_event = %d\n",
                acpi_ec_read_status(ec), event);
-        return -ETIME;
+        ret = -ETIME;
206 | end: | ||
207 | clear_bit(EC_FLAGS_ADDRESS, &ec->flags); | ||
208 | return ret; | ||
201 | } | 209 | } |
202 | 210 | ||
203 | static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, | 211 | static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, |
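
The rework above replaces the OBF-specific ONLY_IBF_GPE special case with two flag bits: EC_FLAGS_ADDRESS marks that an address byte is in flight, and EC_FLAGS_NO_ADDRESS_GPE records that this EC never raises a GPE for it, so only those transfers degrade to polling. A condensed sketch of the decision (field names as in the patch, error handling and the busy-wait branch abbreviated):

	int ret = 0;

	/* address byte outstanding on an EC known not to signal it -> poll */
	if (test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
	    test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags))
		force_poll = 1;

	if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) && !force_poll) {
		if (!wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
					msecs_to_jiffies(ACPI_EC_DELAY)))
			ret = -ETIME;	/* real code re-checks status and may
					   demote to NO_ADDRESS_GPE/poll mode */
	} else {
		/* timed busy-wait fallback, as in the original poll loop */
	}

	clear_bit(EC_FLAGS_ADDRESS, &ec->flags);	/* always consumed on exit */
	return ret;
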
@@ -216,6 +224,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, | |||
216 | "write_cmd timeout, command = %d\n", command); | 224 | "write_cmd timeout, command = %d\n", command); |
217 | goto end; | 225 | goto end; |
218 | } | 226 | } |
227 | /* mark the address byte written to EC */ | ||
228 | if (rdata_len + wdata_len > 1) | ||
229 | set_bit(EC_FLAGS_ADDRESS, &ec->flags); | ||
219 | set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); | 230 | set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); |
220 | acpi_ec_write_data(ec, *(wdata++)); | 231 | acpi_ec_write_data(ec, *(wdata++)); |
221 | } | 232 | } |
@@ -231,8 +242,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, | |||
231 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | 242 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); |
232 | 243 | ||
233 | for (; rdata_len > 0; --rdata_len) { | 244 | for (; rdata_len > 0; --rdata_len) { |
234 | if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags)) | ||
235 | force_poll = 1; | ||
236 | result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll); | 245 | result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll); |
237 | if (result) { | 246 | if (result) { |
238 | printk(KERN_ERR PREFIX "read timeout, command = %d\n", | 247 | printk(KERN_ERR PREFIX "read timeout, command = %d\n", |
@@ -881,12 +890,20 @@ int __init acpi_ec_ecdt_probe(void) | |||
881 | boot_ec->gpe = ecdt_ptr->gpe; | 890 | boot_ec->gpe = ecdt_ptr->gpe; |
882 | boot_ec->handle = ACPI_ROOT_OBJECT; | 891 | boot_ec->handle = ACPI_ROOT_OBJECT; |
883 | } else { | 892 | } else { |
893 | /* This workaround is needed only on some broken machines, | ||
894 | * which require early EC, but fail to provide ECDT */ | ||
895 | acpi_handle x; | ||
884 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); | 896 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); |
885 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, | 897 | status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, |
886 | boot_ec, NULL); | 898 | boot_ec, NULL); |
887 | /* Check that acpi_get_devices actually find something */ | 899 | /* Check that acpi_get_devices actually find something */ |
888 | if (ACPI_FAILURE(status) || !boot_ec->handle) | 900 | if (ACPI_FAILURE(status) || !boot_ec->handle) |
889 | goto error; | 901 | goto error; |
902 | /* We really need to limit this workaround, the only ASUS, | ||
903 | * which needs it, has fake EC._INI method, so use it as flag. | ||
904 | */ | ||
905 | if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x))) | ||
906 | goto error; | ||
890 | } | 907 | } |
891 | 908 | ||
892 | ret = ec_install_handlers(boot_ec); | 909 | ret = ec_install_handlers(boot_ec); |
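
The DSDT fallback above is deliberately narrow: boards that need an early EC but ship no ECDT are probed from the namespace, and the workaround is only kept when the EC device exposes an _INI method (the known-affected ASUS firmware does; machines without that quirk skip the fallback). The gate is just an existence check:

	acpi_handle dummy;

	/* keep the ECDT-less early-EC path only if EC._INI exists */
	if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy)))
		goto error;	/* no _INI -> no early EC workaround needed */
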
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f996d0e37689..7b6c20eeeaff 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) | |||
197 | return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); | 197 | return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void acpi_safe_halt(void) | ||
201 | { | ||
202 | current_thread_info()->status &= ~TS_POLLING; | ||
203 | /* | ||
204 | * TS_POLLING-cleared state must be visible before we | ||
205 | * test NEED_RESCHED: | ||
206 | */ | ||
207 | smp_mb(); | ||
208 | if (!need_resched()) | ||
209 | safe_halt(); | ||
210 | current_thread_info()->status |= TS_POLLING; | ||
211 | } | ||
212 | |||
200 | #ifndef CONFIG_CPU_IDLE | 213 | #ifndef CONFIG_CPU_IDLE |
201 | 214 | ||
202 | static void | 215 | static void |
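
Moving acpi_safe_halt() above the #ifndef CONFIG_CPU_IDLE block makes the same helper usable from both the legacy idle path and the cpuidle handlers further down. The ordering inside it is the important part; an annotated restatement (the comments are illustrative, not from the patch):

	current_thread_info()->status &= ~TS_POLLING;	/* "stop polling, IPI me"        */
	smp_mb();					/* publish that store before the
							   need_resched() load below      */
	if (!need_resched())				/* re-check after the barrier     */
		safe_halt();				/* sti; hlt - an IPI wakes us     */
	current_thread_info()->status |= TS_POLLING;
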
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr, | |||
239 | return; | 252 | return; |
240 | } | 253 | } |
241 | 254 | ||
242 | static void acpi_safe_halt(void) | ||
243 | { | ||
244 | current_thread_info()->status &= ~TS_POLLING; | ||
245 | /* | ||
246 | * TS_POLLING-cleared state must be visible before we | ||
247 | * test NEED_RESCHED: | ||
248 | */ | ||
249 | smp_mb(); | ||
250 | if (!need_resched()) | ||
251 | safe_halt(); | ||
252 | current_thread_info()->status |= TS_POLLING; | ||
253 | } | ||
254 | |||
255 | static atomic_t c3_cpu_count; | 255 | static atomic_t c3_cpu_count; |
256 | 256 | ||
257 | /* Common C-state entry for C2, C3, .. */ | 257 | /* Common C-state entry for C2, C3, .. */ |
@@ -1373,15 +1373,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
1373 | if (pr->flags.bm_check) | 1373 | if (pr->flags.bm_check) |
1374 | acpi_idle_update_bm_rld(pr, cx); | 1374 | acpi_idle_update_bm_rld(pr, cx); |
1375 | 1375 | ||
1376 | current_thread_info()->status &= ~TS_POLLING; | 1376 | acpi_safe_halt(); |
1377 | /* | ||
1378 | * TS_POLLING-cleared state must be visible before we test | ||
1379 | * NEED_RESCHED: | ||
1380 | */ | ||
1381 | smp_mb(); | ||
1382 | if (!need_resched()) | ||
1383 | safe_halt(); | ||
1384 | current_thread_info()->status |= TS_POLLING; | ||
1385 | 1377 | ||
1386 | cx->usage++; | 1378 | cx->usage++; |
1387 | 1379 | ||
@@ -1399,6 +1391,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
1399 | struct acpi_processor *pr; | 1391 | struct acpi_processor *pr; |
1400 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 1392 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); |
1401 | u32 t1, t2; | 1393 | u32 t1, t2; |
1394 | int sleep_ticks = 0; | ||
1395 | |||
1402 | pr = processors[smp_processor_id()]; | 1396 | pr = processors[smp_processor_id()]; |
1403 | 1397 | ||
1404 | if (unlikely(!pr)) | 1398 | if (unlikely(!pr)) |
@@ -1428,6 +1422,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
1428 | ACPI_FLUSH_CPU_CACHE(); | 1422 | ACPI_FLUSH_CPU_CACHE(); |
1429 | 1423 | ||
1430 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1424 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
1425 | /* Tell the scheduler that we are going deep-idle: */ | ||
1426 | sched_clock_idle_sleep_event(); | ||
1431 | acpi_state_timer_broadcast(pr, cx, 1); | 1427 | acpi_state_timer_broadcast(pr, cx, 1); |
1432 | acpi_idle_do_entry(cx); | 1428 | acpi_idle_do_entry(cx); |
1433 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1429 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
@@ -1436,6 +1432,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
1436 | /* TSC could halt in idle, so notify users */ | 1432 | /* TSC could halt in idle, so notify users */ |
1437 | mark_tsc_unstable("TSC halts in idle");; | 1433 | mark_tsc_unstable("TSC halts in idle");; |
1438 | #endif | 1434 | #endif |
1435 | sleep_ticks = ticks_elapsed(t1, t2); | ||
1436 | |||
1437 | /* Tell the scheduler how much we idled: */ | ||
1438 | sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); | ||
1439 | 1439 | ||
1440 | local_irq_enable(); | 1440 | local_irq_enable(); |
1441 | current_thread_info()->status |= TS_POLLING; | 1441 | current_thread_info()->status |= TS_POLLING; |
@@ -1443,7 +1443,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
1443 | cx->usage++; | 1443 | cx->usage++; |
1444 | 1444 | ||
1445 | acpi_state_timer_broadcast(pr, cx, 0); | 1445 | acpi_state_timer_broadcast(pr, cx, 0); |
1446 | cx->time += ticks_elapsed(t1, t2); | 1446 | cx->time += sleep_ticks; |
1447 | return ticks_elapsed_in_us(t1, t2); | 1447 | return ticks_elapsed_in_us(t1, t2); |
1448 | } | 1448 | } |
1449 | 1449 | ||
@@ -1463,6 +1463,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1463 | struct acpi_processor *pr; | 1463 | struct acpi_processor *pr; |
1464 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 1464 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); |
1465 | u32 t1, t2; | 1465 | u32 t1, t2; |
1466 | int sleep_ticks = 0; | ||
1467 | |||
1466 | pr = processors[smp_processor_id()]; | 1468 | pr = processors[smp_processor_id()]; |
1467 | 1469 | ||
1468 | if (unlikely(!pr)) | 1470 | if (unlikely(!pr)) |
@@ -1471,6 +1473,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1471 | if (acpi_idle_suspend) | 1473 | if (acpi_idle_suspend) |
1472 | return(acpi_idle_enter_c1(dev, state)); | 1474 | return(acpi_idle_enter_c1(dev, state)); |
1473 | 1475 | ||
1476 | if (acpi_idle_bm_check()) { | ||
1477 | if (dev->safe_state) { | ||
1478 | return dev->safe_state->enter(dev, dev->safe_state); | ||
1479 | } else { | ||
1480 | acpi_safe_halt(); | ||
1481 | return 0; | ||
1482 | } | ||
1483 | } | ||
1484 | |||
1474 | local_irq_disable(); | 1485 | local_irq_disable(); |
1475 | current_thread_info()->status &= ~TS_POLLING; | 1486 | current_thread_info()->status &= ~TS_POLLING; |
1476 | /* | 1487 | /* |
@@ -1485,38 +1496,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1485 | return 0; | 1496 | return 0; |
1486 | } | 1497 | } |
1487 | 1498 | ||
1499 | /* Tell the scheduler that we are going deep-idle: */ | ||
1500 | sched_clock_idle_sleep_event(); | ||
1488 | /* | 1501 | /* |
1489 | * Must be done before busmaster disable as we might need to | 1502 | * Must be done before busmaster disable as we might need to |
1490 | * access HPET ! | 1503 | * access HPET ! |
1491 | */ | 1504 | */ |
1492 | acpi_state_timer_broadcast(pr, cx, 1); | 1505 | acpi_state_timer_broadcast(pr, cx, 1); |
1493 | 1506 | ||
1494 | if (acpi_idle_bm_check()) { | 1507 | acpi_idle_update_bm_rld(pr, cx); |
1495 | cx = pr->power.bm_state; | ||
1496 | |||
1497 | acpi_idle_update_bm_rld(pr, cx); | ||
1498 | |||
1499 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
1500 | acpi_idle_do_entry(cx); | ||
1501 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
1502 | } else { | ||
1503 | acpi_idle_update_bm_rld(pr, cx); | ||
1504 | 1508 | ||
1509 | /* | ||
1510 | * disable bus master | ||
1511 | * bm_check implies we need ARB_DIS | ||
1512 | * !bm_check implies we need cache flush | ||
1513 | * bm_control implies whether we can do ARB_DIS | ||
1514 | * | ||
1515 | * That leaves a case where bm_check is set and bm_control is | ||
1516 | * not set. In that case we cannot do much, we enter C3 | ||
1517 | * without doing anything. | ||
1518 | */ | ||
1519 | if (pr->flags.bm_check && pr->flags.bm_control) { | ||
1505 | spin_lock(&c3_lock); | 1520 | spin_lock(&c3_lock); |
1506 | c3_cpu_count++; | 1521 | c3_cpu_count++; |
1507 | /* Disable bus master arbitration when all CPUs are in C3 */ | 1522 | /* Disable bus master arbitration when all CPUs are in C3 */ |
1508 | if (c3_cpu_count == num_online_cpus()) | 1523 | if (c3_cpu_count == num_online_cpus()) |
1509 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); | 1524 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); |
1510 | spin_unlock(&c3_lock); | 1525 | spin_unlock(&c3_lock); |
1526 | } else if (!pr->flags.bm_check) { | ||
1527 | ACPI_FLUSH_CPU_CACHE(); | ||
1528 | } | ||
1511 | 1529 | ||
1512 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1530 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
1513 | acpi_idle_do_entry(cx); | 1531 | acpi_idle_do_entry(cx); |
1514 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 1532 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
1515 | 1533 | ||
1534 | /* Re-enable bus master arbitration */ | ||
1535 | if (pr->flags.bm_check && pr->flags.bm_control) { | ||
1516 | spin_lock(&c3_lock); | 1536 | spin_lock(&c3_lock); |
1517 | /* Re-enable bus master arbitration */ | 1537 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); |
1518 | if (c3_cpu_count == num_online_cpus()) | ||
1519 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); | ||
1520 | c3_cpu_count--; | 1538 | c3_cpu_count--; |
1521 | spin_unlock(&c3_lock); | 1539 | spin_unlock(&c3_lock); |
1522 | } | 1540 | } |
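
The open-coded bm_check branch is gone; the C3 entry path now distinguishes the three hardware cases the new comment describes. A compact sketch of the bracketing around acpi_idle_do_entry() (c3_lock/c3_cpu_count are the patch's own names; the PM-timer reads are omitted here):

	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		if (++c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);	/* last CPU in */
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();		/* no BM tracking: flush before C3 */
	}

	acpi_idle_do_entry(cx);

	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);	/* unconditional re-enable */
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
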
@@ -1525,6 +1543,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1525 | /* TSC could halt in idle, so notify users */ | 1543 | /* TSC could halt in idle, so notify users */ |
1526 | mark_tsc_unstable("TSC halts in idle"); | 1544 | mark_tsc_unstable("TSC halts in idle"); |
1527 | #endif | 1545 | #endif |
1546 | sleep_ticks = ticks_elapsed(t1, t2); | ||
1547 | /* Tell the scheduler how much we idled: */ | ||
1548 | sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); | ||
1528 | 1549 | ||
1529 | local_irq_enable(); | 1550 | local_irq_enable(); |
1530 | current_thread_info()->status |= TS_POLLING; | 1551 | current_thread_info()->status |= TS_POLLING; |
@@ -1532,7 +1553,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1532 | cx->usage++; | 1553 | cx->usage++; |
1533 | 1554 | ||
1534 | acpi_state_timer_broadcast(pr, cx, 0); | 1555 | acpi_state_timer_broadcast(pr, cx, 0); |
1535 | cx->time += ticks_elapsed(t1, t2); | 1556 | cx->time += sleep_ticks; |
1536 | return ticks_elapsed_in_us(t1, t2); | 1557 | return ticks_elapsed_in_us(t1, t2); |
1537 | } | 1558 | } |
1538 | 1559 | ||
@@ -1584,12 +1605,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1584 | case ACPI_STATE_C1: | 1605 | case ACPI_STATE_C1: |
1585 | state->flags |= CPUIDLE_FLAG_SHALLOW; | 1606 | state->flags |= CPUIDLE_FLAG_SHALLOW; |
1586 | state->enter = acpi_idle_enter_c1; | 1607 | state->enter = acpi_idle_enter_c1; |
1608 | dev->safe_state = state; | ||
1587 | break; | 1609 | break; |
1588 | 1610 | ||
1589 | case ACPI_STATE_C2: | 1611 | case ACPI_STATE_C2: |
1590 | state->flags |= CPUIDLE_FLAG_BALANCED; | 1612 | state->flags |= CPUIDLE_FLAG_BALANCED; |
1591 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1613 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
1592 | state->enter = acpi_idle_enter_simple; | 1614 | state->enter = acpi_idle_enter_simple; |
1615 | dev->safe_state = state; | ||
1593 | break; | 1616 | break; |
1594 | 1617 | ||
1595 | case ACPI_STATE_C3: | 1618 | case ACPI_STATE_C3: |
@@ -1610,14 +1633,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1610 | if (!count) | 1633 | if (!count) |
1611 | return -EINVAL; | 1634 | return -EINVAL; |
1612 | 1635 | ||
1613 | /* find the deepest state that can handle active BM */ | ||
1614 | if (pr->flags.bm_check) { | ||
1615 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) | ||
1616 | if (pr->power.states[i].type == ACPI_STATE_C3) | ||
1617 | break; | ||
1618 | pr->power.bm_state = &pr->power.states[i-1]; | ||
1619 | } | ||
1620 | |||
1621 | return 0; | 1636 | return 0; |
1622 | } | 1637 | } |
1623 | 1638 | ||
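
With bm_state gone, the deepest-safe-state bookkeeping moves into cpuidle itself: dev->safe_state is set to the last C1/C2 state registered, and acpi_idle_enter_bm() bails out to it (or to a plain halt) when bus-master activity is detected before committing to C3. In outline, combining both hunks above into one sketch:

	/* registration: remember the deepest state that never needs ARB_DIS */
	case ACPI_STATE_C1:
	case ACPI_STATE_C2:
		dev->safe_state = state;
		break;

	/* C3 entry: demote early if bus masters are active */
	if (acpi_idle_bm_check()) {
		if (dev->safe_state)
			return dev->safe_state->enter(dev, dev->safe_state);
		acpi_safe_halt();
		return 0;
	}
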
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 90fd09c65f95..6045cdbe176b 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/moduleparam.h> | 29 | #include <linux/moduleparam.h> |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | 31 | ||
32 | #ifdef CONFIG_ACPI_PROCFS | 32 | #ifdef CONFIG_ACPI_PROCFS_POWER |
33 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -88,7 +88,7 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids); | |||
88 | struct acpi_battery { | 88 | struct acpi_battery { |
89 | struct power_supply bat; | 89 | struct power_supply bat; |
90 | struct acpi_sbs *sbs; | 90 | struct acpi_sbs *sbs; |
91 | #ifdef CONFIG_ACPI_PROCFS | 91 | #ifdef CONFIG_ACPI_PROCFS_POWER |
92 | struct proc_dir_entry *proc_entry; | 92 | struct proc_dir_entry *proc_entry; |
93 | #endif | 93 | #endif |
94 | unsigned long update_time; | 94 | unsigned long update_time; |
@@ -113,6 +113,7 @@ struct acpi_battery { | |||
113 | u16 spec; | 113 | u16 spec; |
114 | u8 id; | 114 | u8 id; |
115 | u8 present:1; | 115 | u8 present:1; |
116 | u8 have_sysfs_alarm:1; | ||
116 | }; | 117 | }; |
117 | 118 | ||
118 | #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); | 119 | #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); |
@@ -122,7 +123,7 @@ struct acpi_sbs { | |||
122 | struct acpi_device *device; | 123 | struct acpi_device *device; |
123 | struct acpi_smb_hc *hc; | 124 | struct acpi_smb_hc *hc; |
124 | struct mutex lock; | 125 | struct mutex lock; |
125 | #ifdef CONFIG_ACPI_PROCFS | 126 | #ifdef CONFIG_ACPI_PROCFS_POWER |
126 | struct proc_dir_entry *charger_entry; | 127 | struct proc_dir_entry *charger_entry; |
127 | #endif | 128 | #endif |
128 | struct acpi_battery battery[MAX_SBS_BAT]; | 129 | struct acpi_battery battery[MAX_SBS_BAT]; |
@@ -468,7 +469,7 @@ static struct device_attribute alarm_attr = { | |||
468 | FS Interface (/proc/acpi) | 469 | FS Interface (/proc/acpi) |
469 | -------------------------------------------------------------------------- */ | 470 | -------------------------------------------------------------------------- */ |
470 | 471 | ||
471 | #ifdef CONFIG_ACPI_PROCFS | 472 | #ifdef CONFIG_ACPI_PROCFS_POWER |
472 | /* Generic Routines */ | 473 | /* Generic Routines */ |
473 | static int | 474 | static int |
474 | acpi_sbs_add_fs(struct proc_dir_entry **dir, | 475 | acpi_sbs_add_fs(struct proc_dir_entry **dir, |
@@ -789,7 +790,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) | |||
789 | return result; | 790 | return result; |
790 | 791 | ||
791 | sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id); | 792 | sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id); |
792 | #ifdef CONFIG_ACPI_PROCFS | 793 | #ifdef CONFIG_ACPI_PROCFS_POWER |
793 | acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir, | 794 | acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir, |
794 | battery->name, &acpi_battery_info_fops, | 795 | battery->name, &acpi_battery_info_fops, |
795 | &acpi_battery_state_fops, &acpi_battery_alarm_fops, | 796 | &acpi_battery_state_fops, &acpi_battery_alarm_fops, |
@@ -808,7 +809,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) | |||
808 | } | 809 | } |
809 | battery->bat.get_property = acpi_sbs_battery_get_property; | 810 | battery->bat.get_property = acpi_sbs_battery_get_property; |
810 | result = power_supply_register(&sbs->device->dev, &battery->bat); | 811 | result = power_supply_register(&sbs->device->dev, &battery->bat); |
811 | device_create_file(battery->bat.dev, &alarm_attr); | 812 | if (result) |
813 | goto end; | ||
814 | result = device_create_file(battery->bat.dev, &alarm_attr); | ||
815 | if (result) | ||
816 | goto end; | ||
817 | battery->have_sysfs_alarm = 1; | ||
818 | end: | ||
812 | printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n", | 819 | printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n", |
813 | ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), | 820 | ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), |
814 | battery->name, sbs->battery->present ? "present" : "absent"); | 821 | battery->name, sbs->battery->present ? "present" : "absent"); |
@@ -817,14 +824,16 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) | |||
817 | 824 | ||
818 | static void acpi_battery_remove(struct acpi_sbs *sbs, int id) | 825 | static void acpi_battery_remove(struct acpi_sbs *sbs, int id) |
819 | { | 826 | { |
820 | if (sbs->battery[id].bat.dev) | 827 | struct acpi_battery *battery = &sbs->battery[id]; |
821 | device_remove_file(sbs->battery[id].bat.dev, &alarm_attr); | 828 | |
822 | power_supply_unregister(&sbs->battery[id].bat); | 829 | if (battery->bat.dev) { |
823 | #ifdef CONFIG_ACPI_PROCFS | 830 | if (battery->have_sysfs_alarm) |
824 | if (sbs->battery[id].proc_entry) { | 831 | device_remove_file(battery->bat.dev, &alarm_attr); |
825 | acpi_sbs_remove_fs(&(sbs->battery[id].proc_entry), | 832 | power_supply_unregister(&battery->bat); |
826 | acpi_battery_dir); | ||
827 | } | 833 | } |
834 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
835 | if (battery->proc_entry) | ||
836 | acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir); | ||
828 | #endif | 837 | #endif |
829 | } | 838 | } |
830 | 839 | ||
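
The sbs.c changes make teardown symmetric with what setup actually created: power_supply_register() and device_create_file() are now checked, and a one-bit have_sysfs_alarm flag records whether the alarm attribute exists so removal never touches a file that was never created. Reduced to its core (names as in the patch):

	/* add */
	result = power_supply_register(&sbs->device->dev, &battery->bat);
	if (result)
		goto end;
	result = device_create_file(battery->bat.dev, &alarm_attr);
	if (result)
		goto end;
	battery->have_sysfs_alarm = 1;

	/* remove */
	if (battery->bat.dev) {
		if (battery->have_sysfs_alarm)
			device_remove_file(battery->bat.dev, &alarm_attr);
		power_supply_unregister(&battery->bat);
	}
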
@@ -835,7 +844,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs) | |||
835 | result = acpi_ac_get_present(sbs); | 844 | result = acpi_ac_get_present(sbs); |
836 | if (result) | 845 | if (result) |
837 | goto end; | 846 | goto end; |
838 | #ifdef CONFIG_ACPI_PROCFS | 847 | #ifdef CONFIG_ACPI_PROCFS_POWER |
839 | result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir, | 848 | result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir, |
840 | ACPI_AC_DIR_NAME, NULL, | 849 | ACPI_AC_DIR_NAME, NULL, |
841 | &acpi_ac_state_fops, NULL, sbs); | 850 | &acpi_ac_state_fops, NULL, sbs); |
@@ -859,7 +868,7 @@ static void acpi_charger_remove(struct acpi_sbs *sbs) | |||
859 | { | 868 | { |
860 | if (sbs->charger.dev) | 869 | if (sbs->charger.dev) |
861 | power_supply_unregister(&sbs->charger); | 870 | power_supply_unregister(&sbs->charger); |
862 | #ifdef CONFIG_ACPI_PROCFS | 871 | #ifdef CONFIG_ACPI_PROCFS_POWER |
863 | if (sbs->charger_entry) | 872 | if (sbs->charger_entry) |
864 | acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir); | 873 | acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir); |
865 | #endif | 874 | #endif |
@@ -965,7 +974,7 @@ static int acpi_sbs_remove(struct acpi_device *device, int type) | |||
965 | 974 | ||
966 | static void acpi_sbs_rmdirs(void) | 975 | static void acpi_sbs_rmdirs(void) |
967 | { | 976 | { |
968 | #ifdef CONFIG_ACPI_PROCFS | 977 | #ifdef CONFIG_ACPI_PROCFS_POWER |
969 | if (acpi_ac_dir) { | 978 | if (acpi_ac_dir) { |
970 | acpi_unlock_ac_dir(acpi_ac_dir); | 979 | acpi_unlock_ac_dir(acpi_ac_dir); |
971 | acpi_ac_dir = NULL; | 980 | acpi_ac_dir = NULL; |
@@ -1004,7 +1013,7 @@ static int __init acpi_sbs_init(void) | |||
1004 | 1013 | ||
1005 | if (acpi_disabled) | 1014 | if (acpi_disabled) |
1006 | return -ENODEV; | 1015 | return -ENODEV; |
1007 | #ifdef CONFIG_ACPI_PROCFS | 1016 | #ifdef CONFIG_ACPI_PROCFS_POWER |
1008 | acpi_ac_dir = acpi_lock_ac_dir(); | 1017 | acpi_ac_dir = acpi_lock_ac_dir(); |
1009 | if (!acpi_ac_dir) | 1018 | if (!acpi_ac_dir) |
1010 | return -ENODEV; | 1019 | return -ENODEV; |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 4bd33ce8a6f3..1bba99747f5b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -37,17 +37,17 @@ | |||
37 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 37 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
38 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) | 38 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * The polling frequency of this governor depends on the capability of | 41 | * The polling frequency of this governor depends on the capability of |
42 | * the processor. Default polling frequency is 1000 times the transition | 42 | * the processor. Default polling frequency is 1000 times the transition |
43 | * latency of the processor. The governor will work on any processor with | 43 | * latency of the processor. The governor will work on any processor with |
44 | * transition latency <= 10mS, using appropriate sampling | 44 | * transition latency <= 10mS, using appropriate sampling |
45 | * rate. | 45 | * rate. |
46 | * For CPUs with transition latency > 10mS (mostly drivers | 46 | * For CPUs with transition latency > 10mS (mostly drivers |
47 | * with CPUFREQ_ETERNAL), this governor will not work. | 47 | * with CPUFREQ_ETERNAL), this governor will not work. |
48 | * All times here are in uS. | 48 | * All times here are in uS. |
49 | */ | 49 | */ |
50 | static unsigned int def_sampling_rate; | 50 | static unsigned int def_sampling_rate; |
51 | #define MIN_SAMPLING_RATE_RATIO (2) | 51 | #define MIN_SAMPLING_RATE_RATIO (2) |
52 | /* for correct statistics, we need at least 10 ticks between each measure */ | 52 | /* for correct statistics, we need at least 10 ticks between each measure */ |
53 | #define MIN_STAT_SAMPLING_RATE \ | 53 | #define MIN_STAT_SAMPLING_RATE \ |
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate; | |||
63 | static void do_dbs_timer(struct work_struct *work); | 63 | static void do_dbs_timer(struct work_struct *work); |
64 | 64 | ||
65 | struct cpu_dbs_info_s { | 65 | struct cpu_dbs_info_s { |
66 | struct cpufreq_policy *cur_policy; | 66 | struct cpufreq_policy *cur_policy; |
67 | unsigned int prev_cpu_idle_up; | 67 | unsigned int prev_cpu_idle_up; |
68 | unsigned int prev_cpu_idle_down; | 68 | unsigned int prev_cpu_idle_down; |
69 | unsigned int enable; | 69 | unsigned int enable; |
70 | unsigned int down_skip; | 70 | unsigned int down_skip; |
71 | unsigned int requested_freq; | 71 | unsigned int requested_freq; |
72 | }; | 72 | }; |
73 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); | 73 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); |
74 | 74 | ||
@@ -82,24 +82,24 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ | |||
82 | * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock | 82 | * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock |
83 | * is recursive for the same process. -Venki | 83 | * is recursive for the same process. -Venki |
84 | */ | 84 | */ |
85 | static DEFINE_MUTEX (dbs_mutex); | 85 | static DEFINE_MUTEX (dbs_mutex); |
86 | static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); | 86 | static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); |
87 | 87 | ||
88 | struct dbs_tuners { | 88 | struct dbs_tuners { |
89 | unsigned int sampling_rate; | 89 | unsigned int sampling_rate; |
90 | unsigned int sampling_down_factor; | 90 | unsigned int sampling_down_factor; |
91 | unsigned int up_threshold; | 91 | unsigned int up_threshold; |
92 | unsigned int down_threshold; | 92 | unsigned int down_threshold; |
93 | unsigned int ignore_nice; | 93 | unsigned int ignore_nice; |
94 | unsigned int freq_step; | 94 | unsigned int freq_step; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | static struct dbs_tuners dbs_tuners_ins = { | 97 | static struct dbs_tuners dbs_tuners_ins = { |
98 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 98 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
99 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, | 99 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, |
100 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 100 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
101 | .ignore_nice = 0, | 101 | .ignore_nice = 0, |
102 | .freq_step = 5, | 102 | .freq_step = 5, |
103 | }; | 103 | }; |
104 | 104 | ||
105 | static inline unsigned int get_cpu_idle_time(unsigned int cpu) | 105 | static inline unsigned int get_cpu_idle_time(unsigned int cpu) |
@@ -109,13 +109,34 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu) | |||
109 | if (dbs_tuners_ins.ignore_nice) | 109 | if (dbs_tuners_ins.ignore_nice) |
110 | add_nice = kstat_cpu(cpu).cpustat.nice; | 110 | add_nice = kstat_cpu(cpu).cpustat.nice; |
111 | 111 | ||
112 | ret = kstat_cpu(cpu).cpustat.idle + | 112 | ret = kstat_cpu(cpu).cpustat.idle + |
113 | kstat_cpu(cpu).cpustat.iowait + | 113 | kstat_cpu(cpu).cpustat.iowait + |
114 | add_nice; | 114 | add_nice; |
115 | 115 | ||
116 | return ret; | 116 | return ret; |
117 | } | 117 | } |
118 | 118 | ||
119 | /* keep track of frequency transitions */ | ||
120 | static int | ||
121 | dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | ||
122 | void *data) | ||
123 | { | ||
124 | struct cpufreq_freqs *freq = data; | ||
125 | struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, | ||
126 | freq->cpu); | ||
127 | |||
128 | if (!this_dbs_info->enable) | ||
129 | return 0; | ||
130 | |||
131 | this_dbs_info->requested_freq = freq->new; | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static struct notifier_block dbs_cpufreq_notifier_block = { | ||
137 | .notifier_call = dbs_cpufreq_notifier | ||
138 | }; | ||
139 | |||
119 | /************************** sysfs interface ************************/ | 140 | /************************** sysfs interface ************************/ |
120 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) | 141 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) |
121 | { | 142 | { |
@@ -127,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) | |||
127 | return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); | 148 | return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); |
128 | } | 149 | } |
129 | 150 | ||
130 | #define define_one_ro(_name) \ | 151 | #define define_one_ro(_name) \ |
131 | static struct freq_attr _name = \ | 152 | static struct freq_attr _name = \ |
132 | __ATTR(_name, 0444, show_##_name, NULL) | 153 | __ATTR(_name, 0444, show_##_name, NULL) |
133 | 154 | ||
134 | define_one_ro(sampling_rate_max); | 155 | define_one_ro(sampling_rate_max); |
@@ -148,7 +169,7 @@ show_one(down_threshold, down_threshold); | |||
148 | show_one(ignore_nice_load, ignore_nice); | 169 | show_one(ignore_nice_load, ignore_nice); |
149 | show_one(freq_step, freq_step); | 170 | show_one(freq_step, freq_step); |
150 | 171 | ||
151 | static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, | 172 | static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, |
152 | const char *buf, size_t count) | 173 | const char *buf, size_t count) |
153 | { | 174 | { |
154 | unsigned int input; | 175 | unsigned int input; |
@@ -164,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, | |||
164 | return count; | 185 | return count; |
165 | } | 186 | } |
166 | 187 | ||
167 | static ssize_t store_sampling_rate(struct cpufreq_policy *unused, | 188 | static ssize_t store_sampling_rate(struct cpufreq_policy *unused, |
168 | const char *buf, size_t count) | 189 | const char *buf, size_t count) |
169 | { | 190 | { |
170 | unsigned int input; | 191 | unsigned int input; |
@@ -183,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused, | |||
183 | return count; | 204 | return count; |
184 | } | 205 | } |
185 | 206 | ||
186 | static ssize_t store_up_threshold(struct cpufreq_policy *unused, | 207 | static ssize_t store_up_threshold(struct cpufreq_policy *unused, |
187 | const char *buf, size_t count) | 208 | const char *buf, size_t count) |
188 | { | 209 | { |
189 | unsigned int input; | 210 | unsigned int input; |
@@ -202,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused, | |||
202 | return count; | 223 | return count; |
203 | } | 224 | } |
204 | 225 | ||
205 | static ssize_t store_down_threshold(struct cpufreq_policy *unused, | 226 | static ssize_t store_down_threshold(struct cpufreq_policy *unused, |
206 | const char *buf, size_t count) | 227 | const char *buf, size_t count) |
207 | { | 228 | { |
208 | unsigned int input; | 229 | unsigned int input; |
@@ -228,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, | |||
228 | int ret; | 249 | int ret; |
229 | 250 | ||
230 | unsigned int j; | 251 | unsigned int j; |
231 | 252 | ||
232 | ret = sscanf (buf, "%u", &input); | 253 | ret = sscanf(buf, "%u", &input); |
233 | if ( ret != 1 ) | 254 | if (ret != 1) |
234 | return -EINVAL; | 255 | return -EINVAL; |
235 | 256 | ||
236 | if ( input > 1 ) | 257 | if (input > 1) |
237 | input = 1; | 258 | input = 1; |
238 | 259 | ||
239 | mutex_lock(&dbs_mutex); | 260 | mutex_lock(&dbs_mutex); |
240 | if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */ | 261 | if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ |
241 | mutex_unlock(&dbs_mutex); | 262 | mutex_unlock(&dbs_mutex); |
242 | return count; | 263 | return count; |
243 | } | 264 | } |
@@ -261,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy, | |||
261 | unsigned int input; | 282 | unsigned int input; |
262 | int ret; | 283 | int ret; |
263 | 284 | ||
264 | ret = sscanf (buf, "%u", &input); | 285 | ret = sscanf(buf, "%u", &input); |
265 | 286 | ||
266 | if ( ret != 1 ) | 287 | if (ret != 1) |
267 | return -EINVAL; | 288 | return -EINVAL; |
268 | 289 | ||
269 | if ( input > 100 ) | 290 | if (input > 100) |
270 | input = 100; | 291 | input = 100; |
271 | 292 | ||
272 | /* no need to test here if freq_step is zero as the user might actually | 293 | /* no need to test here if freq_step is zero as the user might actually |
273 | * want this, they would be crazy though :) */ | 294 | * want this, they would be crazy though :) */ |
274 | mutex_lock(&dbs_mutex); | 295 | mutex_lock(&dbs_mutex); |
@@ -322,18 +343,18 @@ static void dbs_check_cpu(int cpu) | |||
322 | 343 | ||
323 | policy = this_dbs_info->cur_policy; | 344 | policy = this_dbs_info->cur_policy; |
324 | 345 | ||
325 | /* | 346 | /* |
326 | * The default safe range is 20% to 80% | 347 | * The default safe range is 20% to 80% |
327 | * Every sampling_rate, we check | 348 | * Every sampling_rate, we check |
328 | * - If current idle time is less than 20%, then we try to | 349 | * - If current idle time is less than 20%, then we try to |
329 | * increase frequency | 350 | * increase frequency |
330 | * Every sampling_rate*sampling_down_factor, we check | 351 | * Every sampling_rate*sampling_down_factor, we check |
331 | * - If current idle time is more than 80%, then we try to | 352 | * - If current idle time is more than 80%, then we try to |
332 | * decrease frequency | 353 | * decrease frequency |
333 | * | 354 | * |
334 | * Any frequency increase takes it to the maximum frequency. | 355 | * Any frequency increase takes it to the maximum frequency. |
335 | * Frequency reduction happens at minimum steps of | 356 | * Frequency reduction happens at minimum steps of |
336 | * 5% (default) of max_frequency | 357 | * 5% (default) of max_frequency |
337 | */ | 358 | */ |
338 | 359 | ||
339 | /* Check for frequency increase */ | 360 | /* Check for frequency increase */ |
@@ -361,13 +382,13 @@ static void dbs_check_cpu(int cpu) | |||
361 | /* if we are already at full speed then break out early */ | 382 | /* if we are already at full speed then break out early */ |
362 | if (this_dbs_info->requested_freq == policy->max) | 383 | if (this_dbs_info->requested_freq == policy->max) |
363 | return; | 384 | return; |
364 | 385 | ||
365 | freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; | 386 | freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; |
366 | 387 | ||
367 | /* max freq cannot be less than 100. But who knows.... */ | 388 | /* max freq cannot be less than 100. But who knows.... */ |
368 | if (unlikely(freq_step == 0)) | 389 | if (unlikely(freq_step == 0)) |
369 | freq_step = 5; | 390 | freq_step = 5; |
370 | 391 | ||
371 | this_dbs_info->requested_freq += freq_step; | 392 | this_dbs_info->requested_freq += freq_step; |
372 | if (this_dbs_info->requested_freq > policy->max) | 393 | if (this_dbs_info->requested_freq > policy->max) |
373 | this_dbs_info->requested_freq = policy->max; | 394 | this_dbs_info->requested_freq = policy->max; |
@@ -427,15 +448,15 @@ static void dbs_check_cpu(int cpu) | |||
427 | } | 448 | } |
428 | 449 | ||
429 | static void do_dbs_timer(struct work_struct *work) | 450 | static void do_dbs_timer(struct work_struct *work) |
430 | { | 451 | { |
431 | int i; | 452 | int i; |
432 | mutex_lock(&dbs_mutex); | 453 | mutex_lock(&dbs_mutex); |
433 | for_each_online_cpu(i) | 454 | for_each_online_cpu(i) |
434 | dbs_check_cpu(i); | 455 | dbs_check_cpu(i); |
435 | schedule_delayed_work(&dbs_work, | 456 | schedule_delayed_work(&dbs_work, |
436 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); | 457 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); |
437 | mutex_unlock(&dbs_mutex); | 458 | mutex_unlock(&dbs_mutex); |
438 | } | 459 | } |
439 | 460 | ||
440 | static inline void dbs_timer_init(void) | 461 | static inline void dbs_timer_init(void) |
441 | { | 462 | { |
@@ -462,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
462 | 483 | ||
463 | switch (event) { | 484 | switch (event) { |
464 | case CPUFREQ_GOV_START: | 485 | case CPUFREQ_GOV_START: |
465 | if ((!cpu_online(cpu)) || | 486 | if ((!cpu_online(cpu)) || (!policy->cur)) |
466 | (!policy->cur)) | ||
467 | return -EINVAL; | 487 | return -EINVAL; |
468 | 488 | ||
469 | if (this_dbs_info->enable) /* Already enabled */ | 489 | if (this_dbs_info->enable) /* Already enabled */ |
470 | break; | 490 | break; |
471 | 491 | ||
472 | mutex_lock(&dbs_mutex); | 492 | mutex_lock(&dbs_mutex); |
473 | 493 | ||
474 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); | 494 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); |
@@ -481,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
481 | struct cpu_dbs_info_s *j_dbs_info; | 501 | struct cpu_dbs_info_s *j_dbs_info; |
482 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 502 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
483 | j_dbs_info->cur_policy = policy; | 503 | j_dbs_info->cur_policy = policy; |
484 | 504 | ||
485 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); | 505 | j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); |
486 | j_dbs_info->prev_cpu_idle_down | 506 | j_dbs_info->prev_cpu_idle_down |
487 | = j_dbs_info->prev_cpu_idle_up; | 507 | = j_dbs_info->prev_cpu_idle_up; |
@@ -511,8 +531,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
511 | dbs_tuners_ins.sampling_rate = def_sampling_rate; | 531 | dbs_tuners_ins.sampling_rate = def_sampling_rate; |
512 | 532 | ||
513 | dbs_timer_init(); | 533 | dbs_timer_init(); |
534 | cpufreq_register_notifier( | ||
535 | &dbs_cpufreq_notifier_block, | ||
536 | CPUFREQ_TRANSITION_NOTIFIER); | ||
514 | } | 537 | } |
515 | 538 | ||
516 | mutex_unlock(&dbs_mutex); | 539 | mutex_unlock(&dbs_mutex); |
517 | break; | 540 | break; |
518 | 541 | ||
@@ -525,9 +548,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
525 | * Stop the timerschedule work, when this governor | 548 | * Stop the timerschedule work, when this governor |
526 | * is used for first time | 549 | * is used for first time |
527 | */ | 550 | */ |
528 | if (dbs_enable == 0) | 551 | if (dbs_enable == 0) { |
529 | dbs_timer_exit(); | 552 | dbs_timer_exit(); |
530 | 553 | cpufreq_unregister_notifier( | |
554 | &dbs_cpufreq_notifier_block, | ||
555 | CPUFREQ_TRANSITION_NOTIFIER); | ||
556 | } | ||
557 | |||
531 | mutex_unlock(&dbs_mutex); | 558 | mutex_unlock(&dbs_mutex); |
532 | 559 | ||
533 | break; | 560 | break; |
@@ -537,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
537 | if (policy->max < this_dbs_info->cur_policy->cur) | 564 | if (policy->max < this_dbs_info->cur_policy->cur) |
538 | __cpufreq_driver_target( | 565 | __cpufreq_driver_target( |
539 | this_dbs_info->cur_policy, | 566 | this_dbs_info->cur_policy, |
540 | policy->max, CPUFREQ_RELATION_H); | 567 | policy->max, CPUFREQ_RELATION_H); |
541 | else if (policy->min > this_dbs_info->cur_policy->cur) | 568 | else if (policy->min > this_dbs_info->cur_policy->cur) |
542 | __cpufreq_driver_target( | 569 | __cpufreq_driver_target( |
543 | this_dbs_info->cur_policy, | 570 | this_dbs_info->cur_policy, |
544 | policy->min, CPUFREQ_RELATION_L); | 571 | policy->min, CPUFREQ_RELATION_L); |
545 | mutex_unlock(&dbs_mutex); | 572 | mutex_unlock(&dbs_mutex); |
546 | break; | 573 | break; |
547 | } | 574 | } |
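
The conservative governor previously assumed it was the only thing changing the frequency, so requested_freq could drift away from reality after an externally initiated transition. The new transition notifier keeps it in sync; it is registered when the first CPU starts the governor and unregistered when the last one stops. Roughly:

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *dbs = &per_cpu(cpu_dbs_info, freq->cpu);

	if (dbs->enable)
		dbs->requested_freq = freq->new;   /* track external changes */
	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

	/* first CPUFREQ_GOV_START */
	cpufreq_register_notifier(&dbs_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	/* last CPUFREQ_GOV_STOP */
	cpufreq_unregister_notifier(&dbs_cpufreq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
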
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index cd0a204d96d1..11adab13f2b7 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -75,6 +75,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, | |||
75 | { | 75 | { |
76 | int i = 0; | 76 | int i = 0; |
77 | int irq; | 77 | int irq; |
78 | int p, t; | ||
78 | 79 | ||
79 | if (!valid_IRQ(gsi)) | 80 | if (!valid_IRQ(gsi)) |
80 | return; | 81 | return; |
@@ -85,15 +86,22 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, | |||
85 | if (i >= PNP_MAX_IRQ) | 86 | if (i >= PNP_MAX_IRQ) |
86 | return; | 87 | return; |
87 | 88 | ||
88 | #ifdef CONFIG_X86 | 89 | /* |
89 | if (gsi < 16 && (triggering != ACPI_EDGE_SENSITIVE || | 90 | * in IO-APIC mode, use overrided attribute. Two reasons: |
90 | polarity != ACPI_ACTIVE_HIGH)) { | 91 | * 1. BIOS bug in DSDT |
91 | pnp_warn("BIOS BUG: legacy PNP IRQ %d should be edge trigger, " | 92 | * 2. BIOS uses IO-APIC mode Interrupt Source Override |
92 | "active high", gsi); | 93 | */ |
93 | triggering = ACPI_EDGE_SENSITIVE; | 94 | if (!acpi_get_override_irq(gsi, &t, &p)) { |
94 | polarity = ACPI_ACTIVE_HIGH; | 95 | t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; |
96 | p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
97 | |||
98 | if (triggering != t || polarity != p) { | ||
99 | pnp_warn("IRQ %d override to %s, %s", | ||
100 | gsi, t ? "edge":"level", p ? "low":"high"); | ||
101 | triggering = t; | ||
102 | polarity = p; | ||
103 | } | ||
95 | } | 104 | } |
96 | #endif | ||
97 | 105 | ||
98 | res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag | 106 | res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag |
99 | res->irq_resource[i].flags |= irq_flags(triggering, polarity); | 107 | res->irq_resource[i].flags |= irq_flags(triggering, polarity); |
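
Instead of the x86-only "legacy IRQs are edge/high" heuristic, the parser now asks the IO-APIC layer whether an Interrupt Source Override exists for the GSI and, if so, trusts it over the DSDT. With the fallback macro added to <linux/acpi.h> below, non-IO-APIC builds get -1 and skip the block entirely. The core of it (the warning printed on a mismatch is left out of this sketch):

	int t, p;

	/* returns 0 when an override entry exists: t/p are 1 for level/low */
	if (!acpi_get_override_irq(gsi, &t, &p)) {
		triggering = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
		polarity   = p ? ACPI_ACTIVE_LOW      : ACPI_ACTIVE_HIGH;
	}
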
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 26d79f6db8a0..76411b1fc4fd 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
@@ -78,7 +78,6 @@ struct acpi_processor_cx { | |||
78 | struct acpi_processor_power { | 78 | struct acpi_processor_power { |
79 | struct cpuidle_device dev; | 79 | struct cpuidle_device dev; |
80 | struct acpi_processor_cx *state; | 80 | struct acpi_processor_cx *state; |
81 | struct acpi_processor_cx *bm_state; | ||
82 | unsigned long bm_check_timestamp; | 81 | unsigned long bm_check_timestamp; |
83 | u32 default_state; | 82 | u32 default_state; |
84 | u32 bm_activity; | 83 | u32 bm_activity; |
diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h index e23fd9fbebb3..6adee6a97dec 100644 --- a/include/asm-x86/mach-default/mach_reboot.h +++ b/include/asm-x86/mach-default/mach_reboot.h | |||
@@ -49,7 +49,7 @@ static inline void mach_reboot(void) | |||
49 | udelay(50); | 49 | udelay(50); |
50 | kb_wait(); | 50 | kb_wait(); |
51 | udelay(50); | 51 | udelay(50); |
52 | outb(cmd | 0x04, 0x60); /* set "System flag" */ | 52 | outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ |
53 | udelay(50); | 53 | udelay(50); |
54 | kb_wait(); | 54 | kb_wait(); |
55 | udelay(50); | 55 | udelay(50); |
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 8aa10547b4b1..52ee75cd0fe1 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h | |||
@@ -29,9 +29,9 @@ extern int mps_oem_check(struct mp_config_table *mpc, char *oem, | |||
29 | static inline int es7000_check_dsdt(void) | 29 | static inline int es7000_check_dsdt(void) |
30 | { | 30 | { |
31 | struct acpi_table_header header; | 31 | struct acpi_table_header header; |
32 | memcpy(&header, 0, sizeof(struct acpi_table_header)); | 32 | |
33 | acpi_get_table_header(ACPI_SIG_DSDT, 0, &header); | 33 | if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && |
34 | if (!strncmp(header.oem_id, "UNISYS", 6)) | 34 | !strncmp(header.oem_id, "UNISYS", 6)) |
35 | return 1; | 35 | return 1; |
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
diff --git a/include/asm-x86/mach-voyager/setup_arch.h b/include/asm-x86/mach-voyager/setup_arch.h index 1710ae10eb67..71729ca05cd7 100644 --- a/include/asm-x86/mach-voyager/setup_arch.h +++ b/include/asm-x86/mach-voyager/setup_arch.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <asm/voyager.h> | 1 | #include <asm/voyager.h> |
2 | #include <asm/setup_32.h> | 2 | #include <asm/setup.h> |
3 | #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \ | 3 | #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \ |
4 | (&boot_params.apm_bios_info)) | 4 | (&boot_params.apm_bios_info)) |
5 | 5 | ||
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 8ccedf7a0a5a..e3c16c981e46 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -132,6 +132,11 @@ extern unsigned long acpi_realmode_flags; | |||
132 | int acpi_register_gsi (u32 gsi, int triggering, int polarity); | 132 | int acpi_register_gsi (u32 gsi, int triggering, int polarity); |
133 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); | 133 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
134 | 134 | ||
135 | #ifdef CONFIG_X86_IO_APIC | ||
136 | extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity); | ||
137 | #else | ||
138 | #define acpi_get_override_irq(bus, trigger, polarity) (-1) | ||
139 | #endif | ||
135 | /* | 140 | /* |
136 | * This function undoes the effect of one call to acpi_register_gsi(). | 141 | * This function undoes the effect of one call to acpi_register_gsi(). |
137 | * If this matches the last registration, any IRQ resources for gsi | 142 | * If this matches the last registration, any IRQ resources for gsi |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 16a51546db44..c4e00161a247 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -92,6 +92,7 @@ struct cpuidle_device { | |||
92 | struct kobject kobj; | 92 | struct kobject kobj; |
93 | struct completion kobj_unregister; | 93 | struct completion kobj_unregister; |
94 | void *governor_data; | 94 | void *governor_data; |
95 | struct cpuidle_state *safe_state; | ||
95 | }; | 96 | }; |
96 | 97 | ||
97 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 98 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index d1b7ca6c1c57..6080f73fc85f 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
@@ -136,7 +136,7 @@ static inline int selinux_audit_rule_init(u32 field, u32 op, | |||
136 | char *rulestr, | 136 | char *rulestr, |
137 | struct selinux_audit_rule **rule) | 137 | struct selinux_audit_rule **rule) |
138 | { | 138 | { |
139 | return -ENOTSUPP; | 139 | return -EOPNOTSUPP; |
140 | } | 140 | } |
141 | 141 | ||
142 | static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule) | 142 | static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule) |
diff --git a/kernel/sys.c b/kernel/sys.c index 304b5410d746..d1fe71eb4546 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, | 1752 | asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, |
1753 | struct getcpu_cache __user *cache) | 1753 | struct getcpu_cache __user *unused) |
1754 | { | 1754 | { |
1755 | int err = 0; | 1755 | int err = 0; |
1756 | int cpu = raw_smp_processor_id(); | 1756 | int cpu = raw_smp_processor_id(); |
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, | |||
1758 | err |= put_user(cpu, cpup); | 1758 | err |= put_user(cpu, cpup); |
1759 | if (nodep) | 1759 | if (nodep) |
1760 | err |= put_user(cpu_to_node(cpu), nodep); | 1760 | err |= put_user(cpu_to_node(cpu), nodep); |
1761 | if (cache) { | ||
1762 | /* | ||
1763 | * The cache is not needed for this implementation, | ||
1764 | * but make sure user programs pass something | ||
1765 | * valid. vsyscall implementations can instead make | ||
1766 | * good use of the cache. Only use t0 and t1 because | ||
1767 | * these are available in both 32bit and 64bit ABI (no | ||
1768 | * need for a compat_getcpu). 32bit has enough | ||
1769 | * padding | ||
1770 | */ | ||
1771 | unsigned long t0, t1; | ||
1772 | get_user(t0, &cache->blob[0]); | ||
1773 | get_user(t1, &cache->blob[1]); | ||
1774 | t0++; | ||
1775 | t1++; | ||
1776 | put_user(t0, &cache->blob[0]); | ||
1777 | put_user(t1, &cache->blob[1]); | ||
1778 | } | ||
1779 | return err ? -EFAULT : 0; | 1761 | return err ? -EFAULT : 0; |
1780 | } | 1762 | } |
1781 | 1763 | ||
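
Dropping the get_user/put_user poking of the cache blob means the third argument of getcpu(2) is now ignored by the slow syscall path (vsyscall/vDSO implementations may still make use of it). A hypothetical userspace check, not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu, node;

	/* third (cache) argument may be NULL; the kernel no longer touches it */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("cpu %u, node %u\n", cpu, node);
	return 0;
}
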
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index de6a2d6b3ebb..14a2ecf2b318 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -205,7 +205,7 @@ static void sync_cmos_clock(unsigned long dummy) | |||
205 | return; | 205 | return; |
206 | 206 | ||
207 | getnstimeofday(&now); | 207 | getnstimeofday(&now); |
208 | if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) | 208 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) |
209 | fail = update_persistent_clock(now); | 209 | fail = update_persistent_clock(now); |
210 | 210 | ||
211 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; | 211 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; |
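
The one-liner above is a correctness fix: the window test must use the time that was just read (now), not the possibly stale xtime, or the RTC write can be scheduled outside the half-tick window around the middle of the second. For scale (illustrative numbers, not from the patch): with HZ=250, tick_nsec is 4,000,000 ns, so the write only happens when now.tv_nsec lies within 2 ms of the 500 ms mark:

	/* fires only when "now" is within tick_nsec/2 of xx.500000000 s */
	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
		fail = update_persistent_clock(now);	/* 498..502 ms at HZ=250 */
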
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index c6bee85c3962..a38787a881ea 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c | |||
@@ -591,7 +591,6 @@ int main(int ac, char **av) | |||
591 | conf_read_simple(name, S_DEF_USER); | 591 | conf_read_simple(name, S_DEF_USER); |
592 | else if (!stat("all.config", &tmpstat)) | 592 | else if (!stat("all.config", &tmpstat)) |
593 | conf_read_simple("all.config", S_DEF_USER); | 593 | conf_read_simple("all.config", S_DEF_USER); |
594 | conf_set_env_sym("K64BIT", "64BIT", S_DEF_USER); | ||
595 | break; | 594 | break; |
596 | default: | 595 | default: |
597 | break; | 596 | break; |
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index e4fa3f302541..e0f402f3b75d 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -145,33 +145,6 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* Read an environment variable and assign the value to the symbol */ | ||
149 | int conf_set_env_sym(const char *env, const char *symname, int def) | ||
150 | { | ||
151 | struct symbol *sym; | ||
152 | char *p; | ||
153 | int def_flags; | ||
154 | |||
155 | p = getenv(env); | ||
156 | if (p) { | ||
157 | char warning[200]; | ||
158 | sprintf(warning, "Environment variable (%s = \"%s\")", env, p); | ||
159 | conf_filename = warning; | ||
160 | def_flags = SYMBOL_DEF << def; | ||
161 | if (def == S_DEF_USER) { | ||
162 | sym = sym_find(symname); | ||
163 | if (!sym) | ||
164 | return 1; | ||
165 | } else { | ||
166 | sym = sym_lookup(symname, 0); | ||
167 | if (sym->type == S_UNKNOWN) | ||
168 | sym->type = S_OTHER; | ||
169 | } | ||
170 | conf_set_sym_val(sym, def, def_flags, p); | ||
171 | } | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | int conf_read_simple(const char *name, int def) | 148 | int conf_read_simple(const char *name, int def) |
176 | { | 149 | { |
177 | FILE *in = NULL; | 150 | FILE *in = NULL; |
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h index dca294e90cc3..4d09f6ddefe3 100644 --- a/scripts/kconfig/lkc_proto.h +++ b/scripts/kconfig/lkc_proto.h | |||
@@ -1,7 +1,6 @@ | |||
1 | 1 | ||
2 | /* confdata.c */ | 2 | /* confdata.c */ |
3 | P(conf_parse,void,(const char *name)); | 3 | P(conf_parse,void,(const char *name)); |
4 | P(conf_set_env_sym,int,(const char *envname, const char *symname, int def)); | ||
5 | P(conf_read,int,(const char *name)); | 4 | P(conf_read,int,(const char *name)); |
6 | P(conf_read_simple,int,(const char *name, int)); | 5 | P(conf_read_simple,int,(const char *name, int)); |
7 | P(conf_write,int,(const char *name)); | 6 | P(conf_write,int,(const char *name)); |