Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/Makefile                    |   2
-rw-r--r--  arch/i386/kernel/acpi/boot.c                 |   4
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c           |  23
-rw-r--r--  arch/i386/kernel/apic.c                      |  12
-rw-r--r--  arch/i386/kernel/apm.c                       |   2
-rw-r--r--  arch/i386/kernel/cpu/amd.c                   |   2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c   |  10
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c       |   2
-rw-r--r--  arch/i386/kernel/cpuid.c                     |   2
-rw-r--r--  arch/i386/kernel/dmi_scan.c                  | 358
-rw-r--r--  arch/i386/kernel/io_apic.c                   |   5
-rw-r--r--  arch/i386/kernel/kprobes.c                   |  39
-rw-r--r--  arch/i386/kernel/mpparse.c                   |  12
-rw-r--r--  arch/i386/kernel/msr.c                       |   2
-rw-r--r--  arch/i386/kernel/ptrace.c                    |   7
-rw-r--r--  arch/i386/kernel/setup.c                     |  17
-rw-r--r--  arch/i386/kernel/smpboot.c                   |   4
-rw-r--r--  arch/i386/kernel/syscall_table.S             |   2
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c          |   4
-rw-r--r--  arch/i386/kernel/traps.c                     |   4
-rw-r--r--  arch/i386/kernel/vm86.c                      |   2
21 files changed, 99 insertions(+), 416 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 5b9ed21216cf..96fb8a020af2 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
-		pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
+		pci-dma.o i386_ksyms.o i387.o bootflag.o \
 		quirks.o i8237.o topology.o alternative.o
 
 obj-y	+= cpu/
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 030a0007a4e0..40e5aba3ad3d 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -168,7 +168,7 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 	unsigned long i;
 	int config_size;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
@@ -215,7 +215,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643a4dc4..1649a175a206 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
 
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 static int __init check_bridge(int vendor, int device)
 {
 #ifdef CONFIG_ACPI
-	/* According to Nvidia all timer overrides are bogus. Just ignore
-	   them all. */
+	/* According to Nvidia all timer overrides are bogus unless HPET
+	   is enabled. */
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
-		acpi_skip_timer_override = 1;
+		nvidia_hpet_detected = 0;
+		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+		if (nvidia_hpet_detected == 0) {
+			acpi_skip_timer_override = 1;
+		}
 	}
 #endif
 	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 254cee9f0b7b..3d4b2f3d116a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -757,10 +757,6 @@ static int __init apic_set_verbosity(char *str)
 		apic_verbosity = APIC_DEBUG;
 	else if (strcmp("verbose", str) == 0)
 		apic_verbosity = APIC_VERBOSE;
-	else
-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug\n", str);
-
 	return 1;
 }
 
@@ -1345,6 +1341,14 @@ int __init APIC_init_uniprocessor (void)
 
 	connect_bsp_APIC();
 
+	/*
+	 * Hack: In case of kdump, after a crash, kernel might be booting
+	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
+	 * might be zero if read from MP tables. Get it from LAPIC.
+	 */
+#ifdef CONFIG_CRASH_DUMP
+	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+#endif
 	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
 
 	setup_local_APIC();
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index da30a374dd4e..df0e1745f189 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1079,7 +1079,7 @@ static int apm_console_blank(int blank)
 		break;
 	}
 
-	if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
+	if (error == APM_NOT_ENGAGED) {
 		static int tried;
 		int eng_error;
 		if (tried++ == 0) {
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index ff2b2154ac1b..786d1a57048b 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -207,6 +207,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 		set_bit(X86_FEATURE_K7, c->x86_capability);
 		break;
 	}
+	if (c->x86 >= 6)
+		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
 
 	display_cacheinfo(c);
 
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 7c0e160a2145..71fffa174425 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -905,14 +905,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 {
 	cpumask_t oldmask = CPU_MASK_ALL;
 	struct powernow_k8_data *data = powernow_data[pol->cpu];
-	u32 checkfid = data->currfid;
-	u32 checkvid = data->currvid;
+	u32 checkfid;
+	u32 checkvid;
 	unsigned int newstate;
 	int ret = -EIO;
 
 	if (!data)
 		return -EINVAL;
 
+	checkfid = data->currfid;
+	checkvid = data->currvid;
+
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
@@ -1109,9 +1112,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	if (!data)
-		return -EINVAL;
-
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9df87b03612c..c8547a6fa7e6 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -642,7 +642,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	return;
 }
 
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 006141d1c12a..1d9a4abcdfc7 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -168,7 +168,7 @@ static int cpuid_class_device_create(int i)
 	return err;
 }
 
-static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
deleted file mode 100644
index 5efceebc48dc..000000000000
--- a/arch/i386/kernel/dmi_scan.c
+++ /dev/null
@@ -1,358 +0,0 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/dmi.h>
-#include <linux/efi.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <asm/dmi.h>
-
-static char * __init dmi_string(struct dmi_header *dm, u8 s)
-{
-	u8 *bp = ((u8 *) dm) + dm->length;
-	char *str = "";
-
-	if (s) {
-		s--;
-		while (s > 0 && *bp) {
-			bp += strlen(bp) + 1;
-			s--;
-		}
-
-		if (*bp != 0) {
-			str = dmi_alloc(strlen(bp) + 1);
-			if (str != NULL)
-				strcpy(str, bp);
-			else
-				printk(KERN_ERR "dmi_string: out of memory.\n");
-		}
-	}
-
-	return str;
-}
-
-/*
- * We have to be cautious here. We have seen BIOSes with DMI pointers
- * pointing to completely the wrong place for example
- */
-static int __init dmi_table(u32 base, int len, int num,
-			    void (*decode)(struct dmi_header *))
-{
-	u8 *buf, *data;
-	int i = 0;
-
-	buf = dmi_ioremap(base, len);
-	if (buf == NULL)
-		return -1;
-
-	data = buf;
-
-	/*
-	 * Stop when we see all the items the table claimed to have
-	 * OR we run off the end of the table (also happens)
-	 */
-	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
-		struct dmi_header *dm = (struct dmi_header *)data;
-		/*
-		 * We want to know the total length (formated area and strings)
-		 * before decoding to make sure we won't run off the table in
-		 * dmi_decode or dmi_string
-		 */
-		data += dm->length;
-		while ((data - buf < len - 1) && (data[0] || data[1]))
-			data++;
-		if (data - buf < len - 1)
-			decode(dm);
-		data += 2;
-		i++;
-	}
-	dmi_iounmap(buf, len);
-	return 0;
-}
-
-static int __init dmi_checksum(u8 *buf)
-{
-	u8 sum = 0;
-	int a;
-
-	for (a = 0; a < 15; a++)
-		sum += buf[a];
-
-	return sum == 0;
-}
-
-static char *dmi_ident[DMI_STRING_MAX];
-static LIST_HEAD(dmi_devices);
-
-/*
- * Save a DMI string
- */
-static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
-{
-	char *p, *d = (char*) dm;
-
-	if (dmi_ident[slot])
-		return;
-
-	p = dmi_string(dm, d[string]);
-	if (p == NULL)
-		return;
-
-	dmi_ident[slot] = p;
-}
-
-static void __init dmi_save_devices(struct dmi_header *dm)
-{
-	int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
-	struct dmi_device *dev;
-
-	for (i = 0; i < count; i++) {
-		char *d = (char *)(dm + 1) + (i * 2);
-
-		/* Skip disabled device */
-		if ((*d & 0x80) == 0)
-			continue;
-
-		dev = dmi_alloc(sizeof(*dev));
-		if (!dev) {
-			printk(KERN_ERR "dmi_save_devices: out of memory.\n");
-			break;
-		}
-
-		dev->type = *d++ & 0x7f;
-		dev->name = dmi_string(dm, *d);
-		dev->device_data = NULL;
-
-		list_add(&dev->list, &dmi_devices);
-	}
-}
-
-static void __init dmi_save_ipmi_device(struct dmi_header *dm)
-{
-	struct dmi_device *dev;
-	void * data;
-
-	data = dmi_alloc(dm->length);
-	if (data == NULL) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
-		return;
-	}
-
-	memcpy(data, dm, dm->length);
-
-	dev = dmi_alloc(sizeof(*dev));
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
-		return;
-	}
-
-	dev->type = DMI_DEV_TYPE_IPMI;
-	dev->name = "IPMI controller";
-	dev->device_data = data;
-
-	list_add(&dev->list, &dmi_devices);
-}
-
-/*
- * Process a DMI table entry. Right now all we care about are the BIOS
- * and machine entries. For 2.5 we should pull the smbus controller info
- * out of here.
- */
-static void __init dmi_decode(struct dmi_header *dm)
-{
-	switch(dm->type) {
-	case 0:		/* BIOS Information */
-		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
-		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
-		dmi_save_ident(dm, DMI_BIOS_DATE, 8);
-		break;
-	case 1:		/* System Information */
-		dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
-		dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
-		dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
-		dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
-		break;
-	case 2:		/* Base Board Information */
-		dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
-		dmi_save_ident(dm, DMI_BOARD_NAME, 5);
-		dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
-		break;
-	case 10:	/* Onboard Devices Information */
-		dmi_save_devices(dm);
-		break;
-	case 38:	/* IPMI Device Information */
-		dmi_save_ipmi_device(dm);
-	}
-}
-
-static int __init dmi_present(char __iomem *p)
-{
-	u8 buf[15];
-	memcpy_fromio(buf, p, 15);
-	if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
-		u16 num = (buf[13] << 8) | buf[12];
-		u16 len = (buf[7] << 8) | buf[6];
-		u32 base = (buf[11] << 24) | (buf[10] << 16) |
-			(buf[9] << 8) | buf[8];
-
-		/*
-		 * DMI version 0.0 means that the real version is taken from
-		 * the SMBIOS version, which we don't know at this point.
-		 */
-		if (buf[14] != 0)
-			printk(KERN_INFO "DMI %d.%d present.\n",
-				buf[14] >> 4, buf[14] & 0xF);
-		else
-			printk(KERN_INFO "DMI present.\n");
-		if (dmi_table(base,len, num, dmi_decode) == 0)
-			return 0;
-	}
-	return 1;
-}
-
-void __init dmi_scan_machine(void)
-{
-	char __iomem *p, *q;
-	int rc;
-
-	if (efi_enabled) {
-		if (efi.smbios == EFI_INVALID_TABLE_ADDR)
-			goto out;
-
-		/* This is called as a core_initcall() because it isn't
-		 * needed during early boot. This also means we can
-		 * iounmap the space when we're done with it.
-		 */
-		p = dmi_ioremap(efi.smbios, 32);
-		if (p == NULL)
-			goto out;
-
-		rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
-		dmi_iounmap(p, 32);
-		if (!rc)
-			return;
-	}
-	else {
-		/*
-		 * no iounmap() for that ioremap(); it would be a no-op, but
-		 * it's so early in setup that sucker gets confused into doing
-		 * what it shouldn't if we actually call it.
-		 */
-		p = dmi_ioremap(0xF0000, 0x10000);
-		if (p == NULL)
-			goto out;
-
-		for (q = p; q < p + 0x10000; q += 16) {
-			rc = dmi_present(q);
-			if (!rc)
-				return;
-		}
-	}
- out:	printk(KERN_INFO "DMI not present or invalid.\n");
-}
-
-/**
- *	dmi_check_system - check system DMI data
- *	@list: array of dmi_system_id structures to match against
- *
- *	Walk the blacklist table running matching functions until someone
- *	returns non zero or we hit the end. Callback function is called for
- *	each successfull match. Returns the number of matches.
- */
-int dmi_check_system(struct dmi_system_id *list)
-{
-	int i, count = 0;
-	struct dmi_system_id *d = list;
-
-	while (d->ident) {
-		for (i = 0; i < ARRAY_SIZE(d->matches); i++) {
-			int s = d->matches[i].slot;
-			if (s == DMI_NONE)
-				continue;
-			if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
-				continue;
-			/* No match */
-			goto fail;
-		}
-		count++;
-		if (d->callback && d->callback(d))
-			break;
-fail:		d++;
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(dmi_check_system);
-
-/**
- *	dmi_get_system_info - return DMI data value
- *	@field: data index (see enum dmi_filed)
- *
- *	Returns one DMI data value, can be used to perform
- *	complex DMI data checks.
- */
-char *dmi_get_system_info(int field)
-{
-	return dmi_ident[field];
-}
-EXPORT_SYMBOL(dmi_get_system_info);
-
-/**
- *	dmi_find_device - find onboard device by type/name
- *	@type: device type or %DMI_DEV_TYPE_ANY to match all device types
- *	@desc: device name string or %NULL to match all
- *	@from: previous device found in search, or %NULL for new search.
- *
- *	Iterates through the list of known onboard devices. If a device is
- *	found with a matching @vendor and @device, a pointer to its device
- *	structure is returned.  Otherwise, %NULL is returned.
- *	A new search is initiated by passing %NULL to the @from argument.
- *	If @from is not %NULL, searches continue from next device.
- */
-struct dmi_device * dmi_find_device(int type, const char *name,
-				    struct dmi_device *from)
-{
-	struct list_head *d, *head = from ? &from->list : &dmi_devices;
-
-	for(d = head->next; d != &dmi_devices; d = d->next) {
-		struct dmi_device *dev = list_entry(d, struct dmi_device, list);
-
-		if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
-		    ((name == NULL) || (strcmp(dev->name, name) == 0)))
-			return dev;
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL(dmi_find_device);
-
-/**
- *	dmi_get_year - Return year of a DMI date
- *	@field:	data index (like dmi_get_system_info)
- *
- *	Returns -1 when the field doesn't exist. 0 when it is broken.
- */
-int dmi_get_year(int field)
-{
-	int year;
-	char *s = dmi_get_system_info(field);
-
-	if (!s)
-		return -1;
-	if (*s == '\0')
-		return 0;
-	s = strrchr(s, '/');
-	if (!s)
-		return 0;
-
-	s += 1;
-	year = simple_strtoul(s, NULL, 0);
-	if (year && year < 100) {	/* 2-digit year */
-		year += 1900;
-		if (year < 1996)	/* no dates < spec 1.0 */
-			year += 100;
-	}
-
-	return year;
-}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index f8f132aa5472..d70f2ade5cde 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2238,6 +2238,8 @@ static inline void unlock_ExtINT_logic(void)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+int timer_uses_ioapic_pin_0;
+
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
  * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
@@ -2274,6 +2276,9 @@ static inline void check_timer(void)
 	pin2  = ioapic_i8259.pin;
 	apic2 = ioapic_i8259.apic;
 
+	if (pin1 == 0)
+		timer_uses_ioapic_pin_0 = 1;
+
 	printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 		vector, apic1, pin1, apic2, pin2);
 
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index f19768789e8a..38806f427849 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 /* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
@@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from, void *to)
 /*
  * returns non-zero if opcodes can be boosted.
  */
-static inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t opcode)
 {
 	switch (opcode & 0xf0 ) {
 	case 0x70:
@@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcode_t opcode)
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
 {
 	switch (opcode) {
 	case 0xfa:		/* cli */
@@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -146,7 +146,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -154,7 +154,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -164,7 +164,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 		kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
@@ -242,10 +242,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else {
-			if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-				goto no_kprobe;
-			}
 			if (*addr != BREAKPOINT_INSTRUCTION) {
 			/* The breakpoint instruction was removed by
 			 * another cpu right after we hit, no further
@@ -265,11 +261,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
 	p = get_kprobe(addr);
 	if (!p) {
-		if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-			goto no_kprobe;
-		}
-
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -452,10 +443,11 @@ static void __kprobes resume_execution(struct kprobe *p,
 		*tos &= ~(TF_MASK | IF_MASK);
 		*tos |= kcb->kprobe_old_eflags;
 		break;
-	case 0xc3:		/* ret/lret */
-	case 0xcb:
-	case 0xc2:
+	case 0xc2:		/* iret/ret/lret */
+	case 0xc3:
 	case 0xca:
+	case 0xcb:
+	case 0xcf:
 	case 0xea:		/* jmp absolute -- eip is correct */
 		/* eip is already adjusted, no more changes required */
 		p->ainsn.boostable = 1;
@@ -463,10 +455,13 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xe8:	/* call relative - Fix return addr */
 		*tos = orig_eip + (*tos - copy_eip);
 		break;
+	case 0x9a:	/* call absolute -- same as call absolute, indirect */
+		*tos = orig_eip + (*tos - copy_eip);
+		goto no_change;
 	case 0xff:
 		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
-			/* call absolute, indirect */
 			/*
+			 * call absolute, indirect
 			 * Fix return addr; eip is correct.
 			 * But this is not boostable
 			 */
@@ -507,7 +502,7 @@ no_change:
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled thoroughout this function.
  */
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -543,7 +538,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 34d21e21e012..6b1392d33ed5 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1130,7 +1130,17 @@ int mp_register_gsi (u32 gsi, int triggering, int polarity)
 		 */
 		int irq = gsi;
 		if (gsi < MAX_GSI_NUM) {
-			if (gsi > 15)
+			/*
+			 * Retain the VIA chipset work-around (gsi > 15), but
+			 * avoid a problem where the 8254 timer (IRQ0) is setup
+			 * via an override (so it's not on pin 0 of the ioapic),
+			 * and at the same time, the pin 0 interrupt is a PCI
+			 * type. The gsi > 15 test could cause these two pins
+			 * to be shared as IRQ0, and they are not shareable.
+			 * So test for this condition, and if necessary, avoid
+			 * the pin collision.
+			 */
+			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
 				gsi = pci_irq++;
 			/*
 			 * Don't assign IRQ used by ACPI SCI
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 1d0a55e68760..7a328230e540 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -251,7 +251,7 @@ static int msr_class_device_create(int i)
 	return err;
 }
 
-static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 506462ef36a0..fd7eaf7866e0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -671,7 +671,7 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 	if (unlikely(current->audit_context)) {
 		if (entryexit)
-			audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
+			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
 						regs->eax);
 		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
 		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
@@ -720,14 +720,13 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 		ret = is_sysemu;
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
+		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
 				    regs->ebx, regs->ecx, regs->edx, regs->esi);
 	if (ret == 0)
 		return 0;
 
 	regs->orig_eax = -1; /* force skip of syscall restarting */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
-				   regs->eax);
+		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 	return 1;
 }
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 80cb3b2d0997..dd6b0e3386ce 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -970,8 +970,10 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
  * not-overlapping, which is the case
  */
 int __init
-e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
 {
+	u64 start = s;
+	u64 end = e;
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
@@ -1318,6 +1320,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 	probe_roms();
 	for (i = 0; i < e820.nr_map; i++) {
 		struct resource *res;
+		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+			continue;
 		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
 		switch (e820.map[i].type) {
 		case E820_RAM:	res->name = "System RAM"; break;
@@ -1543,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled)
 		efi_map_memmap();
 
-#ifdef CONFIG_X86_IO_APIC
-	check_acpi_pci();	/* Checks more than just ACPI actually */
-#endif
-
 #ifdef CONFIG_ACPI
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 */
 	acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+	check_acpi_pci();	/* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
 	acpi_boot_init();
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index a6969903f2d6..825b2b4ca721 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -313,7 +313,9 @@ static void __init synchronize_tsc_bp (void)
 			if (tsc_values[i] < avg)
 				realdelta = -realdelta;
 
-			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
+			if (realdelta > 0)
+				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+					"skew, fixed it up.\n", i, realdelta);
 		}
 
 		sum += delta;
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 4f58b9c0efe3..af56987f69b0 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -314,3 +314,5 @@ ENTRY(sys_call_table)
 	.long sys_get_robust_list
 	.long sys_splice
 	.long sys_sync_file_range
+	.long sys_tee			/* 315 */
+	.long sys_vmsplice
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 5e41ee29c8cf..f1187ddb0d0f 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -279,7 +279,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_seqlock_irq(&xtime_lock);
 	if (!ref_freq) {
 		if (!freq->old){
@@ -312,7 +312,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	}
 
 end:
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_sequnlock_irq(&xtime_lock);
 
 	return 0;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 2d22f5761b1d..0e498369f35e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -130,9 +130,8 @@ static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
 		print_symbol("%s", addr);
 
 	printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
-
 	if (printed)
 		printk(" ");
 	else
 		printk("\n");
 
@@ -212,7 +211,6 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
 	}
 
 	stack = esp;
-	printk(log_lvl);
 	for(i = 0; i < kstack_depth_to_print; i++) {
 		if (kstack_end(stack))
 			break;
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index aee14fafd13d..00e0118e717c 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -312,7 +312,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"