Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/Makefile                    |    2
-rw-r--r--  arch/i386/kernel/acpi/boot.c                 |   11
-rw-r--r--  arch/i386/kernel/apic.c                      |   23
-rw-r--r--  arch/i386/kernel/apm.c                       |    2
-rw-r--r--  arch/i386/kernel/cpu/amd.c                   |    4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c   |   22
-rw-r--r--  arch/i386/kernel/cpuid.c                     |    2
-rw-r--r--  arch/i386/kernel/dmi_scan.c                  |  358
-rw-r--r--  arch/i386/kernel/i386_ksyms.c                |    1
-rw-r--r--  arch/i386/kernel/kprobes.c                   |   18
-rw-r--r--  arch/i386/kernel/mpparse.c                   |   43
-rw-r--r--  arch/i386/kernel/msr.c                       |    2
-rw-r--r--  arch/i386/kernel/reboot_fixups.c             |    2
-rw-r--r--  arch/i386/kernel/setup.c                     |   36
-rw-r--r--  arch/i386/kernel/syscall_table.S             |    1
-rw-r--r--  arch/i386/kernel/traps.c                     |   16
16 files changed, 120 insertions, 423 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 5b9ed21216cf..96fb8a020af2 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
-		pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
+		pci-dma.o i386_ksyms.o i387.o bootflag.o \
 		quirks.o i8237.o topology.o alternative.o
 
 obj-y	+= cpu/
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 033066176b3e..049a25583793 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -215,7 +215,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size)
+	if (!phys_addr || !size || !cpu_has_apic)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -693,6 +693,9 @@ static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
 
+	if (!cpu_has_apic)
+		return -ENODEV;
+
 	/*
 	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
 	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
@@ -751,6 +754,9 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 		return -ENODEV;
 	}
 
+	if (!cpu_has_apic)
+		return -ENODEV;
+
 	/*
 	 * if "noapic" boot option, don't look for IO-APICs
 	 */
@@ -1096,6 +1102,9 @@ int __init acpi_boot_table_init(void)
 	dmi_check_system(acpi_dmi_table);
 #endif
 
+	if (!cpu_has_apic)
+		return -ENODEV;
+
 	/*
 	 * If acpi_disabled, bail out
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 6273bf74c203..254cee9f0b7b 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -62,6 +62,18 @@ int apic_verbosity;
 
 static void apic_pm_activate(void);
 
+int modern_apic(void)
+{
+	unsigned int lvr, version;
+	/* AMD systems use old APIC versions, so check the CPU */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+	    boot_cpu_data.x86 >= 0xf)
+		return 1;
+	lvr = apic_read(APIC_LVR);
+	version = GET_APIC_VERSION(lvr);
+	return version >= 0x14;
+}
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -119,10 +131,7 @@ void enable_NMI_through_LVT0 (void * dummy)
 
 int get_physical_broadcast(void)
 {
-	unsigned int lvr, version;
-	lvr = apic_read(APIC_LVR);
-	version = GET_APIC_VERSION(lvr);
-	if (!APIC_INTEGRATED(version) || version >= 0x14)
+	if (modern_apic())
 		return 0xff;
 	else
 		return 0xf;
@@ -349,9 +358,9 @@ int __init verify_local_APIC(void)
 
 void __init sync_Arb_IDs(void)
 {
-	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
-	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-	if (ver >= 0x14)	/* P4 or higher */
+	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1
+	   And not needed on AMD */
+	if (modern_apic())
 		return;
 	/*
 	 * Wait for idle.
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index da30a374dd4e..df0e1745f189 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1079,7 +1079,7 @@ static int apm_console_blank(int blank)
 		break;
 	}
 
-	if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
+	if (error == APM_NOT_ENGAGED) {
 		static int tried;
 		int eng_error;
 		if (tried++ == 0) {
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 0810f81f2a05..786d1a57048b 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -207,13 +207,13 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 		set_bit(X86_FEATURE_K7, c->x86_capability);
 		break;
 	}
+	if (c->x86 >= 6)
+		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
 
 	display_cacheinfo(c);
 
 	if (cpuid_eax(0x80000000) >= 0x80000008) {
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-		if (c->x86_max_cores & (c->x86_max_cores - 1))
-			c->x86_max_cores = 1;
 	}
 
 	if (cpuid_eax(0x80000000) >= 0x80000007) {
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 712a26bd4457..71fffa174425 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -46,7 +46,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.60.1"
+#define VERSION "version 1.60.2"
 #include "powernow-k8.h"
 
 /* serialize freq changes */
@@ -55,7 +55,7 @@ static DEFINE_MUTEX(fidvid_mutex);
 static struct powernow_k8_data *powernow_data[NR_CPUS];
 
 #ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1] = { CPU_MASK_ALL };
+static cpumask_t cpu_core_map[1];
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -905,11 +905,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 {
 	cpumask_t oldmask = CPU_MASK_ALL;
 	struct powernow_k8_data *data = powernow_data[pol->cpu];
-	u32 checkfid = data->currfid;
-	u32 checkvid = data->currvid;
+	u32 checkfid;
+	u32 checkvid;
 	unsigned int newstate;
 	int ret = -EIO;
 
+	if (!data)
+		return -EINVAL;
+
+	checkfid = data->currfid;
+	checkvid = data->currvid;
+
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
@@ -969,6 +975,9 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data = powernow_data[pol->cpu];
 
+	if (!data)
+		return -EINVAL;
+
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
@@ -977,7 +986,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = CPU_MASK_ALL;
-	int rc, i;
+	int rc;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1063,8 +1072,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
 	       data->currfid, data->currvid);
 
-	for_each_cpu_mask(i, cpu_core_map[pol->cpu])
-		powernow_data[i] = data;
+	powernow_data[pol->cpu] = data;
 
 	return 0;
 
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 006141d1c12a..1d9a4abcdfc7 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -168,7 +168,7 @@ static int cpuid_class_device_create(int i)
 	return err;
 }
 
-static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
deleted file mode 100644
index 5efceebc48dc..000000000000
--- a/arch/i386/kernel/dmi_scan.c
+++ /dev/null
@@ -1,358 +0,0 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/dmi.h>
-#include <linux/efi.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
-#include <asm/dmi.h>
-
-static char * __init dmi_string(struct dmi_header *dm, u8 s)
-{
-	u8 *bp = ((u8 *) dm) + dm->length;
-	char *str = "";
-
-	if (s) {
-		s--;
-		while (s > 0 && *bp) {
-			bp += strlen(bp) + 1;
-			s--;
-		}
-
-		if (*bp != 0) {
-			str = dmi_alloc(strlen(bp) + 1);
-			if (str != NULL)
-				strcpy(str, bp);
-			else
-				printk(KERN_ERR "dmi_string: out of memory.\n");
-		}
-	}
-
-	return str;
-}
-
-/*
- * We have to be cautious here. We have seen BIOSes with DMI pointers
- * pointing to completely the wrong place for example
- */
-static int __init dmi_table(u32 base, int len, int num,
-			    void (*decode)(struct dmi_header *))
-{
-	u8 *buf, *data;
-	int i = 0;
-
-	buf = dmi_ioremap(base, len);
-	if (buf == NULL)
-		return -1;
-
-	data = buf;
-
-	/*
-	 * Stop when we see all the items the table claimed to have
-	 * OR we run off the end of the table (also happens)
-	 */
-	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
-		struct dmi_header *dm = (struct dmi_header *)data;
-		/*
-		 * We want to know the total length (formated area and strings)
-		 * before decoding to make sure we won't run off the table in
-		 * dmi_decode or dmi_string
-		 */
-		data += dm->length;
-		while ((data - buf < len - 1) && (data[0] || data[1]))
-			data++;
-		if (data - buf < len - 1)
-			decode(dm);
-		data += 2;
-		i++;
-	}
-	dmi_iounmap(buf, len);
-	return 0;
-}
-
-static int __init dmi_checksum(u8 *buf)
-{
-	u8 sum = 0;
-	int a;
-
-	for (a = 0; a < 15; a++)
-		sum += buf[a];
-
-	return sum == 0;
-}
-
-static char *dmi_ident[DMI_STRING_MAX];
-static LIST_HEAD(dmi_devices);
-
-/*
- * Save a DMI string
- */
-static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
-{
-	char *p, *d = (char*) dm;
-
-	if (dmi_ident[slot])
-		return;
-
-	p = dmi_string(dm, d[string]);
-	if (p == NULL)
-		return;
-
-	dmi_ident[slot] = p;
-}
-
-static void __init dmi_save_devices(struct dmi_header *dm)
-{
-	int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
-	struct dmi_device *dev;
-
-	for (i = 0; i < count; i++) {
-		char *d = (char *)(dm + 1) + (i * 2);
-
-		/* Skip disabled device */
-		if ((*d & 0x80) == 0)
-			continue;
-
-		dev = dmi_alloc(sizeof(*dev));
-		if (!dev) {
-			printk(KERN_ERR "dmi_save_devices: out of memory.\n");
-			break;
-		}
-
-		dev->type = *d++ & 0x7f;
-		dev->name = dmi_string(dm, *d);
-		dev->device_data = NULL;
-
-		list_add(&dev->list, &dmi_devices);
-	}
-}
-
-static void __init dmi_save_ipmi_device(struct dmi_header *dm)
-{
-	struct dmi_device *dev;
-	void * data;
-
-	data = dmi_alloc(dm->length);
-	if (data == NULL) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
-		return;
-	}
-
-	memcpy(data, dm, dm->length);
-
-	dev = dmi_alloc(sizeof(*dev));
-	if (!dev) {
-		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
-		return;
-	}
-
-	dev->type = DMI_DEV_TYPE_IPMI;
-	dev->name = "IPMI controller";
-	dev->device_data = data;
-
-	list_add(&dev->list, &dmi_devices);
-}
-
-/*
- * Process a DMI table entry. Right now all we care about are the BIOS
- * and machine entries. For 2.5 we should pull the smbus controller info
- * out of here.
- */
-static void __init dmi_decode(struct dmi_header *dm)
-{
-	switch(dm->type) {
-	case 0:		/* BIOS Information */
-		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
-		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
-		dmi_save_ident(dm, DMI_BIOS_DATE, 8);
-		break;
-	case 1:		/* System Information */
-		dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
-		dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
-		dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
-		dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
-		break;
-	case 2:		/* Base Board Information */
-		dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
-		dmi_save_ident(dm, DMI_BOARD_NAME, 5);
-		dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
-		break;
-	case 10:	/* Onboard Devices Information */
-		dmi_save_devices(dm);
-		break;
-	case 38:	/* IPMI Device Information */
-		dmi_save_ipmi_device(dm);
-	}
-}
-
-static int __init dmi_present(char __iomem *p)
-{
-	u8 buf[15];
-	memcpy_fromio(buf, p, 15);
-	if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
-		u16 num = (buf[13] << 8) | buf[12];
-		u16 len = (buf[7] << 8) | buf[6];
-		u32 base = (buf[11] << 24) | (buf[10] << 16) |
-			(buf[9] << 8) | buf[8];
-
-		/*
-		 * DMI version 0.0 means that the real version is taken from
-		 * the SMBIOS version, which we don't know at this point.
-		 */
-		if (buf[14] != 0)
-			printk(KERN_INFO "DMI %d.%d present.\n",
-				buf[14] >> 4, buf[14] & 0xF);
-		else
-			printk(KERN_INFO "DMI present.\n");
-		if (dmi_table(base,len, num, dmi_decode) == 0)
-			return 0;
-	}
-	return 1;
-}
-
-void __init dmi_scan_machine(void)
-{
-	char __iomem *p, *q;
-	int rc;
-
-	if (efi_enabled) {
-		if (efi.smbios == EFI_INVALID_TABLE_ADDR)
-			goto out;
-
-		/* This is called as a core_initcall() because it isn't
-		 * needed during early boot. This also means we can
-		 * iounmap the space when we're done with it.
-		 */
-		p = dmi_ioremap(efi.smbios, 32);
-		if (p == NULL)
-			goto out;
-
-		rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
-		dmi_iounmap(p, 32);
-		if (!rc)
-			return;
-	}
-	else {
-		/*
-		 * no iounmap() for that ioremap(); it would be a no-op, but
-		 * it's so early in setup that sucker gets confused into doing
-		 * what it shouldn't if we actually call it.
-		 */
-		p = dmi_ioremap(0xF0000, 0x10000);
-		if (p == NULL)
-			goto out;
-
-		for (q = p; q < p + 0x10000; q += 16) {
-			rc = dmi_present(q);
-			if (!rc)
-				return;
-		}
-	}
- out:	printk(KERN_INFO "DMI not present or invalid.\n");
-}
-
-/**
- *	dmi_check_system - check system DMI data
- *	@list: array of dmi_system_id structures to match against
- *
- *	Walk the blacklist table running matching functions until someone
- *	returns non zero or we hit the end. Callback function is called for
- *	each successfull match. Returns the number of matches.
- */
-int dmi_check_system(struct dmi_system_id *list)
-{
-	int i, count = 0;
-	struct dmi_system_id *d = list;
-
-	while (d->ident) {
-		for (i = 0; i < ARRAY_SIZE(d->matches); i++) {
-			int s = d->matches[i].slot;
-			if (s == DMI_NONE)
-				continue;
-			if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
-				continue;
-			/* No match */
-			goto fail;
-		}
-		count++;
-		if (d->callback && d->callback(d))
-			break;
-fail:		d++;
-	}
-
-	return count;
-}
-EXPORT_SYMBOL(dmi_check_system);
-
-/**
- *	dmi_get_system_info - return DMI data value
- *	@field: data index (see enum dmi_filed)
- *
- *	Returns one DMI data value, can be used to perform
- *	complex DMI data checks.
- */
-char *dmi_get_system_info(int field)
-{
-	return dmi_ident[field];
-}
-EXPORT_SYMBOL(dmi_get_system_info);
-
-/**
- *	dmi_find_device - find onboard device by type/name
- *	@type: device type or %DMI_DEV_TYPE_ANY to match all device types
- *	@desc: device name string or %NULL to match all
- *	@from: previous device found in search, or %NULL for new search.
- *
- *	Iterates through the list of known onboard devices. If a device is
- *	found with a matching @vendor and @device, a pointer to its device
- *	structure is returned.  Otherwise, %NULL is returned.
- *	A new search is initiated by passing %NULL to the @from argument.
- *	If @from is not %NULL, searches continue from next device.
- */
-struct dmi_device * dmi_find_device(int type, const char *name,
-				    struct dmi_device *from)
-{
-	struct list_head *d, *head = from ? &from->list : &dmi_devices;
-
-	for(d = head->next; d != &dmi_devices; d = d->next) {
-		struct dmi_device *dev = list_entry(d, struct dmi_device, list);
-
-		if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
-		    ((name == NULL) || (strcmp(dev->name, name) == 0)))
-			return dev;
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL(dmi_find_device);
-
-/**
- *	dmi_get_year - Return year of a DMI date
- *	@field: data index (like dmi_get_system_info)
- *
- *	Returns -1 when the field doesn't exist. 0 when it is broken.
- */
-int dmi_get_year(int field)
-{
-	int year;
-	char *s = dmi_get_system_info(field);
-
-	if (!s)
-		return -1;
-	if (*s == '\0')
-		return 0;
-	s = strrchr(s, '/');
-	if (!s)
-		return 0;
-
-	s += 1;
-	year = simple_strtoul(s, NULL, 0);
-	if (year && year < 100) {	/* 2-digit year */
-		year += 1900;
-		if (year < 1996)	/* no dates < spec 1.0 */
-			year += 100;
-	}
-
-	return year;
-}
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 055325056a74..036a9857936f 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -19,7 +19,6 @@ EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
 
-EXPORT_SYMBOL(strpbrk);
 EXPORT_SYMBOL(strstr);
 
 #ifdef CONFIG_SMP
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index f19768789e8a..043f5292e70a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 /* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
@@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from, void *to)
 /*
  * returns non-zero if opcodes can be boosted.
  */
-static inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t opcode)
 {
 	switch (opcode & 0xf0 ) {
 	case 0x70:
@@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcode_t opcode)
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
 {
 	switch (opcode) {
 	case 0xfa:		/* cli */
@@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -146,7 +146,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -154,7 +154,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
@@ -164,7 +164,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
@@ -507,7 +507,7 @@ no_change:
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled thoroughout this function.
  */
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -543,7 +543,7 @@ out:
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 8d8aa9d1796d..34d21e21e012 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -38,12 +38,6 @@
 int smp_found_config;
 unsigned int __initdata maxcpus = NR_CPUS;
 
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_HOTPLUG_ENABLED	(1)
-#else
-#define CPU_HOTPLUG_ENABLED	(0)
-#endif
-
 /*
  * Various Linux-internal data structures created from the
  * MP-table.
@@ -110,21 +104,6 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 static int mpc_record;
 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
 
-#ifdef CONFIG_X86_NUMAQ
-static int MP_valid_apicid(int apicid, int version)
-{
-	return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
-}
-#else
-static int MP_valid_apicid(int apicid, int version)
-{
-	if (version >= 0x14)
-		return apicid < 0xff;
-	else
-		return apicid < 0xf;
-}
-#endif
-
 static void __devinit MP_processor_info (struct mpc_config_processor *m)
 {
 	int ver, apicid;
@@ -190,12 +169,6 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m)
 
 	ver = m->mpc_apicver;
 
-	if (!MP_valid_apicid(apicid, ver)) {
-		printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
-			m->mpc_apicid, MAX_APICS);
-		return;
-	}
-
 	/*
 	 * Validate version
 	 */
@@ -225,7 +198,14 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m)
 	cpu_set(num_processors, cpu_possible_map);
 	num_processors++;
 
-	if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
+	/*
+	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+	 * but we need to work other dependencies like SMP_SUSPEND etc
+	 * before this can be done without some confusion.
+	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+	 *       - Ashok Raj <ashok.raj@intel.com>
+	 */
+	if (num_processors > 8) {
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
 			if (!APIC_XAPIC(ver)) {
@@ -249,6 +229,13 @@ static void __init MP_bus_info (struct mpc_config_bus *m)
 
 	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
 
+	if (m->mpc_busid >= MAX_MP_BUSSES) {
+		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
+			" is too large, max. supported is %d\n",
+			m->mpc_busid, str, MAX_MP_BUSSES - 1);
+		return;
+	}
+
 	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
 	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 1d0a55e68760..7a328230e540 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -251,7 +251,7 @@ static int msr_class_device_create(int i)
 	return err;
 }
 
-static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c
index 10e21a4773dd..99aab41a05b0 100644
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -51,7 +51,5 @@ void mach_reboot_fixups(void)
 
 		cur->reboot_fixup(dev);
 	}
-
-	printk(KERN_WARNING "No reboot fixup found for your hardware\n");
 }
 
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index eacc3f0a2ea4..80cb3b2d0997 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -963,6 +963,36 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+ * Note: this function only works correct if the e820 table is sorted and
+ * not-overlapping, which is the case
+ */
+int __init
+e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+	int i;
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		if (type && ei->type != type)
+			continue;
+		/* is the region (part) in overlap with the current region ?*/
+		if (ei->addr >= end || ei->addr + ei->size <= start)
+			continue;
+		/* if the region is at the beginning of <start,end> we move
+		 * start to the end of the region since it's ok until there
+		 */
+		if (ei->addr <= start)
+			start = ei->addr + ei->size;
+		/* if start is now at or beyond end, we're done, full
+		 * coverage */
+		if (start >= end)
+			return 1; /* we're done */
+	}
+	return 0;
+}
+
 /*
  * Find the highest page frame number we have available
  */
@@ -1317,8 +1347,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 /*
  * Request address space for all standard resources
  *
- * This is called just before pcibios_assign_resources(), which is also
- * an fs_initcall, but is linked in later (in arch/i386/pci/i386.c).
+ * This is called just before pcibios_init(), which is also a
+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
  */
 static int __init request_standard_resources(void)
 {
@@ -1339,7 +1369,7 @@ static int __init request_standard_resources(void)
 	return 0;
 }
 
-fs_initcall(request_standard_resources);
+subsys_initcall(request_standard_resources);
 
 static void __init register_memory(void)
 {
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 4f58b9c0efe3..f48bef15b4f0 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -314,3 +314,4 @@ ENTRY(sys_call_table)
 	.long sys_get_robust_list
 	.long sys_splice
 	.long sys_sync_file_range
+	.long sys_tee			/* 315 */
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index e38527994590..2d22f5761b1d 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -365,6 +365,9 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (++die.lock_owner_depth < 3) {
 		int nl = 0;
+		unsigned long esp;
+		unsigned short ss;
+
 		handle_BUG(regs);
 		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
@@ -387,8 +390,19 @@ void die(const char * str, struct pt_regs * regs, long err)
 		printk("\n");
 		if (notify_die(DIE_OOPS, str, regs, err,
 					current->thread.trap_no, SIGSEGV) !=
-				NOTIFY_STOP)
+				NOTIFY_STOP) {
 			show_registers(regs);
+			/* Executive summary in case the oops scrolled away */
+			esp = (unsigned long) (&regs->esp);
+			savesegment(ss, ss);
+			if (user_mode(regs)) {
+				esp = regs->esp;
+				ss = regs->xss & 0xffff;
+			}
+			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
+			print_symbol("%s", regs->eip);
+			printk(" SS:ESP %04x:%08lx\n", ss, esp);
+		}
 		else
 			regs = NULL;
 	} else