Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 13
-rw-r--r--  arch/x86/Kconfig.cpu | 2
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 61
-rw-r--r--  arch/x86/include/asm/es7000/apic.h | 32
-rw-r--r--  arch/x86/include/asm/es7000/mpparse.h | 3
-rw-r--r--  arch/x86/include/asm/genapic_32.h | 8
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/lguest.h | 2
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpparse.h | 4
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpparse.h | 5
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpspec.h | 4
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 102
-rw-r--r--  arch/x86/include/asm/numaq/apic.h | 4
-rw-r--r--  arch/x86/include/asm/numaq/mpparse.h | 3
-rw-r--r--  arch/x86/include/asm/pci.h | 10
-rw-r--r--  arch/x86/include/asm/setup.h | 14
-rw-r--r--  arch/x86/include/asm/summit/apic.h | 42
-rw-r--r--  arch/x86/include/asm/summit/mpparse.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 36
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 31
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 664
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 15
-rw-r--r--  arch/x86/kernel/apic.c | 18
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 28
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 24
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 51
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 9
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/x86/kernel/cpuid.c | 2
-rw-r--r--  arch/x86/kernel/i8259.c | 8
-rw-r--r--  arch/x86/kernel/io_apic.c | 6
-rw-r--r--  arch/x86/kernel/ioport.c | 4
-rw-r--r--  arch/x86/kernel/irq.c | 2
-rw-r--r--  arch/x86/kernel/irq_32.c | 10
-rw-r--r--  arch/x86/kernel/irq_64.c | 22
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 12
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 8
-rw-r--r--  arch/x86/kernel/mpparse.c | 348
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 1
-rw-r--r--  arch/x86/kernel/numaq_32.c | 38
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 19
-rw-r--r--  arch/x86/kernel/reboot.c | 4
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 44
-rw-r--r--  arch/x86/kernel/smpboot.c | 15
-rw-r--r--  arch/x86/kernel/time_32.c | 4
-rw-r--r--  arch/x86/kernel/time_64.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 3
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 32
-rw-r--r--  arch/x86/kvm/Makefile | 4
-rw-r--r--  arch/x86/kvm/x86.c | 3
-rw-r--r--  arch/x86/mach-generic/es7000.c | 10
-rw-r--r--  arch/x86/mach-generic/numaq.c | 3
-rw-r--r--  arch/x86/mach-generic/probe.c | 3
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 7
-rw-r--r--  arch/x86/mm/k8topology_64.c | 20
-rw-r--r--  arch/x86/xen/time.c | 10
61 files changed, 1213 insertions(+), 643 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 249d1e0824b5..862adb9bf0d4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -586,6 +586,16 @@ config AMD_IOMMU
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
 
+config AMD_IOMMU_STATS
+	bool "Export AMD IOMMU statistics to debugfs"
+	depends on AMD_IOMMU
+	select DEBUG_FS
+	help
+	  This option enables code in the AMD IOMMU driver to collect various
+	  statistics about whats happening in the driver and exports that
+	  information to userspace via debugfs.
+	  If unsure, say N.
+
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	def_bool y if X86_64
@@ -599,6 +609,9 @@ config SWIOTLB
 config IOMMU_HELPER
 	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
 
+config IOMMU_API
+	def_bool (AMD_IOMMU || DMAR)
+
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 85a78575956c..8078955845ae 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -408,7 +408,7 @@ config X86_MINIMUM_CPU_FAMILY
 
 config X86_DEBUGCTLMSR
 	def_bool y
-	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
+	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
 
 menuconfig PROCESSOR_SELECT
 	bool "Supported processor vendors" if EMBEDDED
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ac302a2fa339..95c8cd9d22b5 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -190,16 +190,23 @@
 /* FIXME: move this macro to <linux/pci.h> */
 #define PCI_BUS(x) (((x) >> 8) & 0xff)
 
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
+					      domain for an IOMMU */
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
  */
 struct protection_domain {
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
-	void *priv;		/* private data */
+	unsigned long flags;	/* flags to find out type of domain */
+	unsigned dev_cnt;	/* devices assigned to this domain */
+	void *priv;		/* private data */
 };
 
 /*
@@ -295,7 +302,7 @@ struct amd_iommu {
 	bool int_enabled;
 
 	/* if one, we need to send a completion wait command */
-	int need_sync;
+	bool need_sync;
 
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
@@ -374,7 +381,7 @@ extern struct protection_domain **amd_iommu_pd_table;
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
 /* will be 1 if device isolation is enabled */
-extern int amd_iommu_isolate;
+extern bool amd_iommu_isolate;
 
 /*
  * If true, the addresses will be flushed on unmap time, not when
@@ -382,18 +389,6 @@ extern int amd_iommu_isolate;
  */
 extern bool amd_iommu_unmap_flush;
 
-/* takes a PCI device id and prints it out in a readable form */
-static inline void print_devid(u16 devid, int nl)
-{
-	int bus = devid >> 8;
-	int dev = devid >> 3 & 0x1f;
-	int fn  = devid & 0x07;
-
-	printk("%02x:%02x.%x", bus, dev, fn);
-	if (nl)
-		printk("\n");
-}
-
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
@@ -401,4 +396,32 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
 	return (((u16)bus) << 8) | devfn;
 }
 
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+struct __iommu_counter {
+	char *name;
+	struct dentry *dent;
+	u64 value;
+};
+
+#define DECLARE_STATS_COUNTER(nm) \
+	static struct __iommu_counter nm = {	\
+		.name = #nm,			\
+	}
+
+#define INC_STATS_COUNTER(name)		name.value += 1
+#define ADD_STATS_COUNTER(name, x)	name.value += (x)
+#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
+
+#else /* CONFIG_AMD_IOMMU_STATS */
+
+#define DECLARE_STATS_COUNTER(name)
+#define INC_STATS_COUNTER(name)
+#define ADD_STATS_COUNTER(name, x)
+#define SUB_STATS_COUNTER(name, x)
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* CONFIG_AMD_IOMMU_STATS */
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
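
A minimal usage sketch for the counter machinery above, assuming CONFIG_AMD_IOMMU_STATS=y; cnt_example_op and example_op() are illustrative names, not part of the patch. A counter only appears under debugfs once amd_iommu_stats_add() registers it from amd_iommu_stats_init() in amd_iommu.c; with the option off, all three macros compile to nothing.

	/* sketch only; assumes <asm/amd_iommu_types.h> with CONFIG_AMD_IOMMU_STATS=y */
	DECLARE_STATS_COUNTER(cnt_example_op);	/* expands to a static struct __iommu_counter */

	static void example_op(size_t size)
	{
		INC_STATS_COUNTER(cnt_example_op);		/* cnt_example_op.value += 1 */
		ADD_STATS_COUNTER(cnt_example_op, size);	/* cnt_example_op.value += size */
		SUB_STATS_COUNTER(cnt_example_op, size);	/* cnt_example_op.value -= size */
	}
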
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index 51ac1230294e..bc53d5ef1386 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 
 	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
 	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster.  If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return cpu_to_logical_apicid(0);
-			}
-			apicid = new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
index ed5a3caae141..c1629b090ec2 100644
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ b/arch/x86/include/asm/es7000/mpparse.h
@@ -10,8 +10,7 @@ extern void setup_unisys(void);
 
 #ifndef CONFIG_X86_GENERICARCH
 extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid);
+extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
 #endif
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 746f37a7963a..2c05b737ee22 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -15,9 +15,9 @@
  * Copyright 2003 Andi Kleen, SuSE Labs.
  */
 
-struct mpc_config_bus;
-struct mp_config_table;
-struct mpc_config_processor;
+struct mpc_bus;
+struct mpc_table;
+struct mpc_cpu;
 
 struct genapic {
 	char *name;
@@ -51,7 +51,7 @@ struct genapic {
 	/* When one of the next two hooks returns 1 the genapic
 	   is switched to this. Essentially they are additional probe
 	   functions. */
-	int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
+	int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
 			     char *productid);
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 97215a458e5f..730843d1d2fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -360,7 +360,7 @@ struct kvm_arch{
 	struct list_head active_mmu_pages;
 	struct list_head assigned_dev_head;
 	struct list_head oos_global_pages;
-	struct dmar_domain *intel_iommu_domain;
+	struct iommu_domain *iommu_domain;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index d28a507cef39..1caf57628b9c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
index 8c1ea21238a7..c70a263d68cd 100644
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ b/arch/x86/include/asm/mach-default/mach_mpparse.h
@@ -1,8 +1,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
 #define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
 
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid)
+static inline int
+mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
 	return 0;
 }
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
index 048f1d468535..9444ab8dca94 100644
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ b/arch/x86/include/asm/mach-generic/mach_mpparse.h
@@ -2,9 +2,8 @@
 #define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
 
 
-extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
-	char *productid);
+extern int mps_oem_check(struct mpc_table *, char *, char *);
 
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
+extern int acpi_madt_oem_check(char *, char *);
 
 #endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
index bbab5ccfd4fe..3bc407226578 100644
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ b/arch/x86/include/asm/mach-generic/mach_mpspec.h
@@ -7,6 +7,6 @@
 /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
 #define MAX_MP_BUSSES 260
 
-extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
-				char *productid);
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
 #endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index e3ace7d1d35d..59568bc4767f 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -39,17 +39,17 @@ struct intel_mp_floating {
 
 #define MPC_SIGNATURE "PCMP"
 
-struct mp_config_table {
-	char mpc_signature[4];
-	unsigned short mpc_length;	/* Size of table */
-	char mpc_spec;			/* 0x01 */
-	char mpc_checksum;
-	char mpc_oem[8];
-	char mpc_productid[12];
-	unsigned int mpc_oemptr;	/* 0 if not present */
-	unsigned short mpc_oemsize;	/* 0 if not present */
-	unsigned short mpc_oemcount;
-	unsigned int mpc_lapic;		/* APIC address */
+struct mpc_table {
+	char signature[4];
+	unsigned short length;		/* Size of table */
+	char spec;			/* 0x01 */
+	char checksum;
+	char oem[8];
+	char productid[12];
+	unsigned int oemptr;		/* 0 if not present */
+	unsigned short oemsize;		/* 0 if not present */
+	unsigned short oemcount;
+	unsigned int lapic;		/* APIC address */
 	unsigned int reserved;
 };
 
@@ -70,20 +70,20 @@ struct mp_config_table {
 #define CPU_MODEL_MASK	0x00F0
 #define CPU_FAMILY_MASK	0x0F00
 
-struct mpc_config_processor {
-	unsigned char mpc_type;
-	unsigned char mpc_apicid;	/* Local APIC number */
-	unsigned char mpc_apicver;	/* Its versions */
-	unsigned char mpc_cpuflag;
-	unsigned int mpc_cpufeature;
-	unsigned int mpc_featureflag;	/* CPUID feature value */
-	unsigned int mpc_reserved[2];
+struct mpc_cpu {
+	unsigned char type;
+	unsigned char apicid;		/* Local APIC number */
+	unsigned char apicver;		/* Its versions */
+	unsigned char cpuflag;
+	unsigned int cpufeature;
+	unsigned int featureflag;	/* CPUID feature value */
+	unsigned int reserved[2];
 };
 
-struct mpc_config_bus {
-	unsigned char mpc_type;
-	unsigned char mpc_busid;
-	unsigned char mpc_bustype[6];
+struct mpc_bus {
+	unsigned char type;
+	unsigned char busid;
+	unsigned char bustype[6];
 };
 
 /* List of Bus Type string values, Intel MP Spec. */
@@ -108,22 +108,22 @@ struct mpc_config_bus {
 
 #define MPC_APIC_USABLE		0x01
 
-struct mpc_config_ioapic {
-	unsigned char mpc_type;
-	unsigned char mpc_apicid;
-	unsigned char mpc_apicver;
-	unsigned char mpc_flags;
-	unsigned int mpc_apicaddr;
+struct mpc_ioapic {
+	unsigned char type;
+	unsigned char apicid;
+	unsigned char apicver;
+	unsigned char flags;
+	unsigned int apicaddr;
 };
 
-struct mpc_config_intsrc {
-	unsigned char mpc_type;
-	unsigned char mpc_irqtype;
-	unsigned short mpc_irqflag;
-	unsigned char mpc_srcbus;
-	unsigned char mpc_srcbusirq;
-	unsigned char mpc_dstapic;
-	unsigned char mpc_dstirq;
+struct mpc_intsrc {
+	unsigned char type;
+	unsigned char irqtype;
+	unsigned short irqflag;
+	unsigned char srcbus;
+	unsigned char srcbusirq;
+	unsigned char dstapic;
+	unsigned char dstirq;
 };
 
 enum mp_irq_source_types {
@@ -139,24 +139,24 @@ enum mp_irq_source_types {
 
 #define MP_APIC_ALL	0xFF
 
-struct mpc_config_lintsrc {
-	unsigned char mpc_type;
-	unsigned char mpc_irqtype;
-	unsigned short mpc_irqflag;
-	unsigned char mpc_srcbusid;
-	unsigned char mpc_srcbusirq;
-	unsigned char mpc_destapic;
-	unsigned char mpc_destapiclint;
+struct mpc_lintsrc {
+	unsigned char type;
+	unsigned char irqtype;
+	unsigned short irqflag;
+	unsigned char srcbusid;
+	unsigned char srcbusirq;
+	unsigned char destapic;
+	unsigned char destapiclint;
 };
 
 #define MPC_OEM_SIGNATURE "_OEM"
 
-struct mp_config_oemtable {
-	char oem_signature[4];
-	unsigned short oem_length;	/* Size of table */
-	char oem_rev;			/* 0x01 */
-	char oem_checksum;
-	char mpc_oem[8];
+struct mpc_oemtable {
+	char signature[4];
+	unsigned short length;		/* Size of table */
+	char rev;			/* 0x01 */
+	char checksum;
+	char mpc[8];
 };
 
 /*
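
The renames above keep the Intel MP Specification layout byte for byte; only the mpc_/oem_ field prefixes go away. For orientation, a table header is validated roughly as follows, a simplified sketch of the kind of check done in arch/x86/kernel/mpparse.c (mpc_table_ok is an illustrative name):

	/* sketch: per the MP spec, all bytes of the table sum to zero */
	static int mpc_table_ok(struct mpc_table *mpc)
	{
		unsigned char sum = 0;
		unsigned char *p = (unsigned char *)mpc;
		unsigned short i;

		if (memcmp(mpc->signature, MPC_SIGNATURE, 4) != 0)
			return 0;	/* not a "PCMP" table */
		for (i = 0; i < mpc->length; i++)
			sum += p[i];
		return sum == 0;
	}
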
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index c80f00d29965..bf37bc49bd8e 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
 
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
index 252292e077b6..a2eeefcd1cc7 100644
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ b/arch/x86/include/asm/numaq/mpparse.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_NUMAQ_MPPARSE_H
 #define __ASM_NUMAQ_MPPARSE_H
 
-extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
-				char *productid);
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
 
 #endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 66834c41c049..a977de23cb4d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
-static inline int __pcibus_to_node(struct pci_bus *bus)
+static inline int __pcibus_to_node(const struct pci_bus *bus)
 {
-	struct pci_sysdata *sd = bus->sysdata;
+	const struct pci_sysdata *sd = bus->sysdata;
 
 	return sd->node;
 }
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
 	return node_to_cpumask(__pcibus_to_node(bus));
 }
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpumask_of_node(__pcibus_to_node(bus));
+}
 #endif
 
 #endif /* _ASM_X86_PCI_H */
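
cpumask_of_pcibus() returns a pointer into the node-to-cpumask map rather than copying a full cpumask_t by value, matching cpumask_of_node() above. A hedged caller sketch (pick_local_cpu is an illustrative name, assuming CONFIG_NUMA):

	static int pick_local_cpu(struct pci_dev *pdev)
	{
		const struct cpumask *mask = cpumask_of_pcibus(pdev->bus);

		/* any online CPU on the device's node */
		return cpumask_any_and(mask, cpu_online_mask);
	}
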
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 4fcd53fd5f43..ebe858cdc8a3 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -25,9 +25,9 @@ extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
 /*
  * Any setup quirks to be performed?
  */
-struct mpc_config_processor;
-struct mpc_config_bus;
-struct mp_config_oemtable;
+struct mpc_cpu;
+struct mpc_bus;
+struct mpc_oemtable;
 struct x86_quirks {
 	int (*arch_pre_time_init)(void);
 	int (*arch_time_init)(void);
@@ -39,10 +39,10 @@ struct x86_quirks {
 	int (*mach_find_smp_config)(unsigned int reserve);
 
 	int *mpc_record;
-	int (*mpc_apic_id)(struct mpc_config_processor *m);
-	void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
-	void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
-	void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
+	int (*mpc_apic_id)(struct mpc_cpu *m);
+	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+	void (*mpc_oem_pci_bus)(struct mpc_bus *m);
+	void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
 				 unsigned short oemsize);
 	int (*setup_ioapic_ids)(void);
 	int (*update_genapic)(void);
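
Subarchitecture code fills these hooks in with the renamed parameter types; a minimal sketch modeled on the NumaQ quirks in arch/x86/kernel/numaq_32.c (the example_ names are illustrative, not from the patch):

	static int example_mpc_apic_id(struct mpc_cpu *m)
	{
		return m->apicid;	/* was m->mpc_apicid before the rename */
	}

	static struct x86_quirks example_quirks = {
		.mpc_apic_id	= example_mpc_apic_id,
		/* hooks left NULL are simply skipped by the callers */
	};
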
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 99327d1be49f..4bb5fb34f030 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
 	int i;
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
 		lid = cpu_2_logical_apicid[i];
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set >= nr_cpu_ids)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster.  If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid = 0xFF;
+	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return (int) 0xFF;
+		return apicid;
 
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
 
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == nr_cpu_ids)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster.  If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
 	free_cpumask_var(cpumask);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
index 013ce6fab2d5..380e86c02363 100644
--- a/arch/x86/include/asm/summit/mpparse.h
+++ b/arch/x86/include/asm/summit/mpparse.h
@@ -11,7 +11,7 @@ extern void setup_summit(void);
 #define setup_summit()	{}
 #endif
 
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
+static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
 		char *productid)
 {
 	if (!strncmp(oem, "IBM ENSW", 8) &&
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 79e31e9dcdda..4e2f2e0aab27 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
  *
  * Side note: this function creates the returned cpumask on the stack
  * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * cpumask_of_node function should be used whenever possible.
  */
 static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_to_cpumask_map[node];
 }
 
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
+extern const cpumask_t *cpumask_of_node(int node);
 extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &node_to_cpumask_map[node];
 }
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-		   v = _node_to_cpumask_ptr(node)
+		   v = cpumask_of_node(node)
 
 #endif /* CONFIG_X86_64 */
 
@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
 #define	cpu_to_node(cpu)	0
 #define	early_cpu_to_node(cpu)	0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const cpumask_t *cpumask_of_node(int node)
 {
 	return &cpu_online_map;
 }
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
+/*
+ * Replace default node_to_cpumask_ptr with optimized version
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
+		const cpumask_t *v = cpumask_of_node(node)
 
 #define node_to_cpumask_ptr_next(v, node)	\
-		   v = _node_to_cpumask_ptr(node)
+		   v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 {
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
+	return cpumask_first(cpumask_of_node(node));
 }
 #endif
 
 extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
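
For callers the conversion is mechanical: instead of declaring a pointer through the now-deprecated node_to_cpumask_ptr() macro, dereference cpumask_of_node() directly. A short sketch (count_node_cpus is an illustrative name):

	static int count_node_cpus(int node)
	{
		/* old, now deprecated:
		 *	node_to_cpumask_ptr(mask, node);
		 *	return cpus_weight(*mask);
		 */
		return cpumask_weight(cpumask_of_node(node));
	}
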
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 65d0b72777ea..29dc0c89d4af 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_madt_local_apic *lapic;
-	cpumask_t tmp_map, new_map;
+	cpumask_var_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
+	int retval = -ENOMEM;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
 
-	tmp_map = cpu_present_map;
+	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
+		goto free_tmp_map;
+
+	cpumask_copy(tmp_map, cpu_present_mask);
 	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
 	 * number, then the following will get us exactly what was mapped
 	 */
-	cpus_andnot(new_map, cpu_present_map, tmp_map);
-	if (cpus_empty(new_map)) {
+	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
+	if (cpumask_empty(new_map)) {
 		printk ("Unable to map lapic to logical cpu number\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto free_new_map;
 	}
 
-	cpu = first_cpu(new_map);
+	cpu = cpumask_first(new_map);
 
 	*pcpu = cpu;
-	return 0;
+	retval = 0;
+
+free_new_map:
+	free_cpumask_var(new_map);
+free_tmp_map:
+	free_cpumask_var(tmp_map);
+out:
+	return retval;
 }
 
 /* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;
 
 	return (0);
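
The conversion above replaces two on-stack cpumask_t copies (NR_CPUS/8 bytes each) with heap-backed cpumask_var_t, which is what forces the alloc/free bracketing and the goto-based unwind. Reduced to its skeleton (cpumask_pair_skeleton is an illustrative name):

	static int cpumask_pair_skeleton(void)
	{
		cpumask_var_t tmp_map, new_map;
		int retval = -ENOMEM;

		if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
			goto out;
		if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
			goto free_tmp_map;

		cpumask_copy(tmp_map, cpu_present_mask);
		/* ... work that may grow cpu_present_mask ... */
		cpumask_andnot(new_map, cpu_present_mask, tmp_map);
		retval = cpumask_empty(new_map) ? -EINVAL : 0;

		free_cpumask_var(new_map);
	free_tmp_map:
		free_cpumask_var(tmp_map);
	out:
		return retval;
	}
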
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 658e29e0f49b..5113c080f0c4 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -20,8 +20,12 @@
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#ifdef CONFIG_IOMMU_API
+#include <linux/iommu.h>
+#endif
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -38,6 +42,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+#ifdef CONFIG_IOMMU_API
+static struct iommu_ops amd_iommu_ops;
+#endif
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -47,6 +55,68 @@ struct iommu_cmd {
 
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 			     struct unity_map_entry *e);
+static struct dma_ops_domain *find_protection_domain(u16 devid);
+
+
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+/*
+ * Initialization code for statistics collection
+ */
+
+DECLARE_STATS_COUNTER(compl_wait);
+DECLARE_STATS_COUNTER(cnt_map_single);
+DECLARE_STATS_COUNTER(cnt_unmap_single);
+DECLARE_STATS_COUNTER(cnt_map_sg);
+DECLARE_STATS_COUNTER(cnt_unmap_sg);
+DECLARE_STATS_COUNTER(cnt_alloc_coherent);
+DECLARE_STATS_COUNTER(cnt_free_coherent);
+DECLARE_STATS_COUNTER(cross_page);
+DECLARE_STATS_COUNTER(domain_flush_single);
+DECLARE_STATS_COUNTER(domain_flush_all);
+DECLARE_STATS_COUNTER(alloced_io_mem);
+DECLARE_STATS_COUNTER(total_map_requests);
+
+static struct dentry *stats_dir;
+static struct dentry *de_isolate;
+static struct dentry *de_fflush;
+
+static void amd_iommu_stats_add(struct __iommu_counter *cnt)
+{
+	if (stats_dir == NULL)
+		return;
+
+	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
+				       &cnt->value);
+}
+
+static void amd_iommu_stats_init(void)
+{
+	stats_dir = debugfs_create_dir("amd-iommu", NULL);
+	if (stats_dir == NULL)
+		return;
+
+	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
+					 (u32 *)&amd_iommu_isolate);
+
+	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
+					 (u32 *)&amd_iommu_unmap_flush);
+
+	amd_iommu_stats_add(&compl_wait);
+	amd_iommu_stats_add(&cnt_map_single);
+	amd_iommu_stats_add(&cnt_unmap_single);
+	amd_iommu_stats_add(&cnt_map_sg);
+	amd_iommu_stats_add(&cnt_unmap_sg);
+	amd_iommu_stats_add(&cnt_alloc_coherent);
+	amd_iommu_stats_add(&cnt_free_coherent);
+	amd_iommu_stats_add(&cross_page);
+	amd_iommu_stats_add(&domain_flush_single);
+	amd_iommu_stats_add(&domain_flush_all);
+	amd_iommu_stats_add(&alloced_io_mem);
+	amd_iommu_stats_add(&total_map_requests);
+}
+
+#endif
 
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
189 spin_lock_irqsave(&iommu->lock, flags); 259 spin_lock_irqsave(&iommu->lock, flags);
190 ret = __iommu_queue_command(iommu, cmd); 260 ret = __iommu_queue_command(iommu, cmd);
191 if (!ret) 261 if (!ret)
192 iommu->need_sync = 1; 262 iommu->need_sync = true;
193 spin_unlock_irqrestore(&iommu->lock, flags); 263 spin_unlock_irqrestore(&iommu->lock, flags);
194 264
195 return ret; 265 return ret;
196} 266}
197 267
198/* 268/*
269 * This function waits until an IOMMU has completed a completion
270 * wait command
271 */
272static void __iommu_wait_for_completion(struct amd_iommu *iommu)
273{
274 int ready = 0;
275 unsigned status = 0;
276 unsigned long i = 0;
277
278 INC_STATS_COUNTER(compl_wait);
279
280 while (!ready && (i < EXIT_LOOP_COUNT)) {
281 ++i;
282 /* wait for the bit to become one */
283 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
284 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
285 }
286
287 /* set bit back to zero */
288 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
289 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
290
291 if (unlikely(i == EXIT_LOOP_COUNT))
292 panic("AMD IOMMU: Completion wait loop failed\n");
293}
294
295/*
296 * This function queues a completion wait command into the command
297 * buffer of an IOMMU
298 */
299static int __iommu_completion_wait(struct amd_iommu *iommu)
300{
301 struct iommu_cmd cmd;
302
303 memset(&cmd, 0, sizeof(cmd));
304 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
305 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
306
307 return __iommu_queue_command(iommu, &cmd);
308}
309
310/*
199 * This function is called whenever we need to ensure that the IOMMU has 311 * This function is called whenever we need to ensure that the IOMMU has
200 * completed execution of all commands we sent. It sends a 312 * completed execution of all commands we sent. It sends a
201 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs 313 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
@@ -204,40 +316,22 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
204 */ 316 */
205static int iommu_completion_wait(struct amd_iommu *iommu) 317static int iommu_completion_wait(struct amd_iommu *iommu)
206{ 318{
207 int ret = 0, ready = 0; 319 int ret = 0;
208 unsigned status = 0; 320 unsigned long flags;
209 struct iommu_cmd cmd;
210 unsigned long flags, i = 0;
211
212 memset(&cmd, 0, sizeof(cmd));
213 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
214 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
215 321
216 spin_lock_irqsave(&iommu->lock, flags); 322 spin_lock_irqsave(&iommu->lock, flags);
217 323
218 if (!iommu->need_sync) 324 if (!iommu->need_sync)
219 goto out; 325 goto out;
220 326
221 iommu->need_sync = 0; 327 ret = __iommu_completion_wait(iommu);
222 328
223 ret = __iommu_queue_command(iommu, &cmd); 329 iommu->need_sync = false;
224 330
225 if (ret) 331 if (ret)
226 goto out; 332 goto out;
227 333
228 while (!ready && (i < EXIT_LOOP_COUNT)) { 334 __iommu_wait_for_completion(iommu);
229 ++i;
230 /* wait for the bit to become one */
231 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
232 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
233 }
234
235 /* set bit back to zero */
236 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
237 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
238
239 if (unlikely(i == EXIT_LOOP_COUNT))
240 panic("AMD IOMMU: Completion wait loop failed\n");
241 335
242out: 336out:
243 spin_unlock_irqrestore(&iommu->lock, flags); 337 spin_unlock_irqrestore(&iommu->lock, flags);
@@ -264,6 +358,21 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 	return ret;
 }
 
+static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+					  u16 domid, int pde, int s)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	address &= PAGE_MASK;
+	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+	cmd->data[1] |= domid;
+	cmd->data[2] = lower_32_bits(address);
+	cmd->data[3] = upper_32_bits(address);
+	if (s) /* size bit - we flush more than one 4kb page */
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
 /*
  * Generic command send function for invalidaing TLB entries
  */
@@ -273,16 +382,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 	struct iommu_cmd cmd;
 	int ret;
 
-	memset(&cmd, 0, sizeof(cmd));
-	address &= PAGE_MASK;
-	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
-	cmd.data[1] |= domid;
-	cmd.data[2] = lower_32_bits(address);
-	cmd.data[3] = upper_32_bits(address);
-	if (s) /* size bit - we flush more than one 4kb page */
-		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
-		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+	__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
 
 	ret = iommu_queue_command(iommu, &cmd);
 
@@ -321,9 +421,35 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
 {
 	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 
+	INC_STATS_COUNTER(domain_flush_single);
+
 	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 }
 
+/*
+ * This function is used to flush the IO/TLB for a given protection domain
+ * on every IOMMU in the system
+ */
+static void iommu_flush_domain(u16 domid)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
+
+	INC_STATS_COUNTER(domain_flush_all);
+
+	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      domid, 1, 1);
+
+	list_for_each_entry(iommu, &amd_iommu_list, list) {
+		spin_lock_irqsave(&iommu->lock, flags);
+		__iommu_queue_command(iommu, &cmd);
+		__iommu_completion_wait(iommu);
+		__iommu_wait_for_completion(iommu);
+		spin_unlock_irqrestore(&iommu->lock, flags);
+	}
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -338,10 +464,10 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_map(struct protection_domain *dom,
-		     unsigned long bus_addr,
-		     unsigned long phys_addr,
-		     int prot)
+static int iommu_map_page(struct protection_domain *dom,
+			  unsigned long bus_addr,
+			  unsigned long phys_addr,
+			  int prot)
 {
 	u64 __pte, *pte, *page;
 
@@ -388,6 +514,28 @@ static int iommu_map(struct protection_domain *dom,
 	return 0;
 }
 
+static void iommu_unmap_page(struct protection_domain *dom,
+			     unsigned long bus_addr)
+{
+	u64 *pte;
+
+	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
+
+	if (!IOMMU_PTE_PRESENT(*pte))
+		return;
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
+
+	if (!IOMMU_PTE_PRESENT(*pte))
+		return;
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
+
+	*pte = 0;
+}
+
 /*
  * This function checks if a specific unity mapping entry is needed for
  * this specific IOMMU.
391/* 539/*
392 * This function checks if a specific unity mapping entry is needed for 540 * This function checks if a specific unity mapping entry is needed for
393 * this specific IOMMU. 541 * this specific IOMMU.
@@ -440,7 +588,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
440 588
441 for (addr = e->address_start; addr < e->address_end; 589 for (addr = e->address_start; addr < e->address_end;
442 addr += PAGE_SIZE) { 590 addr += PAGE_SIZE) {
443 ret = iommu_map(&dma_dom->domain, addr, addr, e->prot); 591 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
444 if (ret) 592 if (ret)
445 return ret; 593 return ret;
446 /* 594 /*
@@ -571,6 +719,16 @@ static u16 domain_id_alloc(void)
571 return id; 719 return id;
572} 720}
573 721
722static void domain_id_free(int id)
723{
724 unsigned long flags;
725
726 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
727 if (id > 0 && id < MAX_DOMAIN_ID)
728 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
729 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
730}
731
574/* 732/*
575 * Used to reserve address ranges in the aperture (e.g. for exclusion 733 * Used to reserve address ranges in the aperture (e.g. for exclusion
576 * ranges. 734 * ranges.
@@ -587,12 +745,12 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 	iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
-static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
+static void free_pagetable(struct protection_domain *domain)
 {
 	int i, j;
 	u64 *p1, *p2, *p3;
 
-	p1 = dma_dom->domain.pt_root;
+	p1 = domain->pt_root;
 
 	if (!p1)
 		return;
@@ -613,6 +771,8 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 	}
 
 	free_page((unsigned long)p1);
+
+	domain->pt_root = NULL;
 }
 
 /*
@@ -624,7 +784,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
 	if (!dom)
 		return;
 
-	dma_ops_free_pagetable(dom);
+	free_pagetable(&dom->domain);
 
 	kfree(dom->pte_pages);
 
@@ -663,6 +823,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 		goto free_dma_dom;
 	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
 	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+	dma_dom->domain.flags = PD_DMA_OPS_MASK;
 	dma_dom->domain.priv = dma_dom;
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
@@ -725,6 +886,15 @@ free_dma_dom:
 }
 
 /*
+ * little helper function to check whether a given protection domain is a
+ * dma_ops domain
+ */
+static bool dma_ops_domain(struct protection_domain *domain)
+{
+	return domain->flags & PD_DMA_OPS_MASK;
+}
+
+/*
  * Find out the protection domain structure for a given PCI device. This
  * will give us the pointer to the page table root for example.
  */
@@ -744,14 +914,15 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void set_device_domain(struct amd_iommu *iommu,
-			      struct protection_domain *domain,
-			      u16 devid)
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
 {
 	unsigned long flags;
-
 	u64 pte_root = virt_to_phys(domain->pt_root);
 
+	domain->dev_cnt += 1;
+
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
@@ -767,6 +938,116 @@ static void set_device_domain(struct amd_iommu *iommu,
767 iommu_queue_inv_dev_entry(iommu, devid); 938 iommu_queue_inv_dev_entry(iommu, devid);
768} 939}
769 940
941/*
942 * Removes a device from a protection domain (unlocked)
943 */
944static void __detach_device(struct protection_domain *domain, u16 devid)
945{
946
947 /* lock domain */
948 spin_lock(&domain->lock);
949
950 /* remove domain from the lookup table */
951 amd_iommu_pd_table[devid] = NULL;
952
953 /* remove entry from the device table seen by the hardware */
954 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
955 amd_iommu_dev_table[devid].data[1] = 0;
956 amd_iommu_dev_table[devid].data[2] = 0;
957
958 /* decrease reference counter */
959 domain->dev_cnt -= 1;
960
961 /* ready */
962 spin_unlock(&domain->lock);
963}
964
965/*
966 * Removes a device from a protection domain (with devtable_lock held)
967 */
968static void detach_device(struct protection_domain *domain, u16 devid)
969{
970 unsigned long flags;
971
972 /* lock device table */
973 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
974 __detach_device(domain, devid);
975 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
976}
977
978static int device_change_notifier(struct notifier_block *nb,
979 unsigned long action, void *data)
980{
981 struct device *dev = data;
982 struct pci_dev *pdev = to_pci_dev(dev);
983 u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
984 struct protection_domain *domain;
985 struct dma_ops_domain *dma_domain;
986 struct amd_iommu *iommu;
987 int order = amd_iommu_aperture_order;
988 unsigned long flags;
989
990 if (devid > amd_iommu_last_bdf)
991 goto out;
992
993 devid = amd_iommu_alias_table[devid];
994
995 iommu = amd_iommu_rlookup_table[devid];
996 if (iommu == NULL)
997 goto out;
998
999 domain = domain_for_device(devid);
1000
1001 if (domain && !dma_ops_domain(domain))
1002 WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
1003 "to a non-dma-ops domain\n", dev_name(dev));
1004
1005 switch (action) {
1006 case BUS_NOTIFY_BOUND_DRIVER:
1007 if (domain)
1008 goto out;
1009 dma_domain = find_protection_domain(devid);
1010 if (!dma_domain)
1011 dma_domain = iommu->default_dom;
1012 attach_device(iommu, &dma_domain->domain, devid);
1013 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
1014 "device %s\n", dma_domain->domain.id, dev_name(dev));
1015 break;
1016 case BUS_NOTIFY_UNBIND_DRIVER:
1017 if (!domain)
1018 goto out;
1019 detach_device(domain, devid);
1020 break;
1021 case BUS_NOTIFY_ADD_DEVICE:
1022 /* allocate a protection domain if a device is added */
1023 dma_domain = find_protection_domain(devid);
1024 if (dma_domain)
1025 goto out;
1026 dma_domain = dma_ops_domain_alloc(iommu, order);
1027 if (!dma_domain)
1028 goto out;
1029 dma_domain->target_dev = devid;
1030
1031 spin_lock_irqsave(&iommu_pd_list_lock, flags);
1032 list_add_tail(&dma_domain->list, &iommu_pd_list);
1033 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1034
1035 break;
1036 default:
1037 goto out;
1038 }
1039
1040 iommu_queue_inv_dev_entry(iommu, devid);
1041 iommu_completion_wait(iommu);
1042
1043out:
1044 return 0;
1045}
1046
1047struct notifier_block device_nb = {
1048 .notifier_call = device_change_notifier,
1049};
1050
770/***************************************************************************** 1051/*****************************************************************************
771 * 1052 *
772 * The next functions belong to the dma_ops mapping/unmapping code. 1053 * The next functions belong to the dma_ops mapping/unmapping code.
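The device_change_notifier() added above follows the standard driver-core bus
notifier pattern: one callback receives a BUS_NOTIFY_* action plus the struct
device it concerns, wrapped in a notifier_block that is registered against a
bus type (this patch does so later via bus_register_notifier(&pci_bus_type,
&device_nb)). A minimal sketch of that pattern, with hypothetical names,
stripped of the IOMMU-specific work:

	static int my_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
	{
		struct device *dev = data;

		switch (action) {
		case BUS_NOTIFY_ADD_DEVICE:	/* device discovered */
		case BUS_NOTIFY_BOUND_DRIVER:	/* driver attached */
		case BUS_NOTIFY_UNBIND_DRIVER:	/* driver detaching */
			dev_info(dev, "bus event %lu\n", action);
			break;
		}
		return 0;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_notifier,
	};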
@@ -802,7 +1083,6 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
802 list_for_each_entry(entry, &iommu_pd_list, list) { 1083 list_for_each_entry(entry, &iommu_pd_list, list) {
803 if (entry->target_dev == devid) { 1084 if (entry->target_dev == devid) {
804 ret = entry; 1085 ret = entry;
805 list_del(&ret->list);
806 break; 1086 break;
807 } 1087 }
808 } 1088 }
@@ -853,14 +1133,13 @@ static int get_device_resources(struct device *dev,
853 if (!dma_dom) 1133 if (!dma_dom)
854 dma_dom = (*iommu)->default_dom; 1134 dma_dom = (*iommu)->default_dom;
855 *domain = &dma_dom->domain; 1135 *domain = &dma_dom->domain;
856 set_device_domain(*iommu, *domain, *bdf); 1136 attach_device(*iommu, *domain, *bdf);
857 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " 1137 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
858 "device ", (*domain)->id); 1138 "device %s\n", (*domain)->id, dev_name(dev));
859 print_devid(_bdf, 1);
860 } 1139 }
861 1140
862 if (domain_for_device(_bdf) == NULL) 1141 if (domain_for_device(_bdf) == NULL)
863 set_device_domain(*iommu, *domain, _bdf); 1142 attach_device(*iommu, *domain, _bdf);
864 1143
865 return 1; 1144 return 1;
866} 1145}
@@ -946,6 +1225,11 @@ static dma_addr_t __map_single(struct device *dev,
946 pages = iommu_num_pages(paddr, size, PAGE_SIZE); 1225 pages = iommu_num_pages(paddr, size, PAGE_SIZE);
947 paddr &= PAGE_MASK; 1226 paddr &= PAGE_MASK;
948 1227
1228 INC_STATS_COUNTER(total_map_requests);
1229
1230 if (pages > 1)
1231 INC_STATS_COUNTER(cross_page);
1232
949 if (align) 1233 if (align)
950 align_mask = (1UL << get_order(size)) - 1; 1234 align_mask = (1UL << get_order(size)) - 1;
951 1235
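The INC_STATS_COUNTER()/ADD_STATS_COUNTER()/SUB_STATS_COUNTER() calls added
throughout this file are backed by macros in amd_iommu_types.h (changed by
this patch but not shown in this excerpt) and exported to debugfs by
amd_iommu_stats_init() below. A sketch of their likely shape, keyed off the
new CONFIG_AMD_IOMMU_STATS option so they compile away when disabled:

	#ifdef CONFIG_AMD_IOMMU_STATS
	struct __iommu_counter {
		char *name;
		struct dentry *dent;	/* debugfs entry */
		u64 value;
	};

	#define DECLARE_STATS_COUNTER(nm) \
		static struct __iommu_counter nm = { .name = #nm, }

	#define INC_STATS_COUNTER(name)		name.value += 1
	#define ADD_STATS_COUNTER(name, x)	name.value += (x)
	#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
	#else
	#define DECLARE_STATS_COUNTER(name)
	#define INC_STATS_COUNTER(name)
	#define ADD_STATS_COUNTER(name, x)
	#define SUB_STATS_COUNTER(name, x)
	#endif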
@@ -962,6 +1246,8 @@ static dma_addr_t __map_single(struct device *dev,
962 } 1246 }
963 address += offset; 1247 address += offset;
964 1248
1249 ADD_STATS_COUNTER(alloced_io_mem, size);
1250
965 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { 1251 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
966 iommu_flush_tlb(iommu, dma_dom->domain.id); 1252 iommu_flush_tlb(iommu, dma_dom->domain.id);
967 dma_dom->need_flush = false; 1253 dma_dom->need_flush = false;
@@ -998,6 +1284,8 @@ static void __unmap_single(struct amd_iommu *iommu,
998 start += PAGE_SIZE; 1284 start += PAGE_SIZE;
999 } 1285 }
1000 1286
1287 SUB_STATS_COUNTER(alloced_io_mem, size);
1288
1001 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1289 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1002 1290
1003 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1291 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
@@ -1019,6 +1307,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
1019 dma_addr_t addr; 1307 dma_addr_t addr;
1020 u64 dma_mask; 1308 u64 dma_mask;
1021 1309
1310 INC_STATS_COUNTER(cnt_map_single);
1311
1022 if (!check_device(dev)) 1312 if (!check_device(dev))
1023 return bad_dma_address; 1313 return bad_dma_address;
1024 1314
@@ -1030,6 +1320,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
1030 /* device not handled by any AMD IOMMU */ 1320 /* device not handled by any AMD IOMMU */
1031 return (dma_addr_t)paddr; 1321 return (dma_addr_t)paddr;
1032 1322
1323 if (!dma_ops_domain(domain))
1324 return bad_dma_address;
1325
1033 spin_lock_irqsave(&domain->lock, flags); 1326 spin_lock_irqsave(&domain->lock, flags);
1034 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false, 1327 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
1035 dma_mask); 1328 dma_mask);
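The !dma_ops_domain(domain) guards added to map_single() and the other
dma_ops entry points below stop DMA-API calls from touching a domain that was
handed to a device through the IOMMU API at the end of this file. The helper
itself is introduced elsewhere in this patch; a plausible sketch, assuming a
PD_DMA_OPS_MASK domain flag analogous to the PD_DEFAULT_MASK that
amd_iommu_init_dma_ops() sets on the default domains:

	/* sketch: true only for domains allocated for dma_ops use */
	static bool dma_ops_domain(struct protection_domain *domain)
	{
		return domain->flags & PD_DMA_OPS_MASK;
	}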
@@ -1055,11 +1348,16 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
1055 struct protection_domain *domain; 1348 struct protection_domain *domain;
1056 u16 devid; 1349 u16 devid;
1057 1350
1351 INC_STATS_COUNTER(cnt_unmap_single);
1352
1058 if (!check_device(dev) || 1353 if (!check_device(dev) ||
1059 !get_device_resources(dev, &iommu, &domain, &devid)) 1354 !get_device_resources(dev, &iommu, &domain, &devid))
1060 /* device not handled by any AMD IOMMU */ 1355 /* device not handled by any AMD IOMMU */
1061 return; 1356 return;
1062 1357
1358 if (!dma_ops_domain(domain))
1359 return;
1360
1063 spin_lock_irqsave(&domain->lock, flags); 1361 spin_lock_irqsave(&domain->lock, flags);
1064 1362
1065 __unmap_single(iommu, domain->priv, dma_addr, size, dir); 1363 __unmap_single(iommu, domain->priv, dma_addr, size, dir);
@@ -1104,6 +1402,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1104 int mapped_elems = 0; 1402 int mapped_elems = 0;
1105 u64 dma_mask; 1403 u64 dma_mask;
1106 1404
1405 INC_STATS_COUNTER(cnt_map_sg);
1406
1107 if (!check_device(dev)) 1407 if (!check_device(dev))
1108 return 0; 1408 return 0;
1109 1409
@@ -1114,6 +1414,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1114 if (!iommu || !domain) 1414 if (!iommu || !domain)
1115 return map_sg_no_iommu(dev, sglist, nelems, dir); 1415 return map_sg_no_iommu(dev, sglist, nelems, dir);
1116 1416
1417 if (!dma_ops_domain(domain))
1418 return 0;
1419
1117 spin_lock_irqsave(&domain->lock, flags); 1420 spin_lock_irqsave(&domain->lock, flags);
1118 1421
1119 for_each_sg(sglist, s, nelems, i) { 1422 for_each_sg(sglist, s, nelems, i) {
@@ -1163,10 +1466,15 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
1163 u16 devid; 1466 u16 devid;
1164 int i; 1467 int i;
1165 1468
1469 INC_STATS_COUNTER(cnt_unmap_sg);
1470
1166 if (!check_device(dev) || 1471 if (!check_device(dev) ||
1167 !get_device_resources(dev, &iommu, &domain, &devid)) 1472 !get_device_resources(dev, &iommu, &domain, &devid))
1168 return; 1473 return;
1169 1474
1475 if (!dma_ops_domain(domain))
1476 return;
1477
1170 spin_lock_irqsave(&domain->lock, flags); 1478 spin_lock_irqsave(&domain->lock, flags);
1171 1479
1172 for_each_sg(sglist, s, nelems, i) { 1480 for_each_sg(sglist, s, nelems, i) {
@@ -1194,6 +1502,8 @@ static void *alloc_coherent(struct device *dev, size_t size,
1194 phys_addr_t paddr; 1502 phys_addr_t paddr;
1195 u64 dma_mask = dev->coherent_dma_mask; 1503 u64 dma_mask = dev->coherent_dma_mask;
1196 1504
1505 INC_STATS_COUNTER(cnt_alloc_coherent);
1506
1197 if (!check_device(dev)) 1507 if (!check_device(dev))
1198 return NULL; 1508 return NULL;
1199 1509
@@ -1212,6 +1522,9 @@ static void *alloc_coherent(struct device *dev, size_t size,
1212 return virt_addr; 1522 return virt_addr;
1213 } 1523 }
1214 1524
1525 if (!dma_ops_domain(domain))
1526 goto out_free;
1527
1215 if (!dma_mask) 1528 if (!dma_mask)
1216 dma_mask = *dev->dma_mask; 1529 dma_mask = *dev->dma_mask;
1217 1530
@@ -1220,18 +1533,20 @@ static void *alloc_coherent(struct device *dev, size_t size,
1220 *dma_addr = __map_single(dev, iommu, domain->priv, paddr, 1533 *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
1221 size, DMA_BIDIRECTIONAL, true, dma_mask); 1534 size, DMA_BIDIRECTIONAL, true, dma_mask);
1222 1535
1223 if (*dma_addr == bad_dma_address) { 1536 if (*dma_addr == bad_dma_address)
1224 free_pages((unsigned long)virt_addr, get_order(size)); 1537 goto out_free;
1225 virt_addr = NULL;
1226 goto out;
1227 }
1228 1538
1229 iommu_completion_wait(iommu); 1539 iommu_completion_wait(iommu);
1230 1540
1231out:
1232 spin_unlock_irqrestore(&domain->lock, flags); 1541 spin_unlock_irqrestore(&domain->lock, flags);
1233 1542
1234 return virt_addr; 1543 return virt_addr;
1544
1545out_free:
1546
1547 free_pages((unsigned long)virt_addr, get_order(size));
1548
1549 return NULL;
1235} 1550}
1236 1551
1237/* 1552/*
@@ -1245,6 +1560,8 @@ static void free_coherent(struct device *dev, size_t size,
1245 struct protection_domain *domain; 1560 struct protection_domain *domain;
1246 u16 devid; 1561 u16 devid;
1247 1562
1563 INC_STATS_COUNTER(cnt_free_coherent);
1564
1248 if (!check_device(dev)) 1565 if (!check_device(dev))
1249 return; 1566 return;
1250 1567
@@ -1253,6 +1570,9 @@ static void free_coherent(struct device *dev, size_t size,
1253 if (!iommu || !domain) 1570 if (!iommu || !domain)
1254 goto free_mem; 1571 goto free_mem;
1255 1572
1573 if (!dma_ops_domain(domain))
1574 goto free_mem;
1575
1256 spin_lock_irqsave(&domain->lock, flags); 1576 spin_lock_irqsave(&domain->lock, flags);
1257 1577
1258 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 1578 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
@@ -1305,7 +1625,7 @@ static void prealloc_protection_domains(void)
1305 u16 devid; 1625 u16 devid;
1306 1626
1307 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1627 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1308 devid = (dev->bus->number << 8) | dev->devfn; 1628 devid = calc_devid(dev->bus->number, dev->devfn);
1309 if (devid > amd_iommu_last_bdf) 1629 if (devid > amd_iommu_last_bdf)
1310 continue; 1630 continue;
1311 devid = amd_iommu_alias_table[devid]; 1631 devid = amd_iommu_alias_table[devid];
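calc_devid() replaces the open-coded shift here and in
device_change_notifier() above. It packs a PCI bus/devfn pair into the 16-bit
requester ID that indexes the IOMMU device and alias tables; a sketch of the
helper as assumed from its uses in this file (defined in amd_iommu_types.h):

	/* sketch: bus in bits 15..8, devfn (slot/function) in bits 7..0 */
	#define calc_devid(bus, devfn)	(((bus) << 8) | (devfn))

For example, device 00:01.0 (bus 0, devfn 0x08) yields devid 0x0008.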
@@ -1352,6 +1672,7 @@ int __init amd_iommu_init_dma_ops(void)
1352 iommu->default_dom = dma_ops_domain_alloc(iommu, order); 1672 iommu->default_dom = dma_ops_domain_alloc(iommu, order);
1353 if (iommu->default_dom == NULL) 1673 if (iommu->default_dom == NULL)
1354 return -ENOMEM; 1674 return -ENOMEM;
1675 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
1355 ret = iommu_init_unity_mappings(iommu); 1676 ret = iommu_init_unity_mappings(iommu);
1356 if (ret) 1677 if (ret)
1357 goto free_domains; 1678 goto free_domains;
@@ -1375,6 +1696,12 @@ int __init amd_iommu_init_dma_ops(void)
1375 /* Make the driver finally visible to the drivers */ 1696 /* Make the driver finally visible to the drivers */
1376 dma_ops = &amd_iommu_dma_ops; 1697 dma_ops = &amd_iommu_dma_ops;
1377 1698
1699 register_iommu(&amd_iommu_ops);
1700
1701 bus_register_notifier(&pci_bus_type, &device_nb);
1702
1703 amd_iommu_stats_init();
1704
1378 return 0; 1705 return 0;
1379 1706
1380free_domains: 1707free_domains:
@@ -1386,3 +1713,224 @@ free_domains:
1386 1713
1387 return ret; 1714 return ret;
1388} 1715}
1716
1717/*****************************************************************************
1718 *
1719 * The following functions belong to the exported interface of AMD IOMMU
1720 *
1721 * This interface allows access to lower level functions of the IOMMU
1722 * like protection domain handling and assignment of devices to domains
1723 * which is not possible with the dma_ops interface.
1724 *
1725 *****************************************************************************/
1726
1727static void cleanup_domain(struct protection_domain *domain)
1728{
1729 unsigned long flags;
1730 u16 devid;
1731
1732 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1733
1734 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
1735 if (amd_iommu_pd_table[devid] == domain)
1736 __detach_device(domain, devid);
1737
1738 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1739}
1740
1741static int amd_iommu_domain_init(struct iommu_domain *dom)
1742{
1743 struct protection_domain *domain;
1744
1745 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1746 if (!domain)
1747 return -ENOMEM;
1748
1749 spin_lock_init(&domain->lock);
1750 domain->mode = PAGE_MODE_3_LEVEL;
1751 domain->id = domain_id_alloc();
1752 if (!domain->id)
1753 goto out_free;
1754 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1755 if (!domain->pt_root)
1756 goto out_free;
1757
1758 dom->priv = domain;
1759
1760 return 0;
1761
1762out_free:
1763 kfree(domain);
1764
1765 return -ENOMEM;
1766}
1767
1768static void amd_iommu_domain_destroy(struct iommu_domain *dom)
1769{
1770 struct protection_domain *domain = dom->priv;
1771
1772 if (!domain)
1773 return;
1774
1775 if (domain->dev_cnt > 0)
1776 cleanup_domain(domain);
1777
1778 BUG_ON(domain->dev_cnt != 0);
1779
1780 free_pagetable(domain);
1781
1782 domain_id_free(domain->id);
1783
1784 kfree(domain);
1785
1786 dom->priv = NULL;
1787}
1788
1789static void amd_iommu_detach_device(struct iommu_domain *dom,
1790 struct device *dev)
1791{
1792 struct protection_domain *domain = dom->priv;
1793 struct amd_iommu *iommu;
1794 struct pci_dev *pdev;
1795 u16 devid;
1796
1797 if (dev->bus != &pci_bus_type)
1798 return;
1799
1800 pdev = to_pci_dev(dev);
1801
1802 devid = calc_devid(pdev->bus->number, pdev->devfn);
1803
1804 if (devid > 0)
1805 detach_device(domain, devid);
1806
1807 iommu = amd_iommu_rlookup_table[devid];
1808 if (!iommu)
1809 return;
1810
1811 iommu_queue_inv_dev_entry(iommu, devid);
1812 iommu_completion_wait(iommu);
1813}
1814
1815static int amd_iommu_attach_device(struct iommu_domain *dom,
1816 struct device *dev)
1817{
1818 struct protection_domain *domain = dom->priv;
1819 struct protection_domain *old_domain;
1820 struct amd_iommu *iommu;
1821 struct pci_dev *pdev;
1822 u16 devid;
1823
1824 if (dev->bus != &pci_bus_type)
1825 return -EINVAL;
1826
1827 pdev = to_pci_dev(dev);
1828
1829 devid = calc_devid(pdev->bus->number, pdev->devfn);
1830
1831 if (devid >= amd_iommu_last_bdf ||
1832 devid != amd_iommu_alias_table[devid])
1833 return -EINVAL;
1834
1835 iommu = amd_iommu_rlookup_table[devid];
1836 if (!iommu)
1837 return -EINVAL;
1838
1839 old_domain = domain_for_device(devid);
1840 if (old_domain)
1841 return -EBUSY;
1842
1843 attach_device(iommu, domain, devid);
1844
1845 iommu_completion_wait(iommu);
1846
1847 return 0;
1848}
1849
1850static int amd_iommu_map_range(struct iommu_domain *dom,
1851 unsigned long iova, phys_addr_t paddr,
1852 size_t size, int iommu_prot)
1853{
1854 struct protection_domain *domain = dom->priv;
1855 unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
1856 int prot = 0;
1857 int ret;
1858
1859 if (iommu_prot & IOMMU_READ)
1860 prot |= IOMMU_PROT_IR;
1861 if (iommu_prot & IOMMU_WRITE)
1862 prot |= IOMMU_PROT_IW;
1863
1864 iova &= PAGE_MASK;
1865 paddr &= PAGE_MASK;
1866
1867 for (i = 0; i < npages; ++i) {
1868 ret = iommu_map_page(domain, iova, paddr, prot);
1869 if (ret)
1870 return ret;
1871
1872 iova += PAGE_SIZE;
1873 paddr += PAGE_SIZE;
1874 }
1875
1876 return 0;
1877}
1878
1879static void amd_iommu_unmap_range(struct iommu_domain *dom,
1880 unsigned long iova, size_t size)
1881{
1882
1883 struct protection_domain *domain = dom->priv;
1884 unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
1885
1886 iova &= PAGE_MASK;
1887
1888 for (i = 0; i < npages; ++i) {
1889 iommu_unmap_page(domain, iova);
1890 iova += PAGE_SIZE;
1891 }
1892
1893 iommu_flush_domain(domain->id);
1894}
1895
1896static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
1897 unsigned long iova)
1898{
1899 struct protection_domain *domain = dom->priv;
1900 unsigned long offset = iova & ~PAGE_MASK;
1901 phys_addr_t paddr;
1902 u64 *pte;
1903
1904 pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];
1905
1906 if (!IOMMU_PTE_PRESENT(*pte))
1907 return 0;
1908
1909 pte = IOMMU_PTE_PAGE(*pte);
1910 pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
1911
1912 if (!IOMMU_PTE_PRESENT(*pte))
1913 return 0;
1914
1915 pte = IOMMU_PTE_PAGE(*pte);
1916 pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
1917
1918 if (!IOMMU_PTE_PRESENT(*pte))
1919 return 0;
1920
1921 paddr = *pte & IOMMU_PAGE_MASK;
1922 paddr |= offset;
1923
1924 return paddr;
1925}
1926
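amd_iommu_iova_to_phys() above walks the three-level page table by slicing
9-bit indices out of the IOVA at each level. The index macros live in
amd_iommu_types.h; a sketch consistent with the walk above (4 KiB pages,
512 entries per level):

	#define IOMMU_PTE_L2_INDEX(address)	(((address) >> 30) & 0x1ffULL)
	#define IOMMU_PTE_L1_INDEX(address)	(((address) >> 21) & 0x1ffULL)
	#define IOMMU_PTE_L0_INDEX(address)	(((address) >> 12) & 0x1ffULL)

So for iova 0x40201000 the walk uses L2 index 1, L1 index 1 and L0 index 1,
and the low 12 bits (0x000) become the offset into the 4 KiB page.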
1927static struct iommu_ops amd_iommu_ops = {
1928 .domain_init = amd_iommu_domain_init,
1929 .domain_destroy = amd_iommu_domain_destroy,
1930 .attach_dev = amd_iommu_attach_device,
1931 .detach_dev = amd_iommu_detach_device,
1932 .map = amd_iommu_map_range,
1933 .unmap = amd_iommu_unmap_range,
1934 .iova_to_phys = amd_iommu_iova_to_phys,
1935};
1936
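With register_iommu(&amd_iommu_ops) in amd_iommu_init_dma_ops() above, these
callbacks become reachable through the generic IOMMU API, which KVM device
assignment builds on. A consumer-side sketch using that API as it existed
around this patch (error handling abbreviated; pdev/iova/paddr/size are
placeholders):

	struct iommu_domain *dom;

	dom = iommu_domain_alloc();		/* -> amd_iommu_domain_init */
	iommu_attach_device(dom, &pdev->dev);	/* -> attach_dev */
	iommu_map_range(dom, iova, paddr, size,
			IOMMU_READ | IOMMU_WRITE);	/* -> map */
	paddr = iommu_iova_to_phys(dom, iova);	/* -> iova_to_phys */
	iommu_unmap_range(dom, iova, size);	/* -> unmap */
	iommu_detach_device(dom, &pdev->dev);	/* -> detach_dev */
	iommu_domain_free(dom);			/* -> domain_destroy */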
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index fb85e8d466cc..42c33cebf00f 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -122,7 +122,8 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
122LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 122LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
123 we find in ACPI */ 123 we find in ACPI */
124unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 124unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
125int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */ 125bool amd_iommu_isolate = true; /* if true, device isolation is
126 enabled */
126bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 127bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
127 128
128LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 129LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@@ -245,12 +246,8 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
245/* Function to enable the hardware */ 246/* Function to enable the hardware */
246static void __init iommu_enable(struct amd_iommu *iommu) 247static void __init iommu_enable(struct amd_iommu *iommu)
247{ 248{
248 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU " 249 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
249 "at %02x:%02x.%x cap 0x%hx\n", 250 dev_name(&iommu->dev->dev), iommu->cap_ptr);
250 iommu->dev->bus->number,
251 PCI_SLOT(iommu->dev->devfn),
252 PCI_FUNC(iommu->dev->devfn),
253 iommu->cap_ptr);
254 251
255 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 252 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
256} 253}
@@ -1218,9 +1215,9 @@ static int __init parse_amd_iommu_options(char *str)
1218{ 1215{
1219 for (; *str; ++str) { 1216 for (; *str; ++str) {
1220 if (strncmp(str, "isolate", 7) == 0) 1217 if (strncmp(str, "isolate", 7) == 0)
1221 amd_iommu_isolate = 1; 1218 amd_iommu_isolate = true;
1222 if (strncmp(str, "share", 5) == 0) 1219 if (strncmp(str, "share", 5) == 0)
1223 amd_iommu_isolate = 0; 1220 amd_iommu_isolate = false;
1224 if (strncmp(str, "fullflush", 9) == 0) 1221 if (strncmp(str, "fullflush", 9) == 0)
1225 amd_iommu_unmap_flush = true; 1222 amd_iommu_unmap_flush = true;
1226 } 1223 }
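parse_amd_iommu_options() scans the amd_iommu= kernel parameter for the
substrings it recognizes, so the options above are chosen at boot. For
example, a hypothetical boot entry selecting flush-on-every-unmap:

	linux /boot/vmlinuz root=/dev/sda1 amd_iommu=fullflush

amd_iommu=isolate and amd_iommu=share likewise toggle per-device isolation,
with isolation now defaulting to on per the hunk above.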
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d652515e2855..566a08466b19 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -31,9 +31,11 @@
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/dmar.h> 32#include <linux/dmar.h>
33#include <linux/ftrace.h> 33#include <linux/ftrace.h>
34#include <linux/smp.h>
35#include <linux/nmi.h>
36#include <linux/timex.h>
34 37
35#include <asm/atomic.h> 38#include <asm/atomic.h>
36#include <asm/smp.h>
37#include <asm/mtrr.h> 39#include <asm/mtrr.h>
38#include <asm/mpspec.h> 40#include <asm/mpspec.h>
39#include <asm/desc.h> 41#include <asm/desc.h>
@@ -41,10 +43,8 @@
41#include <asm/hpet.h> 43#include <asm/hpet.h>
42#include <asm/pgalloc.h> 44#include <asm/pgalloc.h>
43#include <asm/i8253.h> 45#include <asm/i8253.h>
44#include <asm/nmi.h>
45#include <asm/idle.h> 46#include <asm/idle.h>
46#include <asm/proto.h> 47#include <asm/proto.h>
47#include <asm/timex.h>
48#include <asm/apic.h> 48#include <asm/apic.h>
49#include <asm/i8259.h> 49#include <asm/i8259.h>
50 50
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
140 struct clock_event_device *evt); 140 struct clock_event_device *evt);
141static void lapic_timer_setup(enum clock_event_mode mode, 141static void lapic_timer_setup(enum clock_event_mode mode,
142 struct clock_event_device *evt); 142 struct clock_event_device *evt);
143static void lapic_timer_broadcast(const cpumask_t *mask); 143static void lapic_timer_broadcast(const struct cpumask *mask);
144static void apic_pm_activate(void); 144static void apic_pm_activate(void);
145 145
146/* 146/*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
453/* 453/*
454 * Local APIC timer broadcast function 454 * Local APIC timer broadcast function
455 */ 455 */
456static void lapic_timer_broadcast(const cpumask_t *mask) 456static void lapic_timer_broadcast(const struct cpumask *mask)
457{ 457{
458#ifdef CONFIG_SMP 458#ifdef CONFIG_SMP
459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR); 459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
@@ -687,7 +687,7 @@ static int __init calibrate_APIC_clock(void)
687 local_irq_enable(); 687 local_irq_enable();
688 688
689 if (levt->features & CLOCK_EVT_FEAT_DUMMY) { 689 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
690 pr_warning("APIC timer disabled due to verification failure.\n"); 690 pr_warning("APIC timer disabled due to verification failure\n");
691 return -1; 691 return -1;
692 } 692 }
693 693
@@ -2087,14 +2087,12 @@ __cpuinit int apic_is_clustered_box(void)
2087 /* are we being called early in kernel startup? */ 2087 /* are we being called early in kernel startup? */
2088 if (bios_cpu_apicid) { 2088 if (bios_cpu_apicid) {
2089 id = bios_cpu_apicid[i]; 2089 id = bios_cpu_apicid[i];
2090 } 2090 } else if (i < nr_cpu_ids) {
2091 else if (i < nr_cpu_ids) {
2092 if (cpu_present(i)) 2091 if (cpu_present(i))
2093 id = per_cpu(x86_bios_cpu_apicid, i); 2092 id = per_cpu(x86_bios_cpu_apicid, i);
2094 else 2093 else
2095 continue; 2094 continue;
2096 } 2095 } else
2097 else
2098 break; 2096 break;
2099 2097
2100 if (id != BAD_APICID) 2098 if (id != BAD_APICID)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 42e0853030cb..3f95a40f718a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
355 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 355 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
356 } else if (smp_num_siblings > 1) { 356 } else if (smp_num_siblings > 1) {
357 357
358 if (smp_num_siblings > NR_CPUS) { 358 if (smp_num_siblings > nr_cpu_ids) {
359 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 359 printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
360 smp_num_siblings); 360 smp_num_siblings);
361 smp_num_siblings = 1; 361 smp_num_siblings = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 88ea02dcb622..28102ad1a363 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
517 } 517 }
518} 518}
519 519
520static void free_acpi_perf_data(void)
521{
522 unsigned int i;
523
524 /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
525 for_each_possible_cpu(i)
526 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
527 ->shared_cpu_map);
528 free_percpu(acpi_perf_data);
529}
530
520/* 531/*
521 * acpi_cpufreq_early_init - initialize ACPI P-States library 532 * acpi_cpufreq_early_init - initialize ACPI P-States library
522 * 533 *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
527 */ 538 */
528static int __init acpi_cpufreq_early_init(void) 539static int __init acpi_cpufreq_early_init(void)
529{ 540{
541 unsigned int i;
530 dprintk("acpi_cpufreq_early_init\n"); 542 dprintk("acpi_cpufreq_early_init\n");
531 543
532 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 544 acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
534 dprintk("Memory allocation error for acpi_perf_data.\n"); 546 dprintk("Memory allocation error for acpi_perf_data.\n");
535 return -ENOMEM; 547 return -ENOMEM;
536 } 548 }
549 for_each_possible_cpu(i) {
550 if (!alloc_cpumask_var_node(
551 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
552 GFP_KERNEL, cpu_to_node(i))) {
553
554 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
555 free_acpi_perf_data();
556 return -ENOMEM;
557 }
558 }
537 559
538 /* Do initialization in ACPI core */ 560 /* Do initialization in ACPI core */
539 acpi_processor_preregister_performance(acpi_perf_data); 561 acpi_processor_preregister_performance(acpi_perf_data);
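The new free_acpi_perf_data() pairs every per-CPU alloc_cpumask_var_node()
with a free_cpumask_var(). Because alloc_percpu() zeroes the backing memory
and freeing a NULL cpumask_var_t is a no-op, a single cleanup loop over all
possible CPUs also handles a partially failed allocation pass. The general
shape of the idiom (a sketch, not the driver code):

	cpumask_var_t mask;

	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)))
		return -ENOMEM;		/* mask stays NULL on failure */
	/* ... use mask ... */
	free_cpumask_var(mask);		/* no-op if mask is still NULL */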
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
604 */ 626 */
605 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 627 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
606 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 628 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
607 policy->cpus = perf->shared_cpu_map; 629 cpumask_copy(&policy->cpus, perf->shared_cpu_map);
608 } 630 }
609 policy->related_cpus = perf->shared_cpu_map; 631 cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
610 632
611#ifdef CONFIG_SMP 633#ifdef CONFIG_SMP
612 dmi_check_system(sw_any_bug_dmi_table); 634 dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
795 817
796 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 818 ret = cpufreq_register_driver(&acpi_cpufreq_driver);
797 if (ret) 819 if (ret)
798 free_percpu(acpi_perf_data); 820 free_acpi_perf_data();
799 821
800 return ret; 822 return ret;
801} 823}
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index b8e05ee4f736..beea4466b063 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -160,6 +160,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
160 switch (c->x86_model) { 160 switch (c->x86_model) {
161 case 0x0E: /* Core */ 161 case 0x0E: /* Core */
162 case 0x0F: /* Core Duo */ 162 case 0x0F: /* Core Duo */
163 case 0x16: /* Celeron Core */
163 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; 164 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
164 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); 165 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
165 case 0x0D: /* Pentium M (Dothan) */ 166 case 0x0D: /* Pentium M (Dothan) */
@@ -171,7 +172,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
171 } 172 }
172 173
173 if (c->x86 != 0xF) { 174 if (c->x86 != 0xF) {
174 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@vger.kernel.org>\n"); 175 if (!cpu_has(c, X86_FEATURE_EST))
176 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. "
177 "Please send an e-mail to <cpufreq@vger.kernel.org>\n");
175 return 0; 178 return 0;
176 } 179 }
177 180
@@ -274,6 +277,7 @@ static struct cpufreq_driver p4clockmod_driver = {
274 .name = "p4-clockmod", 277 .name = "p4-clockmod",
275 .owner = THIS_MODULE, 278 .owner = THIS_MODULE,
276 .attr = p4clockmod_attr, 279 .attr = p4clockmod_attr,
280 .hide_interface = 1,
277}; 281};
278 282
279 283
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7c7d56b43136..1b446d79a8fd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
310 goto err0; 310 goto err0;
311 } 311 }
312 312
313 if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
314 GFP_KERNEL)) {
315 retval = -ENOMEM;
316 goto err05;
317 }
318
313 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 319 if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
314 retval = -EIO; 320 retval = -EIO;
315 goto err1; 321 goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
412err2: 418err2:
413 acpi_processor_unregister_performance(acpi_processor_perf, 0); 419 acpi_processor_unregister_performance(acpi_processor_perf, 0);
414err1: 420err1:
421 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
422err05:
415 kfree(acpi_processor_perf); 423 kfree(acpi_processor_perf);
416err0: 424err0:
417 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); 425 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
652#ifdef CONFIG_X86_POWERNOW_K7_ACPI 660#ifdef CONFIG_X86_POWERNOW_K7_ACPI
653 if (acpi_processor_perf) { 661 if (acpi_processor_perf) {
654 acpi_processor_unregister_performance(acpi_processor_perf, 0); 662 acpi_processor_unregister_performance(acpi_processor_perf, 0);
663 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
655 kfree(acpi_processor_perf); 664 kfree(acpi_processor_perf);
656 } 665 }
657#endif 666#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 7f05f44b97e9..c3c9adbaa26f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
766static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 766static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
767{ 767{
768 struct cpufreq_frequency_table *powernow_table; 768 struct cpufreq_frequency_table *powernow_table;
769 int ret_val; 769 int ret_val = -ENODEV;
770 770
771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
772 dprintk("register performance failed: bad ACPI data\n"); 772 dprintk("register performance failed: bad ACPI data\n");
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
815 /* notify BIOS that we exist */ 815 /* notify BIOS that we exist */
816 acpi_processor_notify_smm(THIS_MODULE); 816 acpi_processor_notify_smm(THIS_MODULE);
817 817
818 if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
819 printk(KERN_ERR PFX
820 "unable to alloc powernow_k8_data cpumask\n");
821 ret_val = -ENOMEM;
822 goto err_out_mem;
823 }
824
818 return 0; 825 return 0;
819 826
820err_out_mem: 827err_out_mem:
@@ -826,7 +833,7 @@ err_out:
826 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 833 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
827 data->acpi_data.state_count = 0; 834 data->acpi_data.state_count = 0;
828 835
829 return -ENODEV; 836 return ret_val;
830} 837}
831 838
832static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) 839static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
929{ 936{
930 if (data->acpi_data.state_count) 937 if (data->acpi_data.state_count)
931 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 938 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
939 free_cpumask_var(data->acpi_data.shared_cpu_map);
932} 940}
933 941
934#else 942#else
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1134 data->cpu = pol->cpu; 1142 data->cpu = pol->cpu;
1135 data->currpstate = HW_PSTATE_INVALID; 1143 data->currpstate = HW_PSTATE_INVALID;
1136 1144
1137 if (powernow_k8_cpu_init_acpi(data)) { 1145 rc = powernow_k8_cpu_init_acpi(data);
1146 if (rc) {
1138 /* 1147 * Use the PSB BIOS structure. This is only available on
1139 * Use the PSB BIOS structure. This is only available on 1148 * an UP version, and is deprecated by AMD.
1140 * an UP version, and is deprecated by AMD. 1149 * an UP version, and is deprecated by AMD.
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1152 "ACPI maintainers and complain to your BIOS " 1161 "ACPI maintainers and complain to your BIOS "
1153 "vendor.\n"); 1162 "vendor.\n");
1154#endif 1163#endif
1155 kfree(data); 1164 goto err_out;
1156 return -ENODEV;
1157 } 1165 }
1158 if (pol->cpu != 0) { 1166 if (pol->cpu != 0) {
1159 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1167 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1160 "CPU other than CPU0. Complain to your BIOS " 1168 "CPU other than CPU0. Complain to your BIOS "
1161 "vendor.\n"); 1169 "vendor.\n");
1162 kfree(data); 1170 goto err_out;
1163 return -ENODEV;
1164 } 1171 }
1165 rc = find_psb_table(data); 1172 rc = find_psb_table(data);
1166 if (rc) { 1173 if (rc) {
1167 kfree(data); 1174 goto err_out;
1168 return -ENODEV;
1169 } 1175 }
1170 } 1176 }
1171 1177
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 3b5f06423e77..f0ea6fa2f53c 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -459,9 +459,7 @@ static int centrino_verify (struct cpufreq_policy *policy)
459 * Sets a new CPUFreq policy. 459 * Sets a new CPUFreq policy.
460 */ 460 */
461struct allmasks { 461struct allmasks {
462 cpumask_t online_policy_cpus;
463 cpumask_t saved_mask; 462 cpumask_t saved_mask;
464 cpumask_t set_mask;
465 cpumask_t covered_cpus; 463 cpumask_t covered_cpus;
466}; 464};
467 465
@@ -475,9 +473,7 @@ static int centrino_target (struct cpufreq_policy *policy,
475 int retval = 0; 473 int retval = 0;
476 unsigned int j, k, first_cpu, tmp; 474 unsigned int j, k, first_cpu, tmp;
477 CPUMASK_ALLOC(allmasks); 475 CPUMASK_ALLOC(allmasks);
478 CPUMASK_PTR(online_policy_cpus, allmasks);
479 CPUMASK_PTR(saved_mask, allmasks); 476 CPUMASK_PTR(saved_mask, allmasks);
480 CPUMASK_PTR(set_mask, allmasks);
481 CPUMASK_PTR(covered_cpus, allmasks); 477 CPUMASK_PTR(covered_cpus, allmasks);
482 478
483 if (unlikely(allmasks == NULL)) 479 if (unlikely(allmasks == NULL))
@@ -497,30 +493,28 @@ static int centrino_target (struct cpufreq_policy *policy,
497 goto out; 493 goto out;
498 } 494 }
499 495
500#ifdef CONFIG_HOTPLUG_CPU
501 /* cpufreq holds the hotplug lock, so we are safe from here on */
502 cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
503#else
504 *online_policy_cpus = policy->cpus;
505#endif
506
507 *saved_mask = current->cpus_allowed; 496 *saved_mask = current->cpus_allowed;
508 first_cpu = 1; 497 first_cpu = 1;
509 cpus_clear(*covered_cpus); 498 cpus_clear(*covered_cpus);
510 for_each_cpu_mask_nr(j, *online_policy_cpus) { 499 for_each_cpu_mask_nr(j, policy->cpus) {
500 const cpumask_t *mask;
501
502 /* cpufreq holds the hotplug lock, so we are safe here */
503 if (!cpu_online(j))
504 continue;
505
511 /* 506 /*
512 * Support for SMP systems. 507 * Support for SMP systems.
513 * Make sure we are running on the CPU that wants to change freq 508 * Make sure we are running on the CPU that wants to change freq
514 */ 509 */
515 cpus_clear(*set_mask);
516 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 510 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
517 cpus_or(*set_mask, *set_mask, *online_policy_cpus); 511 mask = &policy->cpus;
518 else 512 else
519 cpu_set(j, *set_mask); 513 mask = &cpumask_of_cpu(j);
520 514
521 set_cpus_allowed_ptr(current, set_mask); 515 set_cpus_allowed_ptr(current, mask);
522 preempt_disable(); 516 preempt_disable();
523 if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) { 517 if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
524 dprintk("couldn't limit to CPUs in this domain\n"); 518 dprintk("couldn't limit to CPUs in this domain\n");
525 retval = -EAGAIN; 519 retval = -EAGAIN;
526 if (first_cpu) { 520 if (first_cpu) {
@@ -548,7 +542,9 @@ static int centrino_target (struct cpufreq_policy *policy,
548 dprintk("target=%dkHz old=%d new=%d msr=%04x\n", 542 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
549 target_freq, freqs.old, freqs.new, msr); 543 target_freq, freqs.old, freqs.new, msr);
550 544
551 for_each_cpu_mask_nr(k, *online_policy_cpus) { 545 for_each_cpu_mask_nr(k, policy->cpus) {
546 if (!cpu_online(k))
547 continue;
552 freqs.cpu = k; 548 freqs.cpu = k;
553 cpufreq_notify_transition(&freqs, 549 cpufreq_notify_transition(&freqs,
554 CPUFREQ_PRECHANGE); 550 CPUFREQ_PRECHANGE);
@@ -571,7 +567,9 @@ static int centrino_target (struct cpufreq_policy *policy,
571 preempt_enable(); 567 preempt_enable();
572 } 568 }
573 569
574 for_each_cpu_mask_nr(k, *online_policy_cpus) { 570 for_each_cpu_mask_nr(k, policy->cpus) {
571 if (!cpu_online(k))
572 continue;
575 freqs.cpu = k; 573 freqs.cpu = k;
576 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 574 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
577 } 575 }
@@ -584,18 +582,17 @@ static int centrino_target (struct cpufreq_policy *policy,
584 * Best effort undo.. 582 * Best effort undo..
585 */ 583 */
586 584
587 if (!cpus_empty(*covered_cpus)) 585 for_each_cpu_mask_nr(j, *covered_cpus) {
588 for_each_cpu_mask_nr(j, *covered_cpus) { 586 set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
589 set_cpus_allowed_ptr(current, 587 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
590 &cpumask_of_cpu(j)); 588 }
591 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
592 }
593 589
594 tmp = freqs.new; 590 tmp = freqs.new;
595 freqs.new = freqs.old; 591 freqs.new = freqs.old;
596 freqs.old = tmp; 592 freqs.old = tmp;
597 for_each_cpu_mask_nr(j, *online_policy_cpus) { 593 for_each_cpu_mask_nr(j, policy->cpus) {
598 freqs.cpu = j; 594 if (!cpu_online(j))
595 continue;
599 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 596 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
600 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 597 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
601 } 598 }
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index 98d4fdb7dc04..cdac7d62369b 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -139,6 +139,15 @@ static unsigned int pentium_core_get_frequency(void)
139 case 3: 139 case 3:
140 fsb = 166667; 140 fsb = 166667;
141 break; 141 break;
142 case 2:
143 fsb = 200000;
144 break;
145 case 0:
146 fsb = 266667;
147 break;
148 case 4:
149 fsb = 333333;
150 break;
142 default: 151 default:
143 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); 152 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
144 } 153 }
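With these cases, pentium_core_get_frequency() can decode the low bits of
MSR_FSB_FREQ into an FSB clock in kHz for the added bus speeds. Combining the
new cases with the pre-existing ones (case 3 is visible above; the value for
case 1 is assumed from the unshown part of the switch):

	/* MSR_FSB_FREQ low bits -> FSB clock, kHz (sketch of full table) */
	0 -> 266667	1 -> 133333 (assumed)	2 -> 200000
	3 -> 166667	4 -> 333333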
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c6ecda64f5f1..48533d77be78 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
534 per_cpu(cpuid4_info, cpu) = NULL; 534 per_cpu(cpuid4_info, cpu) = NULL;
535} 535}
536 536
537static void get_cpu_leaves(void *_retval) 537static void __cpuinit get_cpu_leaves(void *_retval)
538{ 538{
539 int j, *retval = _retval, cpu = smp_processor_id(); 539 int j, *retval = _retval, cpu = smp_processor_id();
540 540
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 85d28d53f5d3..2ac1f0c2beb3 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
121 lock_kernel(); 121 lock_kernel();
122 122
123 cpu = iminor(file->f_path.dentry->d_inode); 123 cpu = iminor(file->f_path.dentry->d_inode);
124 if (cpu >= NR_CPUS || !cpu_online(cpu)) { 124 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
125 ret = -ENXIO; /* No such CPU */ 125 ret = -ENXIO; /* No such CPU */
126 goto out; 126 goto out;
127 } 127 }
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 4b8a53d841f7..11d5093eb281 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -11,15 +11,15 @@
11#include <linux/kernel_stat.h> 11#include <linux/kernel_stat.h>
12#include <linux/sysdev.h> 12#include <linux/sysdev.h>
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/acpi.h>
15#include <linux/io.h>
16#include <linux/delay.h>
14 17
15#include <asm/acpi.h>
16#include <asm/atomic.h> 18#include <asm/atomic.h>
17#include <asm/system.h> 19#include <asm/system.h>
18#include <asm/io.h>
19#include <asm/timer.h> 20#include <asm/timer.h>
20#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22#include <asm/delay.h>
23#include <asm/desc.h> 23#include <asm/desc.h>
24#include <asm/apic.h> 24#include <asm/apic.h>
25#include <asm/arch_hooks.h> 25#include <asm/arch_hooks.h>
@@ -323,7 +323,7 @@ void init_8259A(int auto_eoi)
323 outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ 323 outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
324 324
325 /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, 325 /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
326 to 0x20-0x27 on i386 */ 326 to 0x20-0x27 on i386 */
327 outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); 327 outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
328 328
329 /* 8259A-1 (the master) has a slave on IR2 */ 329 /* 8259A-1 (the master) has a slave on IR2 */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 45073520407f..1c4a1302536c 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -213,11 +213,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
213 213
214 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 214 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
215 if (cfg) { 215 if (cfg) {
216 /* FIXME: needs alloc_cpumask_var_node() */ 216 if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
217 if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
218 kfree(cfg); 217 kfree(cfg);
219 cfg = NULL; 218 cfg = NULL;
220 } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { 219 } else if (!alloc_cpumask_var_node(&cfg->old_domain,
220 GFP_ATOMIC, node)) {
221 free_cpumask_var(cfg->domain); 221 free_cpumask_var(cfg->domain);
222 kfree(cfg); 222 kfree(cfg);
223 cfg = NULL; 223 cfg = NULL;
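Switching from alloc_cpumask_var() to alloc_cpumask_var_node() resolves the
FIXME removed above: when CONFIG_CPUMASK_OFFSTACK turns cpumask_var_t into a
real allocation, the mask is now placed on the node that owns the IRQ
descriptor. A sketch of the difference:

	/* node-blind: memory comes from wherever the allocator prefers */
	alloc_cpumask_var(&cfg->domain, GFP_ATOMIC);

	/* node-aware: keeps the mask local to 'node' for NUMA locality */
	alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node);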
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 191914302744..b12208f4dfee 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -35,8 +35,8 @@ static void set_bitmap(unsigned long *bitmap, unsigned int base,
35 */ 35 */
36asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) 36asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
37{ 37{
38 struct thread_struct * t = &current->thread; 38 struct thread_struct *t = &current->thread;
39 struct tss_struct * tss; 39 struct tss_struct *tss;
40 unsigned int i, max_long, bytes, bytes_updated; 40 unsigned int i, max_long, bytes, bytes_updated;
41 41
42 if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) 42 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index bce53e1352a0..3973e2df7f87 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -5,10 +5,10 @@
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
6#include <linux/kernel_stat.h> 6#include <linux/kernel_stat.h>
7#include <linux/seq_file.h> 7#include <linux/seq_file.h>
8#include <linux/smp.h>
8 9
9#include <asm/apic.h> 10#include <asm/apic.h>
10#include <asm/io_apic.h> 11#include <asm/io_apic.h>
11#include <asm/smp.h>
12#include <asm/irq.h> 12#include <asm/irq.h>
13 13
14atomic_t irq_err_count; 14atomic_t irq_err_count;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9dc5588f336a..74b9ff7341e9 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -15,9 +15,9 @@
15#include <linux/notifier.h> 15#include <linux/notifier.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/uaccess.h>
18 19
19#include <asm/apic.h> 20#include <asm/apic.h>
20#include <asm/uaccess.h>
21 21
22DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 22DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
23EXPORT_PER_CPU_SYMBOL(irq_stat); 23EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -93,7 +93,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
93 return 0; 93 return 0;
94 94
95 /* build the stack frame on the IRQ stack */ 95 /* build the stack frame on the IRQ stack */
96 isp = (u32 *) ((char*)irqctx + sizeof(*irqctx)); 96 isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
97 irqctx->tinfo.task = curctx->tinfo.task; 97 irqctx->tinfo.task = curctx->tinfo.task;
98 irqctx->tinfo.previous_esp = current_stack_pointer; 98 irqctx->tinfo.previous_esp = current_stack_pointer;
99 99
@@ -137,7 +137,7 @@ void __cpuinit irq_ctx_init(int cpu)
137 137
138 hardirq_ctx[cpu] = irqctx; 138 hardirq_ctx[cpu] = irqctx;
139 139
140 irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; 140 irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
141 irqctx->tinfo.task = NULL; 141 irqctx->tinfo.task = NULL;
142 irqctx->tinfo.exec_domain = NULL; 142 irqctx->tinfo.exec_domain = NULL;
143 irqctx->tinfo.cpu = cpu; 143 irqctx->tinfo.cpu = cpu;
@@ -147,7 +147,7 @@ void __cpuinit irq_ctx_init(int cpu)
147 softirq_ctx[cpu] = irqctx; 147 softirq_ctx[cpu] = irqctx;
148 148
149 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", 149 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
150 cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); 150 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
151} 151}
152 152
153void irq_ctx_exit(int cpu) 153void irq_ctx_exit(int cpu)
@@ -174,7 +174,7 @@ asmlinkage void do_softirq(void)
174 irqctx->tinfo.previous_esp = current_stack_pointer; 174 irqctx->tinfo.previous_esp = current_stack_pointer;
175 175
176 /* build the stack frame on the softirq stack */ 176 /* build the stack frame on the softirq stack */
177 isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); 177 isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
178 178
179 call_on_stack(__do_softirq, isp); 179 call_on_stack(__do_softirq, isp);
180 /* 180 /*
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6383d50f82ea..63c88e6ec025 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -14,10 +14,10 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/ftrace.h> 16#include <linux/ftrace.h>
17#include <asm/uaccess.h> 17#include <linux/uaccess.h>
18#include <linux/smp.h>
18#include <asm/io_apic.h> 19#include <asm/io_apic.h>
19#include <asm/idle.h> 20#include <asm/idle.h>
20#include <asm/smp.h>
21 21
22/* 22/*
23 * Probabilistic stack overflow check: 23 * Probabilistic stack overflow check:
@@ -142,18 +142,18 @@ extern void call_softirq(void);
142 142
143asmlinkage void do_softirq(void) 143asmlinkage void do_softirq(void)
144{ 144{
145 __u32 pending; 145 __u32 pending;
146 unsigned long flags; 146 unsigned long flags;
147 147
148 if (in_interrupt()) 148 if (in_interrupt())
149 return; 149 return;
150 150
151 local_irq_save(flags); 151 local_irq_save(flags);
152 pending = local_softirq_pending(); 152 pending = local_softirq_pending();
153 /* Switch to interrupt stack */ 153 /* Switch to interrupt stack */
154 if (pending) { 154 if (pending) {
155 call_softirq(); 155 call_softirq();
156 WARN_ON_ONCE(softirq_count()); 156 WARN_ON_ONCE(softirq_count());
157 } 157 }
158 local_irq_restore(flags); 158 local_irq_restore(flags);
159} 159}
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 84723295f88a..1507ad4e674d 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -9,18 +9,18 @@
9#include <linux/kernel_stat.h> 9#include <linux/kernel_stat.h>
10#include <linux/sysdev.h> 10#include <linux/sysdev.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
12#include <linux/io.h>
13#include <linux/delay.h>
12 14
13#include <asm/atomic.h> 15#include <asm/atomic.h>
14#include <asm/system.h> 16#include <asm/system.h>
15#include <asm/io.h>
16#include <asm/timer.h> 17#include <asm/timer.h>
17#include <asm/pgtable.h> 18#include <asm/pgtable.h>
18#include <asm/delay.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/apic.h> 20#include <asm/apic.h>
21#include <asm/arch_hooks.h> 21#include <asm/arch_hooks.h>
22#include <asm/i8259.h> 22#include <asm/i8259.h>
23 23#include <asm/traps.h>
24 24
25 25
26/* 26/*
@@ -34,12 +34,10 @@
34 * leads to races. IBM designers who came up with it should 34 * leads to races. IBM designers who came up with it should
35 * be shot. 35 * be shot.
36 */ 36 */
37
38 37
39static irqreturn_t math_error_irq(int cpl, void *dev_id) 38static irqreturn_t math_error_irq(int cpl, void *dev_id)
40{ 39{
41 extern void math_error(void __user *); 40 outb(0, 0xF0);
42 outb(0,0xF0);
43 if (ignore_fpu_irq || !boot_cpu_data.hard_math) 41 if (ignore_fpu_irq || !boot_cpu_data.hard_math)
44 return IRQ_NONE; 42 return IRQ_NONE;
45 math_error((void __user *)get_irq_regs()->ip); 43 math_error((void __user *)get_irq_regs()->ip);
@@ -56,7 +54,7 @@ static struct irqaction fpu_irq = {
56 .name = "fpu", 54 .name = "fpu",
57}; 55};
58 56
59void __init init_ISA_irqs (void) 57void __init init_ISA_irqs(void)
60{ 58{
61 int i; 59 int i;
62 60
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31ebfe38e96c..da481a1e3f30 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -11,14 +11,14 @@
11#include <linux/kernel_stat.h> 11#include <linux/kernel_stat.h>
12#include <linux/sysdev.h> 12#include <linux/sysdev.h>
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/acpi.h>
15#include <linux/io.h>
16#include <linux/delay.h>
14 17
15#include <asm/acpi.h>
16#include <asm/atomic.h> 18#include <asm/atomic.h>
17#include <asm/system.h> 19#include <asm/system.h>
18#include <asm/io.h>
19#include <asm/hw_irq.h> 20#include <asm/hw_irq.h>
20#include <asm/pgtable.h> 21#include <asm/pgtable.h>
21#include <asm/delay.h>
22#include <asm/desc.h> 22#include <asm/desc.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <asm/i8259.h> 24#include <asm/i8259.h>
@@ -81,7 +81,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
81 return 0; 81 return 0;
82} 82}
83 83
84void __init init_ISA_irqs(void) 84static void __init init_ISA_irqs(void)
85{ 85{
86 int i; 86 int i;
87 87
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index cead7e27585d..c0601c2848a1 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -17,7 +17,6 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/acpi.h>
21 20
22#include <asm/mtrr.h> 21#include <asm/mtrr.h>
23#include <asm/mpspec.h> 22#include <asm/mpspec.h>
@@ -49,12 +48,12 @@ static int __init mpf_checksum(unsigned char *mp, int len)
49 return sum & 0xFF; 48 return sum & 0xFF;
50} 49}
51 50
52static void __init MP_processor_info(struct mpc_config_processor *m) 51static void __init MP_processor_info(struct mpc_cpu *m)
53{ 52{
54 int apicid; 53 int apicid;
55 char *bootup_cpu = ""; 54 char *bootup_cpu = "";
56 55
57 if (!(m->mpc_cpuflag & CPU_ENABLED)) { 56 if (!(m->cpuflag & CPU_ENABLED)) {
58 disabled_cpus++; 57 disabled_cpus++;
59 return; 58 return;
60 } 59 }
@@ -62,54 +61,54 @@ static void __init MP_processor_info(struct mpc_config_processor *m)
62 if (x86_quirks->mpc_apic_id) 61 if (x86_quirks->mpc_apic_id)
63 apicid = x86_quirks->mpc_apic_id(m); 62 apicid = x86_quirks->mpc_apic_id(m);
64 else 63 else
65 apicid = m->mpc_apicid; 64 apicid = m->apicid;
66 65
67 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { 66 if (m->cpuflag & CPU_BOOTPROCESSOR) {
68 bootup_cpu = " (Bootup-CPU)"; 67 bootup_cpu = " (Bootup-CPU)";
69 boot_cpu_physical_apicid = m->mpc_apicid; 68 boot_cpu_physical_apicid = m->apicid;
70 } 69 }
71 70
72 printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); 71 printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu);
73 generic_processor_info(apicid, m->mpc_apicver); 72 generic_processor_info(apicid, m->apicver);
74} 73}
75 74
76#ifdef CONFIG_X86_IO_APIC 75#ifdef CONFIG_X86_IO_APIC
77static void __init MP_bus_info(struct mpc_config_bus *m) 76static void __init MP_bus_info(struct mpc_bus *m)
78{ 77{
79 char str[7]; 78 char str[7];
80 memcpy(str, m->mpc_bustype, 6); 79 memcpy(str, m->bustype, 6);
81 str[6] = 0; 80 str[6] = 0;
82 81
83 if (x86_quirks->mpc_oem_bus_info) 82 if (x86_quirks->mpc_oem_bus_info)
84 x86_quirks->mpc_oem_bus_info(m, str); 83 x86_quirks->mpc_oem_bus_info(m, str);
85 else 84 else
86 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str); 85 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
87 86
88#if MAX_MP_BUSSES < 256 87#if MAX_MP_BUSSES < 256
89 if (m->mpc_busid >= MAX_MP_BUSSES) { 88 if (m->busid >= MAX_MP_BUSSES) {
90 printk(KERN_WARNING "MP table busid value (%d) for bustype %s " 89 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
91 " is too large, max. supported is %d\n", 90 " is too large, max. supported is %d\n",
92 m->mpc_busid, str, MAX_MP_BUSSES - 1); 91 m->busid, str, MAX_MP_BUSSES - 1);
93 return; 92 return;
94 } 93 }
95#endif 94#endif
96 95
97 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { 96 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
98 set_bit(m->mpc_busid, mp_bus_not_pci); 97 set_bit(m->busid, mp_bus_not_pci);
99#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 98#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
100 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; 99 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
101#endif 100#endif
102 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { 101 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
103 if (x86_quirks->mpc_oem_pci_bus) 102 if (x86_quirks->mpc_oem_pci_bus)
104 x86_quirks->mpc_oem_pci_bus(m); 103 x86_quirks->mpc_oem_pci_bus(m);
105 104
106 clear_bit(m->mpc_busid, mp_bus_not_pci); 105 clear_bit(m->busid, mp_bus_not_pci);
107#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 106#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
108 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; 107 mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
109 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { 108 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
110 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; 109 mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
111 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { 110 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
112 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; 111 mp_bus_id_to_type[m->busid] = MP_BUS_MCA;
113#endif 112#endif
114 } else 113 } else
115 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); 114 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
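The bulk of this mpparse.c churn is a mechanical rename of the MP-table types
declared in mpspec_def.h (also touched by this patch). The mapping, as
visible in the hunks here:

	struct mpc_config_processor -> struct mpc_cpu
	struct mpc_config_bus       -> struct mpc_bus
	struct mpc_config_ioapic    -> struct mpc_ioapic
	struct mpc_config_intsrc    -> struct mpc_intsrc

with the redundant mpc_ prefix dropped from the members as well
(m->mpc_apicid -> m->apicid, m->mpc_busid -> m->busid, and so on).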
@@ -133,32 +132,31 @@ static int bad_ioapic(unsigned long address)
133 return 0; 132 return 0;
134} 133}
135 134
136static void __init MP_ioapic_info(struct mpc_config_ioapic *m) 135static void __init MP_ioapic_info(struct mpc_ioapic *m)
137{ 136{
138 if (!(m->mpc_flags & MPC_APIC_USABLE)) 137 if (!(m->flags & MPC_APIC_USABLE))
139 return; 138 return;
140 139
141 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", 140 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
142 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); 141 m->apicid, m->apicver, m->apicaddr);
143 142
144 if (bad_ioapic(m->mpc_apicaddr)) 143 if (bad_ioapic(m->apicaddr))
145 return; 144 return;
146 145
147 mp_ioapics[nr_ioapics].mp_apicaddr = m->mpc_apicaddr; 146 mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
148 mp_ioapics[nr_ioapics].mp_apicid = m->mpc_apicid; 147 mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
149 mp_ioapics[nr_ioapics].mp_type = m->mpc_type; 148 mp_ioapics[nr_ioapics].mp_type = m->type;
150 mp_ioapics[nr_ioapics].mp_apicver = m->mpc_apicver; 149 mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
151 mp_ioapics[nr_ioapics].mp_flags = m->mpc_flags; 150 mp_ioapics[nr_ioapics].mp_flags = m->flags;
152 nr_ioapics++; 151 nr_ioapics++;
153} 152}
154 153
155static void print_MP_intsrc_info(struct mpc_config_intsrc *m) 154static void print_MP_intsrc_info(struct mpc_intsrc *m)
156{ 155{
157 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 156 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
158 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 157 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
159 m->mpc_irqtype, m->mpc_irqflag & 3, 158 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, 159 m->srcbusirq, m->dstapic, m->dstirq);
161 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
162} 160}
163 161
164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 162static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
@@ -170,52 +168,52 @@ static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
170 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); 168 mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
171} 169}
172 170
173static void __init assign_to_mp_irq(struct mpc_config_intsrc *m, 171static void __init assign_to_mp_irq(struct mpc_intsrc *m,
174 struct mp_config_intsrc *mp_irq) 172 struct mp_config_intsrc *mp_irq)
175{ 173{
176 mp_irq->mp_dstapic = m->mpc_dstapic; 174 mp_irq->mp_dstapic = m->dstapic;
177 mp_irq->mp_type = m->mpc_type; 175 mp_irq->mp_type = m->type;
178 mp_irq->mp_irqtype = m->mpc_irqtype; 176 mp_irq->mp_irqtype = m->irqtype;
179 mp_irq->mp_irqflag = m->mpc_irqflag; 177 mp_irq->mp_irqflag = m->irqflag;
180 mp_irq->mp_srcbus = m->mpc_srcbus; 178 mp_irq->mp_srcbus = m->srcbus;
181 mp_irq->mp_srcbusirq = m->mpc_srcbusirq; 179 mp_irq->mp_srcbusirq = m->srcbusirq;
182 mp_irq->mp_dstirq = m->mpc_dstirq; 180 mp_irq->mp_dstirq = m->dstirq;
183} 181}
184 182
185static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, 183static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
186 struct mpc_config_intsrc *m) 184 struct mpc_intsrc *m)
187{ 185{
188 m->mpc_dstapic = mp_irq->mp_dstapic; 186 m->dstapic = mp_irq->mp_dstapic;
189 m->mpc_type = mp_irq->mp_type; 187 m->type = mp_irq->mp_type;
190 m->mpc_irqtype = mp_irq->mp_irqtype; 188 m->irqtype = mp_irq->mp_irqtype;
191 m->mpc_irqflag = mp_irq->mp_irqflag; 189 m->irqflag = mp_irq->mp_irqflag;
192 m->mpc_srcbus = mp_irq->mp_srcbus; 190 m->srcbus = mp_irq->mp_srcbus;
193 m->mpc_srcbusirq = mp_irq->mp_srcbusirq; 191 m->srcbusirq = mp_irq->mp_srcbusirq;
194 m->mpc_dstirq = mp_irq->mp_dstirq; 192 m->dstirq = mp_irq->mp_dstirq;
195} 193}
196 194
197static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, 195static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
198 struct mpc_config_intsrc *m) 196 struct mpc_intsrc *m)
199{ 197{
200 if (mp_irq->mp_dstapic != m->mpc_dstapic) 198 if (mp_irq->mp_dstapic != m->dstapic)
201 return 1; 199 return 1;
202 if (mp_irq->mp_type != m->mpc_type) 200 if (mp_irq->mp_type != m->type)
203 return 2; 201 return 2;
204 if (mp_irq->mp_irqtype != m->mpc_irqtype) 202 if (mp_irq->mp_irqtype != m->irqtype)
205 return 3; 203 return 3;
206 if (mp_irq->mp_irqflag != m->mpc_irqflag) 204 if (mp_irq->mp_irqflag != m->irqflag)
207 return 4; 205 return 4;
208 if (mp_irq->mp_srcbus != m->mpc_srcbus) 206 if (mp_irq->mp_srcbus != m->srcbus)
209 return 5; 207 return 5;
210 if (mp_irq->mp_srcbusirq != m->mpc_srcbusirq) 208 if (mp_irq->mp_srcbusirq != m->srcbusirq)
211 return 6; 209 return 6;
212 if (mp_irq->mp_dstirq != m->mpc_dstirq) 210 if (mp_irq->mp_dstirq != m->dstirq)
213 return 7; 211 return 7;
214 212
215 return 0; 213 return 0;
216} 214}
217 215
218static void __init MP_intsrc_info(struct mpc_config_intsrc *m) 216static void __init MP_intsrc_info(struct mpc_intsrc *m)
219{ 217{
220 int i; 218 int i;
221 219
@@ -233,57 +231,55 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
233 231
234#endif 232#endif
235 233
236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) 234static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
237{ 235{
238 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," 236 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n", 237 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
240 m->mpc_irqtype, m->mpc_irqflag & 3, 238 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, 239 m->srcbusirq, m->destapic, m->destapiclint);
242 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
243} 240}
244 241
245/* 242/*
246 * Read/parse the MPC 243 * Read/parse the MPC
247 */ 244 */
248 245
249static int __init smp_check_mpc(struct mp_config_table *mpc, char *oem, 246static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
250 char *str)
251{ 247{
252 248
253 if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { 249 if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
254 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", 250 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
255 mpc->mpc_signature[0], mpc->mpc_signature[1], 251 mpc->signature[0], mpc->signature[1],
256 mpc->mpc_signature[2], mpc->mpc_signature[3]); 252 mpc->signature[2], mpc->signature[3]);
257 return 0; 253 return 0;
258 } 254 }
259 if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { 255 if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
260 printk(KERN_ERR "MPTABLE: checksum error!\n"); 256 printk(KERN_ERR "MPTABLE: checksum error!\n");
261 return 0; 257 return 0;
262 } 258 }
263 if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { 259 if (mpc->spec != 0x01 && mpc->spec != 0x04) {
264 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", 260 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
265 mpc->mpc_spec); 261 mpc->spec);
266 return 0; 262 return 0;
267 } 263 }
268 if (!mpc->mpc_lapic) { 264 if (!mpc->lapic) {
269 printk(KERN_ERR "MPTABLE: null local APIC address!\n"); 265 printk(KERN_ERR "MPTABLE: null local APIC address!\n");
270 return 0; 266 return 0;
271 } 267 }
272 memcpy(oem, mpc->mpc_oem, 8); 268 memcpy(oem, mpc->oem, 8);
273 oem[8] = 0; 269 oem[8] = 0;
274 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem); 270 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
275 271
276 memcpy(str, mpc->mpc_productid, 12); 272 memcpy(str, mpc->productid, 12);
277 str[12] = 0; 273 str[12] = 0;
278 274
279 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); 275 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
280 276
281 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); 277 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic);
282 278
283 return 1; 279 return 1;
284} 280}
285 281
286static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) 282static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
287{ 283{
288 char str[16]; 284 char str[16];
289 char oem[10]; 285 char oem[10];
@@ -308,14 +304,14 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
308#endif 304#endif
309 /* save the local APIC address, it might be non-default */ 305 /* save the local APIC address, it might be non-default */
310 if (!acpi_lapic) 306 if (!acpi_lapic)
311 mp_lapic_addr = mpc->mpc_lapic; 307 mp_lapic_addr = mpc->lapic;
312 308
313 if (early) 309 if (early)
314 return 1; 310 return 1;
315 311
316 if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) { 312 if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) {
317 struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr; 313 struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr;
318 x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize); 314 x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize);
319 } 315 }
320 316
321 /* 317 /*
@@ -324,12 +320,11 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
324 if (x86_quirks->mpc_record) 320 if (x86_quirks->mpc_record)
325 *x86_quirks->mpc_record = 0; 321 *x86_quirks->mpc_record = 0;
326 322
327 while (count < mpc->mpc_length) { 323 while (count < mpc->length) {
328 switch (*mpt) { 324 switch (*mpt) {
329 case MP_PROCESSOR: 325 case MP_PROCESSOR:
330 { 326 {
331 struct mpc_config_processor *m = 327 struct mpc_cpu *m = (struct mpc_cpu *)mpt;
332 (struct mpc_config_processor *)mpt;
333 /* ACPI may have already provided this data */ 328 /* ACPI may have already provided this data */
334 if (!acpi_lapic) 329 if (!acpi_lapic)
335 MP_processor_info(m); 330 MP_processor_info(m);
@@ -339,8 +334,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
339 } 334 }
340 case MP_BUS: 335 case MP_BUS:
341 { 336 {
342 struct mpc_config_bus *m = 337 struct mpc_bus *m = (struct mpc_bus *)mpt;
343 (struct mpc_config_bus *)mpt;
344#ifdef CONFIG_X86_IO_APIC 338#ifdef CONFIG_X86_IO_APIC
345 MP_bus_info(m); 339 MP_bus_info(m);
346#endif 340#endif
@@ -351,30 +345,28 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
351 case MP_IOAPIC: 345 case MP_IOAPIC:
352 { 346 {
353#ifdef CONFIG_X86_IO_APIC 347#ifdef CONFIG_X86_IO_APIC
354 struct mpc_config_ioapic *m = 348 struct mpc_ioapic *m = (struct mpc_ioapic *)mpt;
355 (struct mpc_config_ioapic *)mpt;
356 MP_ioapic_info(m); 349 MP_ioapic_info(m);
357#endif 350#endif
358 mpt += sizeof(struct mpc_config_ioapic); 351 mpt += sizeof(struct mpc_ioapic);
359 count += sizeof(struct mpc_config_ioapic); 352 count += sizeof(struct mpc_ioapic);
360 break; 353 break;
361 } 354 }
362 case MP_INTSRC: 355 case MP_INTSRC:
363 { 356 {
364#ifdef CONFIG_X86_IO_APIC 357#ifdef CONFIG_X86_IO_APIC
365 struct mpc_config_intsrc *m = 358 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
366 (struct mpc_config_intsrc *)mpt;
367 359
368 MP_intsrc_info(m); 360 MP_intsrc_info(m);
369#endif 361#endif
370 mpt += sizeof(struct mpc_config_intsrc); 362 mpt += sizeof(struct mpc_intsrc);
371 count += sizeof(struct mpc_config_intsrc); 363 count += sizeof(struct mpc_intsrc);
372 break; 364 break;
373 } 365 }
374 case MP_LINTSRC: 366 case MP_LINTSRC:
375 { 367 {
376 struct mpc_config_lintsrc *m = 368 struct mpc_lintsrc *m =
377 (struct mpc_config_lintsrc *)mpt; 369 (struct mpc_lintsrc *)mpt;
378 MP_lintsrc_info(m); 370 MP_lintsrc_info(m);
379 mpt += sizeof(*m); 371 mpt += sizeof(*m);
380 count += sizeof(*m); 372 count += sizeof(*m);
@@ -385,8 +377,8 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
385 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); 377 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
386 printk(KERN_ERR "type %x\n", *mpt); 378 printk(KERN_ERR "type %x\n", *mpt);
387 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 379 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
388 1, mpc, mpc->mpc_length, 1); 380 1, mpc, mpc->length, 1);
389 count = mpc->mpc_length; 381 count = mpc->length;
390 break; 382 break;
391 } 383 }
392 if (x86_quirks->mpc_record) 384 if (x86_quirks->mpc_record)
@@ -417,16 +409,16 @@ static int __init ELCR_trigger(unsigned int irq)
417 409
418static void __init construct_default_ioirq_mptable(int mpc_default_type) 410static void __init construct_default_ioirq_mptable(int mpc_default_type)
419{ 411{
420 struct mpc_config_intsrc intsrc; 412 struct mpc_intsrc intsrc;
421 int i; 413 int i;
422 int ELCR_fallback = 0; 414 int ELCR_fallback = 0;
423 415
424 intsrc.mpc_type = MP_INTSRC; 416 intsrc.type = MP_INTSRC;
425 intsrc.mpc_irqflag = 0; /* conforming */ 417 intsrc.irqflag = 0; /* conforming */
426 intsrc.mpc_srcbus = 0; 418 intsrc.srcbus = 0;
427 intsrc.mpc_dstapic = mp_ioapics[0].mp_apicid; 419 intsrc.dstapic = mp_ioapics[0].mp_apicid;
428 420
429 intsrc.mpc_irqtype = mp_INT; 421 intsrc.irqtype = mp_INT;
430 422
431 /* 423 /*
432 * If true, we have an ISA/PCI system with no IRQ entries 424 * If true, we have an ISA/PCI system with no IRQ entries
@@ -469,30 +461,30 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
469 * irqflag field (level sensitive, active high polarity). 461 * irqflag field (level sensitive, active high polarity).
470 */ 462 */
471 if (ELCR_trigger(i)) 463 if (ELCR_trigger(i))
472 intsrc.mpc_irqflag = 13; 464 intsrc.irqflag = 13;
473 else 465 else
474 intsrc.mpc_irqflag = 0; 466 intsrc.irqflag = 0;
475 } 467 }
476 468
477 intsrc.mpc_srcbusirq = i; 469 intsrc.srcbusirq = i;
478 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ 470 intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
479 MP_intsrc_info(&intsrc); 471 MP_intsrc_info(&intsrc);
480 } 472 }
481 473
482 intsrc.mpc_irqtype = mp_ExtINT; 474 intsrc.irqtype = mp_ExtINT;
483 intsrc.mpc_srcbusirq = 0; 475 intsrc.srcbusirq = 0;
484 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ 476 intsrc.dstirq = 0; /* 8259A to INTIN0 */
485 MP_intsrc_info(&intsrc); 477 MP_intsrc_info(&intsrc);
486} 478}
487 479
488 480
489static void __init construct_ioapic_table(int mpc_default_type) 481static void __init construct_ioapic_table(int mpc_default_type)
490{ 482{
491 struct mpc_config_ioapic ioapic; 483 struct mpc_ioapic ioapic;
492 struct mpc_config_bus bus; 484 struct mpc_bus bus;
493 485
494 bus.mpc_type = MP_BUS; 486 bus.type = MP_BUS;
495 bus.mpc_busid = 0; 487 bus.busid = 0;
496 switch (mpc_default_type) { 488 switch (mpc_default_type) {
497 default: 489 default:
498 printk(KERN_ERR "???\nUnknown standard configuration %d\n", 490 printk(KERN_ERR "???\nUnknown standard configuration %d\n",
@@ -500,29 +492,29 @@ static void __init construct_ioapic_table(int mpc_default_type)
500 /* fall through */ 492 /* fall through */
501 case 1: 493 case 1:
502 case 5: 494 case 5:
503 memcpy(bus.mpc_bustype, "ISA ", 6); 495 memcpy(bus.bustype, "ISA ", 6);
504 break; 496 break;
505 case 2: 497 case 2:
506 case 6: 498 case 6:
507 case 3: 499 case 3:
508 memcpy(bus.mpc_bustype, "EISA ", 6); 500 memcpy(bus.bustype, "EISA ", 6);
509 break; 501 break;
510 case 4: 502 case 4:
511 case 7: 503 case 7:
512 memcpy(bus.mpc_bustype, "MCA ", 6); 504 memcpy(bus.bustype, "MCA ", 6);
513 } 505 }
514 MP_bus_info(&bus); 506 MP_bus_info(&bus);
515 if (mpc_default_type > 4) { 507 if (mpc_default_type > 4) {
516 bus.mpc_busid = 1; 508 bus.busid = 1;
517 memcpy(bus.mpc_bustype, "PCI ", 6); 509 memcpy(bus.bustype, "PCI ", 6);
518 MP_bus_info(&bus); 510 MP_bus_info(&bus);
519 } 511 }
520 512
521 ioapic.mpc_type = MP_IOAPIC; 513 ioapic.type = MP_IOAPIC;
522 ioapic.mpc_apicid = 2; 514 ioapic.apicid = 2;
523 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; 515 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
524 ioapic.mpc_flags = MPC_APIC_USABLE; 516 ioapic.flags = MPC_APIC_USABLE;
525 ioapic.mpc_apicaddr = 0xFEC00000; 517 ioapic.apicaddr = 0xFEC00000;
526 MP_ioapic_info(&ioapic); 518 MP_ioapic_info(&ioapic);
527 519
528 /* 520 /*
@@ -536,8 +528,8 @@ static inline void __init construct_ioapic_table(int mpc_default_type) { }
536 528
537static inline void __init construct_default_ISA_mptable(int mpc_default_type) 529static inline void __init construct_default_ISA_mptable(int mpc_default_type)
538{ 530{
539 struct mpc_config_processor processor; 531 struct mpc_cpu processor;
540 struct mpc_config_lintsrc lintsrc; 532 struct mpc_lintsrc lintsrc;
541 int linttypes[2] = { mp_ExtINT, mp_NMI }; 533 int linttypes[2] = { mp_ExtINT, mp_NMI };
542 int i; 534 int i;
543 535
@@ -549,30 +541,30 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
549 /* 541 /*
550 * 2 CPUs, numbered 0 & 1. 542 * 2 CPUs, numbered 0 & 1.
551 */ 543 */
552 processor.mpc_type = MP_PROCESSOR; 544 processor.type = MP_PROCESSOR;
553 /* Either an integrated APIC or a discrete 82489DX. */ 545 /* Either an integrated APIC or a discrete 82489DX. */
554 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; 546 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
555 processor.mpc_cpuflag = CPU_ENABLED; 547 processor.cpuflag = CPU_ENABLED;
556 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 548 processor.cpufeature = (boot_cpu_data.x86 << 8) |
557 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; 549 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
558 processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; 550 processor.featureflag = boot_cpu_data.x86_capability[0];
559 processor.mpc_reserved[0] = 0; 551 processor.reserved[0] = 0;
560 processor.mpc_reserved[1] = 0; 552 processor.reserved[1] = 0;
561 for (i = 0; i < 2; i++) { 553 for (i = 0; i < 2; i++) {
562 processor.mpc_apicid = i; 554 processor.apicid = i;
563 MP_processor_info(&processor); 555 MP_processor_info(&processor);
564 } 556 }
565 557
566 construct_ioapic_table(mpc_default_type); 558 construct_ioapic_table(mpc_default_type);
567 559
568 lintsrc.mpc_type = MP_LINTSRC; 560 lintsrc.type = MP_LINTSRC;
569 lintsrc.mpc_irqflag = 0; /* conforming */ 561 lintsrc.irqflag = 0; /* conforming */
570 lintsrc.mpc_srcbusid = 0; 562 lintsrc.srcbusid = 0;
571 lintsrc.mpc_srcbusirq = 0; 563 lintsrc.srcbusirq = 0;
572 lintsrc.mpc_destapic = MP_APIC_ALL; 564 lintsrc.destapic = MP_APIC_ALL;
573 for (i = 0; i < 2; i++) { 565 for (i = 0; i < 2; i++) {
574 lintsrc.mpc_irqtype = linttypes[i]; 566 lintsrc.irqtype = linttypes[i];
575 lintsrc.mpc_destapiclint = i; 567 lintsrc.destapiclint = i;
576 MP_lintsrc_info(&lintsrc); 568 MP_lintsrc_info(&lintsrc);
577 } 569 }
578} 570}
@@ -657,15 +649,15 @@ static void __init __get_smp_config(unsigned int early)
657 * ISA defaults and hope it will work. 649 * ISA defaults and hope it will work.
658 */ 650 */
659 if (!mp_irq_entries) { 651 if (!mp_irq_entries) {
660 struct mpc_config_bus bus; 652 struct mpc_bus bus;
661 653
662 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " 654 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
663 "using default mptable. " 655 "using default mptable. "
664 "(tell your hw vendor)\n"); 656 "(tell your hw vendor)\n");
665 657
666 bus.mpc_type = MP_BUS; 658 bus.type = MP_BUS;
667 bus.mpc_busid = 0; 659 bus.busid = 0;
668 memcpy(bus.mpc_bustype, "ISA ", 6); 660 memcpy(bus.bustype, "ISA ", 6);
669 MP_bus_info(&bus); 661 MP_bus_info(&bus);
670 662
671 construct_default_ioirq_mptable(0); 663 construct_default_ioirq_mptable(0);
@@ -803,14 +795,14 @@ void __init find_smp_config(void)
803#ifdef CONFIG_X86_IO_APIC 795#ifdef CONFIG_X86_IO_APIC
804static u8 __initdata irq_used[MAX_IRQ_SOURCES]; 796static u8 __initdata irq_used[MAX_IRQ_SOURCES];
805 797
806static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m) 798static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
807{ 799{
808 int i; 800 int i;
809 801
810 if (m->mpc_irqtype != mp_INT) 802 if (m->irqtype != mp_INT)
811 return 0; 803 return 0;
812 804
813 if (m->mpc_irqflag != 0x0f) 805 if (m->irqflag != 0x0f)
814 return 0; 806 return 0;
815 807
816 /* not legacy */ 808 /* not legacy */
@@ -822,9 +814,9 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m)
822 if (mp_irqs[i].mp_irqflag != 0x0f) 814 if (mp_irqs[i].mp_irqflag != 0x0f)
823 continue; 815 continue;
824 816
825 if (mp_irqs[i].mp_srcbus != m->mpc_srcbus) 817 if (mp_irqs[i].mp_srcbus != m->srcbus)
826 continue; 818 continue;
827 if (mp_irqs[i].mp_srcbusirq != m->mpc_srcbusirq) 819 if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
828 continue; 820 continue;
829 if (irq_used[i]) { 821 if (irq_used[i]) {
830 /* already claimed */ 822 /* already claimed */
@@ -840,10 +832,10 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m)
840 832
841#define SPARE_SLOT_NUM 20 833#define SPARE_SLOT_NUM 20
842 834
843static struct mpc_config_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; 835static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
844#endif 836#endif
845 837
846static int __init replace_intsrc_all(struct mp_config_table *mpc, 838static int __init replace_intsrc_all(struct mpc_table *mpc,
847 unsigned long mpc_new_phys, 839 unsigned long mpc_new_phys,
848 unsigned long mpc_new_length) 840 unsigned long mpc_new_length)
849{ 841{
@@ -855,36 +847,33 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
855 int count = sizeof(*mpc); 847 int count = sizeof(*mpc);
856 unsigned char *mpt = ((unsigned char *)mpc) + count; 848 unsigned char *mpt = ((unsigned char *)mpc) + count;
857 849
858 printk(KERN_INFO "mpc_length %x\n", mpc->mpc_length); 850 printk(KERN_INFO "mpc_length %x\n", mpc->length);
859 while (count < mpc->mpc_length) { 851 while (count < mpc->length) {
860 switch (*mpt) { 852 switch (*mpt) {
861 case MP_PROCESSOR: 853 case MP_PROCESSOR:
862 { 854 {
863 struct mpc_config_processor *m = 855 struct mpc_cpu *m = (struct mpc_cpu *)mpt;
864 (struct mpc_config_processor *)mpt;
865 mpt += sizeof(*m); 856 mpt += sizeof(*m);
866 count += sizeof(*m); 857 count += sizeof(*m);
867 break; 858 break;
868 } 859 }
869 case MP_BUS: 860 case MP_BUS:
870 { 861 {
871 struct mpc_config_bus *m = 862 struct mpc_bus *m = (struct mpc_bus *)mpt;
872 (struct mpc_config_bus *)mpt;
873 mpt += sizeof(*m); 863 mpt += sizeof(*m);
874 count += sizeof(*m); 864 count += sizeof(*m);
875 break; 865 break;
876 } 866 }
877 case MP_IOAPIC: 867 case MP_IOAPIC:
878 { 868 {
879 mpt += sizeof(struct mpc_config_ioapic); 869 mpt += sizeof(struct mpc_ioapic);
880 count += sizeof(struct mpc_config_ioapic); 870 count += sizeof(struct mpc_ioapic);
881 break; 871 break;
882 } 872 }
883 case MP_INTSRC: 873 case MP_INTSRC:
884 { 874 {
885#ifdef CONFIG_X86_IO_APIC 875#ifdef CONFIG_X86_IO_APIC
886 struct mpc_config_intsrc *m = 876 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
887 (struct mpc_config_intsrc *)mpt;
888 877
889 printk(KERN_INFO "OLD "); 878 printk(KERN_INFO "OLD ");
890 print_MP_intsrc_info(m); 879 print_MP_intsrc_info(m);
@@ -905,14 +894,14 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
905 nr_m_spare++; 894 nr_m_spare++;
906 } 895 }
907#endif 896#endif
908 mpt += sizeof(struct mpc_config_intsrc); 897 mpt += sizeof(struct mpc_intsrc);
909 count += sizeof(struct mpc_config_intsrc); 898 count += sizeof(struct mpc_intsrc);
910 break; 899 break;
911 } 900 }
912 case MP_LINTSRC: 901 case MP_LINTSRC:
913 { 902 {
914 struct mpc_config_lintsrc *m = 903 struct mpc_lintsrc *m =
915 (struct mpc_config_lintsrc *)mpt; 904 (struct mpc_lintsrc *)mpt;
916 mpt += sizeof(*m); 905 mpt += sizeof(*m);
917 count += sizeof(*m); 906 count += sizeof(*m);
918 break; 907 break;
@@ -922,7 +911,7 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
922 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); 911 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
923 printk(KERN_ERR "type %x\n", *mpt); 912 printk(KERN_ERR "type %x\n", *mpt);
924 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 913 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
925 1, mpc, mpc->mpc_length, 1); 914 1, mpc, mpc->length, 1);
926 goto out; 915 goto out;
927 } 916 }
928 } 917 }
@@ -944,9 +933,8 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
944 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); 933 assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
945 m_spare[nr_m_spare] = NULL; 934 m_spare[nr_m_spare] = NULL;
946 } else { 935 } else {
947 struct mpc_config_intsrc *m = 936 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
948 (struct mpc_config_intsrc *)mpt; 937 count += sizeof(struct mpc_intsrc);
949 count += sizeof(struct mpc_config_intsrc);
950 if (!mpc_new_phys) { 938 if (!mpc_new_phys) {
951 printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count); 939 printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count);
952 } else { 940 } else {
@@ -958,17 +946,16 @@ static int __init replace_intsrc_all(struct mp_config_table *mpc,
958 } 946 }
959 } 947 }
960 assign_to_mpc_intsrc(&mp_irqs[i], m); 948 assign_to_mpc_intsrc(&mp_irqs[i], m);
961 mpc->mpc_length = count; 949 mpc->length = count;
962 mpt += sizeof(struct mpc_config_intsrc); 950 mpt += sizeof(struct mpc_intsrc);
963 } 951 }
964 print_mp_irq_info(&mp_irqs[i]); 952 print_mp_irq_info(&mp_irqs[i]);
965 } 953 }
966#endif 954#endif
967out: 955out:
968 /* update checksum */ 956 /* update checksum */
969 mpc->mpc_checksum = 0; 957 mpc->checksum = 0;
970 mpc->mpc_checksum -= mpf_checksum((unsigned char *)mpc, 958 mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
971 mpc->mpc_length);
972 959
973 return 0; 960 return 0;
974} 961}
@@ -1014,8 +1001,7 @@ static int __init update_mp_table(void)
1014 char str[16]; 1001 char str[16];
1015 char oem[10]; 1002 char oem[10];
1016 struct intel_mp_floating *mpf; 1003 struct intel_mp_floating *mpf;
1017 struct mp_config_table *mpc; 1004 struct mpc_table *mpc, *mpc_new;
1018 struct mp_config_table *mpc_new;
1019 1005
1020 if (!enable_update_mptable) 1006 if (!enable_update_mptable)
1021 return 0; 1007 return 0;
@@ -1041,7 +1027,7 @@ static int __init update_mp_table(void)
1041 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); 1027 printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
1042 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); 1028 printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
1043 1029
1044 if (mpc_new_phys && mpc->mpc_length > mpc_new_length) { 1030 if (mpc_new_phys && mpc->length > mpc_new_length) {
1045 mpc_new_phys = 0; 1031 mpc_new_phys = 0;
1046 printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n", 1032 printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n",
1047 mpc_new_length); 1033 mpc_new_length);
@@ -1050,10 +1036,10 @@ static int __init update_mp_table(void)
1050 if (!mpc_new_phys) { 1036 if (!mpc_new_phys) {
1051 unsigned char old, new; 1037 unsigned char old, new;
1052 /* check if we can change the postion */ 1038 /* check if we can change the postion */
1053 mpc->mpc_checksum = 0; 1039 mpc->checksum = 0;
1054 old = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); 1040 old = mpf_checksum((unsigned char *)mpc, mpc->length);
1055 mpc->mpc_checksum = 0xff; 1041 mpc->checksum = 0xff;
1056 new = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); 1042 new = mpf_checksum((unsigned char *)mpc, mpc->length);
1057 if (old == new) { 1043 if (old == new) {
1058 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); 1044 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
1059 return 0; 1045 return 0;
@@ -1062,7 +1048,7 @@ static int __init update_mp_table(void)
1062 } else { 1048 } else {
1063 mpf->mpf_physptr = mpc_new_phys; 1049 mpf->mpf_physptr = mpc_new_phys;
1064 mpc_new = phys_to_virt(mpc_new_phys); 1050 mpc_new = phys_to_virt(mpc_new_phys);
1065 memcpy(mpc_new, mpc, mpc->mpc_length); 1051 memcpy(mpc_new, mpc, mpc->length);
1066 mpc = mpc_new; 1052 mpc = mpc_new;
1067 /* check if we can modify that */ 1053 /* check if we can modify that */
1068 if (mpc_new_phys - mpf->mpf_physptr) { 1054 if (mpc_new_phys - mpf->mpf_physptr) {
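The mpparse.c changes above preserve the MP configuration table's checksum invariant: every byte of the table, the checksum field included, must sum to zero mod 256. That is why replace_intsrc_all() zeroes mpc->checksum and then stores the negated byte sum, and why update_mp_table() can detect a read-only table by writing 0 and then 0xff into the checksum byte: if mpf_checksum() reports the same total both times, the writes never landed and the table lives in ROM. A minimal userspace sketch of the invariant follows; the helper names and the offset-7 checksum location track the Intel MP spec, but treat them as illustrative rather than as kernel API.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Byte sum mod 256; a valid MP config table sums to 0 overall. */
static uint8_t mpf_checksum(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

/* Mirror of the "mpc->checksum = 0; mpc->checksum -= mpf_checksum(...)"
 * sequence above: zero the field, then store the negated sum of the rest,
 * so the whole-table sum wraps back to zero. */
static void fixup_checksum(uint8_t *table, size_t len, size_t csum_off)
{
	table[csum_off] = 0;
	table[csum_off] -= mpf_checksum(table, len);
}

int main(void)
{
	uint8_t table[32];

	memset(table, 0xab, sizeof(table));	/* stand-in table contents */
	memcpy(table, "PCMP", 4);		/* MPC_SIGNATURE */
	fixup_checksum(table, sizeof(table), 7);/* header checksum byte */
	assert(mpf_checksum(table, sizeof(table)) == 0);
	return 0;
}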
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 82a7c7ed6d45..726266695b2c 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
136 lock_kernel(); 136 lock_kernel();
137 cpu = iminor(file->f_path.dentry->d_inode); 137 cpu = iminor(file->f_path.dentry->d_inode);
138 138
139 if (cpu >= NR_CPUS || !cpu_online(cpu)) { 139 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
140 ret = -ENXIO; /* No such CPU */ 140 ret = -ENXIO; /* No such CPU */
141 goto out; 141 goto out;
142 } 142 }
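This msr.c hunk is the first of several below (reboot.c, voyager_smp.c, smpboot.c) that replace NR_CPUS, the compile-time capacity of per-CPU arrays, with nr_cpu_ids, the number of possible CPU ids discovered at boot. The effect is sketched here with invented values; only the comparison pattern is taken from the patch.

#include <stdio.h>

#define NR_CPUS 64		/* compile-time capacity of per-CPU arrays */

static int nr_cpu_ids = 4;	/* what this machine actually has; set at
				 * boot in the real kernel */

static int cpu_online(int cpu)
{
	return cpu < nr_cpu_ids;	/* toy model: low ids are online */
}

int main(void)
{
	int cpu = 10;

	/* Checking against nr_cpu_ids rejects ids that are representable
	 * but can never exist on this machine, not just ids past the
	 * array end. */
	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		printf("cpu %d: no such CPU\n", cpu);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)	/* 4 iterations, not 64 */
		printf("cpu %d online: %d\n", cpu, cpu_online(cpu));
	return 0;
}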
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 45a09ccdc214..7228979f1e7f 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -26,7 +26,6 @@
26#include <linux/kernel_stat.h> 26#include <linux/kernel_stat.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/smp.h> 28#include <linux/smp.h>
29#include <linux/nmi.h>
30 29
31#include <asm/i8259.h> 30#include <asm/i8259.h>
32#include <asm/io_apic.h> 31#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index 0deea37a53cf..f2191d4f2717 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -117,16 +117,15 @@ static inline int generate_logical_apicid(int quad, int phys_apicid)
117} 117}
118 118
119/* x86_quirks member */ 119/* x86_quirks member */
120static int mpc_apic_id(struct mpc_config_processor *m) 120static int mpc_apic_id(struct mpc_cpu *m)
121{ 121{
122 int quad = translation_table[mpc_record]->trans_quad; 122 int quad = translation_table[mpc_record]->trans_quad;
123 int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); 123 int logical_apicid = generate_logical_apicid(quad, m->apicid);
124 124
125 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", 125 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
126 m->mpc_apicid, 126 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
127 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, 127 (m->cpufeature & CPU_MODEL_MASK) >> 4,
128 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, 128 m->apicver, quad, logical_apicid);
129 m->mpc_apicver, quad, logical_apicid);
130 return logical_apicid; 129 return logical_apicid;
131} 130}
132 131
@@ -135,26 +134,26 @@ int mp_bus_id_to_node[MAX_MP_BUSSES];
135int mp_bus_id_to_local[MAX_MP_BUSSES]; 134int mp_bus_id_to_local[MAX_MP_BUSSES];
136 135
137/* x86_quirks member */ 136/* x86_quirks member */
138static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name) 137static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
139{ 138{
140 int quad = translation_table[mpc_record]->trans_quad; 139 int quad = translation_table[mpc_record]->trans_quad;
141 int local = translation_table[mpc_record]->trans_local; 140 int local = translation_table[mpc_record]->trans_local;
142 141
143 mp_bus_id_to_node[m->mpc_busid] = quad; 142 mp_bus_id_to_node[m->busid] = quad;
144 mp_bus_id_to_local[m->mpc_busid] = local; 143 mp_bus_id_to_local[m->busid] = local;
145 printk(KERN_INFO "Bus #%d is %s (node %d)\n", 144 printk(KERN_INFO "Bus #%d is %s (node %d)\n",
146 m->mpc_busid, name, quad); 145 m->busid, name, quad);
147} 146}
148 147
149int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 148int quad_local_to_mp_bus_id [NR_CPUS/4][4];
150 149
151/* x86_quirks member */ 150/* x86_quirks member */
152static void mpc_oem_pci_bus(struct mpc_config_bus *m) 151static void mpc_oem_pci_bus(struct mpc_bus *m)
153{ 152{
154 int quad = translation_table[mpc_record]->trans_quad; 153 int quad = translation_table[mpc_record]->trans_quad;
155 int local = translation_table[mpc_record]->trans_local; 154 int local = translation_table[mpc_record]->trans_local;
156 155
157 quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; 156 quad_local_to_mp_bus_id[quad][local] = m->busid;
158} 157}
159 158
160static void __init MP_translation_info(struct mpc_config_translation *m) 159static void __init MP_translation_info(struct mpc_config_translation *m)
@@ -186,7 +185,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
186 * Read/parse the MPC oem tables 185 * Read/parse the MPC oem tables
187 */ 186 */
188 187
189static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, 188static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
190 unsigned short oemsize) 189 unsigned short oemsize)
191{ 190{
192 int count = sizeof(*oemtable); /* the header size */ 191 int count = sizeof(*oemtable); /* the header size */
@@ -195,18 +194,18 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
195 mpc_record = 0; 194 mpc_record = 0;
196 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", 195 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
197 oemtable); 196 oemtable);
198 if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { 197 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
199 printk(KERN_WARNING 198 printk(KERN_WARNING
200 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", 199 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
201 oemtable->oem_signature[0], oemtable->oem_signature[1], 200 oemtable->signature[0], oemtable->signature[1],
202 oemtable->oem_signature[2], oemtable->oem_signature[3]); 201 oemtable->signature[2], oemtable->signature[3]);
203 return; 202 return;
204 } 203 }
205 if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { 204 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
206 printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); 205 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
207 return; 206 return;
208 } 207 }
209 while (count < oemtable->oem_length) { 208 while (count < oemtable->length) {
210 switch (*oemptr) { 209 switch (*oemptr) {
211 case MP_TRANSLATION: 210 case MP_TRANSLATION:
212 { 211 {
@@ -260,8 +259,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
260 .update_genapic = numaq_update_genapic, 259 .update_genapic = numaq_update_genapic,
261}; 260};
262 261
263void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 262void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
264 char *productid)
265{ 263{
266 if (strncmp(oem, "IBM NUMA", 8)) 264 if (strncmp(oem, "IBM NUMA", 8))
267 printk("Warning! Not a NUMA-Q system!\n"); 265 printk("Warning! Not a NUMA-Q system!\n");
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 242c3440687f..8cba3749a511 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -13,7 +13,7 @@
13 13
14int swiotlb __read_mostly; 14int swiotlb __read_mostly;
15 15
16void *swiotlb_alloc_boot(size_t size, unsigned long nslabs) 16void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
17{ 17{
18 return alloc_bootmem_low_pages(size); 18 return alloc_bootmem_low_pages(size);
19} 19}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3ba155d24884..a546f55c77b4 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -39,11 +39,12 @@
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/dmi.h> 40#include <linux/dmi.h>
41#include <linux/ftrace.h> 41#include <linux/ftrace.h>
42#include <linux/uaccess.h>
43#include <linux/io.h>
44#include <linux/kdebug.h>
42 45
43#include <asm/uaccess.h>
44#include <asm/pgtable.h> 46#include <asm/pgtable.h>
45#include <asm/system.h> 47#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/ldt.h> 48#include <asm/ldt.h>
48#include <asm/processor.h> 49#include <asm/processor.h>
49#include <asm/i387.h> 50#include <asm/i387.h>
@@ -56,10 +57,8 @@
56 57
57#include <asm/tlbflush.h> 58#include <asm/tlbflush.h>
58#include <asm/cpu.h> 59#include <asm/cpu.h>
59#include <asm/kdebug.h>
60#include <asm/idle.h> 60#include <asm/idle.h>
61#include <asm/syscalls.h> 61#include <asm/syscalls.h>
62#include <asm/smp.h>
63#include <asm/ds.h> 62#include <asm/ds.h>
64 63
65asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 64asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -205,7 +204,7 @@ extern void kernel_thread_helper(void);
205/* 204/*
206 * Create a kernel thread 205 * Create a kernel thread
207 */ 206 */
208int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 207int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
209{ 208{
210 struct pt_regs regs; 209 struct pt_regs regs;
211 210
@@ -266,7 +265,7 @@ void flush_thread(void)
266 tsk->thread.debugreg3 = 0; 265 tsk->thread.debugreg3 = 0;
267 tsk->thread.debugreg6 = 0; 266 tsk->thread.debugreg6 = 0;
268 tsk->thread.debugreg7 = 0; 267 tsk->thread.debugreg7 = 0;
269 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); 268 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
270 clear_tsk_thread_flag(tsk, TIF_DEBUG); 269 clear_tsk_thread_flag(tsk, TIF_DEBUG);
271 /* 270 /*
272 * Forget coprocessor state.. 271 * Forget coprocessor state..
@@ -293,9 +292,9 @@ void prepare_to_copy(struct task_struct *tsk)
293 292
294int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 293int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
295 unsigned long unused, 294 unsigned long unused,
296 struct task_struct * p, struct pt_regs * regs) 295 struct task_struct *p, struct pt_regs *regs)
297{ 296{
298 struct pt_regs * childregs; 297 struct pt_regs *childregs;
299 struct task_struct *tsk; 298 struct task_struct *tsk;
300 int err; 299 int err;
301 300
@@ -347,7 +346,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
347void 346void
348start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 347start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
349{ 348{
350 __asm__("movl %0, %%gs" :: "r"(0)); 349 __asm__("movl %0, %%gs" : : "r"(0));
351 regs->fs = 0; 350 regs->fs = 0;
352 set_fs(USER_DS); 351 set_fs(USER_DS);
353 regs->ds = __USER_DS; 352 regs->ds = __USER_DS;
@@ -638,7 +637,7 @@ asmlinkage int sys_vfork(struct pt_regs regs)
638asmlinkage int sys_execve(struct pt_regs regs) 637asmlinkage int sys_execve(struct pt_regs regs)
639{ 638{
640 int error; 639 int error;
641 char * filename; 640 char *filename;
642 641
643 filename = getname((char __user *) regs.bx); 642 filename = getname((char __user *) regs.bx);
644 error = PTR_ERR(filename); 643 error = PTR_ERR(filename);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bf088c61fa40..2b46eb41643b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -501,7 +501,7 @@ void native_machine_shutdown(void)
501 501
502#ifdef CONFIG_X86_32 502#ifdef CONFIG_X86_32
503 /* See if there has been given a command line override */ 503 /* See if there has been given a command line override */
504 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 504 if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
505 cpu_online(reboot_cpu)) 505 cpu_online(reboot_cpu))
506 reboot_cpu_id = reboot_cpu; 506 reboot_cpu_id = reboot_cpu;
507#endif 507#endif
@@ -511,7 +511,7 @@ void native_machine_shutdown(void)
511 reboot_cpu_id = smp_processor_id(); 511 reboot_cpu_id = smp_processor_id();
512 512
513 /* Make certain I only run on the appropriate processor */ 513 /* Make certain I only run on the appropriate processor */
514 set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); 514 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
515 515
516 /* O.K Now that I'm on the appropriate processor, 516 /* O.K Now that I'm on the appropriate processor,
517 * stop all of the others. 517 * stop all of the others.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0b63b08e7530..e4433bf44209 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -5,12 +5,11 @@
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/kexec.h> 6#include <linux/kexec.h>
7#include <linux/crash_dump.h> 7#include <linux/crash_dump.h>
8#include <asm/smp.h> 8#include <linux/smp.h>
9#include <asm/percpu.h> 9#include <linux/topology.h>
10#include <asm/sections.h> 10#include <asm/sections.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/setup.h> 12#include <asm/setup.h>
13#include <asm/topology.h>
14#include <asm/mpspec.h> 13#include <asm/mpspec.h>
15#include <asm/apicdef.h> 14#include <asm/apicdef.h>
16#include <asm/highmem.h> 15#include <asm/highmem.h>
@@ -20,8 +19,8 @@ unsigned int num_processors;
20unsigned disabled_cpus __cpuinitdata; 19unsigned disabled_cpus __cpuinitdata;
21/* Processor that is doing the boot up */ 20/* Processor that is doing the boot up */
22unsigned int boot_cpu_physical_apicid = -1U; 21unsigned int boot_cpu_physical_apicid = -1U;
23unsigned int max_physical_apicid;
24EXPORT_SYMBOL(boot_cpu_physical_apicid); 22EXPORT_SYMBOL(boot_cpu_physical_apicid);
23unsigned int max_physical_apicid;
25 24
26/* Bitmask of physically existing CPUs */ 25/* Bitmask of physically existing CPUs */
27physid_mask_t phys_cpu_present_map; 26physid_mask_t phys_cpu_present_map;
@@ -153,12 +152,10 @@ void __init setup_per_cpu_areas(void)
153 align = max_t(unsigned long, PAGE_SIZE, align); 152 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align); 153 size = roundup(old_size, align);
155 154
156 printk(KERN_INFO 155 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
157 "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
158 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 156 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
159 157
160 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 158 pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
161 size);
162 159
163 for_each_possible_cpu(cpu) { 160 for_each_possible_cpu(cpu) {
164#ifndef CONFIG_NEED_MULTIPLE_NODES 161#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -169,22 +166,15 @@ void __init setup_per_cpu_areas(void)
169 if (!node_online(node) || !NODE_DATA(node)) { 166 if (!node_online(node) || !NODE_DATA(node)) {
170 ptr = __alloc_bootmem(size, align, 167 ptr = __alloc_bootmem(size, align,
171 __pa(MAX_DMA_ADDRESS)); 168 __pa(MAX_DMA_ADDRESS));
172 printk(KERN_INFO 169 pr_info("cpu %d has no node %d or node-local memory\n",
173 "cpu %d has no node %d or node-local memory\n",
174 cpu, node); 170 cpu, node);
175 if (ptr) 171 pr_debug("per cpu data for cpu%d at %016lx\n",
176 printk(KERN_DEBUG 172 cpu, __pa(ptr));
177 "per cpu data for cpu%d at %016lx\n", 173 } else {
178 cpu, __pa(ptr));
179 }
180 else {
181 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 174 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
182 __pa(MAX_DMA_ADDRESS)); 175 __pa(MAX_DMA_ADDRESS));
183 if (ptr) 176 pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
184 printk(KERN_DEBUG 177 cpu, node, __pa(ptr));
185 "per cpu data for cpu%d on node%d "
186 "at %016lx\n",
187 cpu, node, __pa(ptr));
188 } 178 }
189#endif 179#endif
190 per_cpu_offset(cpu) = ptr - __per_cpu_start; 180 per_cpu_offset(cpu) = ptr - __per_cpu_start;
@@ -289,8 +279,8 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
289 279
290 cpulist_scnprintf(buf, sizeof(buf), mask); 280 cpulist_scnprintf(buf, sizeof(buf), mask);
291 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 281 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
292 enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); 282 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
293 } 283}
294 284
295void __cpuinit numa_add_cpu(int cpu) 285void __cpuinit numa_add_cpu(int cpu)
296{ 286{
@@ -339,25 +329,25 @@ static const cpumask_t cpu_mask_none;
339/* 329/*
340 * Returns a pointer to the bitmask of CPUs on Node 'node'. 330 * Returns a pointer to the bitmask of CPUs on Node 'node'.
341 */ 331 */
342const cpumask_t *_node_to_cpumask_ptr(int node) 332const cpumask_t *cpumask_of_node(int node)
343{ 333{
344 if (node_to_cpumask_map == NULL) { 334 if (node_to_cpumask_map == NULL) {
345 printk(KERN_WARNING 335 printk(KERN_WARNING
346 "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", 336 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
347 node); 337 node);
348 dump_stack(); 338 dump_stack();
349 return (const cpumask_t *)&cpu_online_map; 339 return (const cpumask_t *)&cpu_online_map;
350 } 340 }
351 if (node >= nr_node_ids) { 341 if (node >= nr_node_ids) {
352 printk(KERN_WARNING 342 printk(KERN_WARNING
353 "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", 343 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
354 node, nr_node_ids); 344 node, nr_node_ids);
355 dump_stack(); 345 dump_stack();
356 return &cpu_mask_none; 346 return &cpu_mask_none;
357 } 347 }
358 return &node_to_cpumask_map[node]; 348 return &node_to_cpumask_map[node];
359} 349}
360EXPORT_SYMBOL(_node_to_cpumask_ptr); 350EXPORT_SYMBOL(cpumask_of_node);
361 351
362/* 352/*
363 * Returns a bitmask of CPUs on Node 'node'. 353 * Returns a bitmask of CPUs on Node 'node'.
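The rename of _node_to_cpumask_ptr() to cpumask_of_node() keeps the accessor's defensive shape: a missing map falls back to the online mask, an out-of-range node falls back to the empty mask, and both paths warn and dump a stack instead of indexing past the table. A simplified model of that shape is below; the mask type, values, and fprintf() stand in for the kernel's definitions and printk()/dump_stack().

#include <stdio.h>

typedef unsigned long cpumask_t;	/* stand-in for the kernel type */

#define NR_NODES 2

static const cpumask_t node_to_cpumask_map[NR_NODES] = { 0x3, 0xc };
static const cpumask_t cpu_online_map = 0xf;
static const cpumask_t cpu_mask_none;

static const cpumask_t *mask_of_node(const cpumask_t *map, int node)
{
	if (map == NULL) {
		fprintf(stderr, "mask_of_node(%d): no map!\n", node);
		return &cpu_online_map;	/* permissive fallback */
	}
	if (node < 0 || node >= NR_NODES) {
		fprintf(stderr, "mask_of_node(%d): out of range\n", node);
		return &cpu_mask_none;	/* restrictive fallback */
	}
	return &map[node];
}

int main(void)
{
	printf("%lx %lx %lx\n",
	       *mask_of_node(node_to_cpumask_map, 1),	/* 0xc */
	       *mask_of_node(node_to_cpumask_map, 7),	/* 0x0, warns */
	       *mask_of_node(NULL, 1));			/* 0xf, warns */
	return 0;
}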
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c628f9178cd8..07576bee03ef 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
496} 496}
497 497
498/* maps the cpu to the sched domain representing multi-core */ 498/* maps the cpu to the sched domain representing multi-core */
499cpumask_t cpu_coregroup_map(int cpu) 499const struct cpumask *cpu_coregroup_mask(int cpu)
500{ 500{
501 struct cpuinfo_x86 *c = &cpu_data(cpu); 501 struct cpuinfo_x86 *c = &cpu_data(cpu);
502 /* 502 /*
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
504 * And for power savings, we return cpu_core_map 504 * And for power savings, we return cpu_core_map
505 */ 505 */
506 if (sched_mc_power_savings || sched_smt_power_savings) 506 if (sched_mc_power_savings || sched_smt_power_savings)
507 return per_cpu(cpu_core_map, cpu); 507 return &per_cpu(cpu_core_map, cpu);
508 else 508 else
509 return c->llc_shared_map; 509 return &c->llc_shared_map;
510}
511
512cpumask_t cpu_coregroup_map(int cpu)
513{
514 return *cpu_coregroup_mask(cpu);
510} 515}
511 516
512static void impress_friends(void) 517static void impress_friends(void)
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
1149 for_each_possible_cpu(i) { 1154 for_each_possible_cpu(i) {
1150 c = &cpu_data(i); 1155 c = &cpu_data(i);
1151 /* mark all to hotplug */ 1156 /* mark all to hotplug */
1152 c->cpu_index = NR_CPUS; 1157 c->cpu_index = nr_cpu_ids;
1153 } 1158 }
1154} 1159}
1155 1160
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void)
1293 else 1298 else
1294 possible = setup_possible_cpus; 1299 possible = setup_possible_cpus;
1295 1300
1301 total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1302
1296 if (possible > CONFIG_NR_CPUS) { 1303 if (possible > CONFIG_NR_CPUS) {
1297 printk(KERN_WARNING 1304 printk(KERN_WARNING
1298 "%d Processors exceeds NR_CPUS limit of %d\n", 1305 "%d Processors exceeds NR_CPUS limit of %d\n",
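The cpu_coregroup_mask()/cpu_coregroup_map() pair added above is a standard migration pattern: introduce a pointer-returning accessor so callers stop copying a cpumask_t by value (the struct grows with NR_CPUS), and keep the old by-value function as a one-line shim until the last callers are converted. In miniature, again with a toy mask type:

#include <stdio.h>

typedef unsigned long cpumask_t;	/* stand-in; the real type can be
					 * hundreds of bytes wide */

static cpumask_t cpu_core_map[2] = { 0x0f, 0xf0 };

/* New interface: no copy, callers get a pointer into the map. */
static const cpumask_t *coregroup_mask(int cpu)
{
	return &cpu_core_map[cpu];
}

/* Transitional shim, the shape added to smpboot.c above: existing
 * by-value callers keep compiling while they are converted. */
static cpumask_t coregroup_map(int cpu)
{
	return *coregroup_mask(cpu);
}

int main(void)
{
	printf("cpu0: %lx via pointer, %lx via shim\n",
	       *coregroup_mask(0), coregroup_map(0));
	return 0;
}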
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 65309e4cb1c0..3985cac0ed47 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -105,8 +105,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
105 high bit of the PPI port B (0x61). Note that some PS/2s, 105 high bit of the PPI port B (0x61). Note that some PS/2s,
106 notably the 55SX, work fine if this is removed. */ 106 notably the 55SX, work fine if this is removed. */
107 107
108 u8 irq_v = inb_p( 0x61 ); /* read the current state */ 108 u8 irq_v = inb_p(0x61); /* read the current state */
109 outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */ 109 outb_p(irq_v | 0x80, 0x61); /* reset the IRQ */
110 } 110 }
111#endif 111#endif
112 112
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 891e7a7c4334..e6e695acd725 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -17,10 +17,10 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/mca.h> 19#include <linux/mca.h>
20#include <linux/nmi.h>
20 21
21#include <asm/i8253.h> 22#include <asm/i8253.h>
22#include <asm/hpet.h> 23#include <asm/hpet.h>
23#include <asm/nmi.h>
24#include <asm/vgtod.h> 24#include <asm/vgtod.h>
25#include <asm/time.h> 25#include <asm/time.h>
26#include <asm/timer.h> 26#include <asm/timer.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ce6650eb64e9..25d5c3073582 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -65,9 +65,6 @@
65#else 65#else
66#include <asm/processor-flags.h> 66#include <asm/processor-flags.h>
67#include <asm/arch_hooks.h> 67#include <asm/arch_hooks.h>
68#include <asm/nmi.h>
69#include <asm/smp.h>
70#include <asm/io.h>
71#include <asm/traps.h> 68#include <asm/traps.h>
72 69
73#include "cpu/mcheck/mce.h" 70#include "cpu/mcheck/mce.h"
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 0c9667f0752a..d801d06af068 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -176,33 +176,31 @@ static int __init visws_get_smp_config(unsigned int early)
176 * No problem for Linux. 176 * No problem for Linux.
177 */ 177 */
178 178
179static void __init MP_processor_info(struct mpc_config_processor *m) 179static void __init MP_processor_info(struct mpc_cpu *m)
180{ 180{
181 int ver, logical_apicid; 181 int ver, logical_apicid;
182 physid_mask_t apic_cpus; 182 physid_mask_t apic_cpus;
183 183
184 if (!(m->mpc_cpuflag & CPU_ENABLED)) 184 if (!(m->cpuflag & CPU_ENABLED))
185 return; 185 return;
186 186
187 logical_apicid = m->mpc_apicid; 187 logical_apicid = m->apicid;
188 printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n", 188 printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n",
189 m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "", 189 m->cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
190 m->mpc_apicid, 190 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
191 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, 191 (m->cpufeature & CPU_MODEL_MASK) >> 4, m->apicver);
192 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
193 m->mpc_apicver);
194 192
195 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) 193 if (m->cpuflag & CPU_BOOTPROCESSOR)
196 boot_cpu_physical_apicid = m->mpc_apicid; 194 boot_cpu_physical_apicid = m->apicid;
197 195
198 ver = m->mpc_apicver; 196 ver = m->apicver;
199 if ((ver >= 0x14 && m->mpc_apicid >= 0xff) || m->mpc_apicid >= 0xf) { 197 if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) {
200 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", 198 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
201 m->mpc_apicid, MAX_APICS); 199 m->apicid, MAX_APICS);
202 return; 200 return;
203 } 201 }
204 202
205 apic_cpus = apicid_to_cpu_present(m->mpc_apicid); 203 apic_cpus = apicid_to_cpu_present(m->apicid);
206 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); 204 physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
207 /* 205 /*
208 * Validate version 206 * Validate version
@@ -210,15 +208,15 @@ static void __init MP_processor_info(struct mpc_config_processor *m)
210 if (ver == 0x0) { 208 if (ver == 0x0) {
211 printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! " 209 printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! "
212 "fixing up to 0x10. (tell your hw vendor)\n", 210 "fixing up to 0x10. (tell your hw vendor)\n",
213 m->mpc_apicid); 211 m->apicid);
214 ver = 0x10; 212 ver = 0x10;
215 } 213 }
216 apic_version[m->mpc_apicid] = ver; 214 apic_version[m->apicid] = ver;
217} 215}
218 216
219static int __init visws_find_smp_config(unsigned int reserve) 217static int __init visws_find_smp_config(unsigned int reserve)
220{ 218{
221 struct mpc_config_processor *mp = phys_to_virt(CO_CPU_TAB_PHYS); 219 struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS);
222 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); 220 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));
223 221
224 if (ncpus > CO_CPU_MAX) { 222 if (ncpus > CO_CPU_MAX) {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index c02343594b4d..d3ec292f00f2 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -7,8 +7,8 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
7ifeq ($(CONFIG_KVM_TRACE),y) 7ifeq ($(CONFIG_KVM_TRACE),y)
8common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o) 8common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
9endif 9endif
10ifeq ($(CONFIG_DMAR),y) 10ifeq ($(CONFIG_IOMMU_API),y)
11common-objs += $(addprefix ../../../virt/kvm/, vtd.o) 11common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
12endif 12endif
13 13
14EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm 14EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0e6aa8141dcd..cc17546a2406 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/mman.h> 35#include <linux/mman.h>
36#include <linux/highmem.h> 36#include <linux/highmem.h>
37#include <linux/iommu.h>
37#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
@@ -989,7 +990,7 @@ int kvm_dev_ioctl_check_extension(long ext)
989 r = !tdp_enabled; 990 r = !tdp_enabled;
990 break; 991 break;
991 case KVM_CAP_IOMMU: 992 case KVM_CAP_IOMMU:
992 r = intel_iommu_found(); 993 r = iommu_found();
993 break; 994 break;
994 default: 995 default:
995 r = 0; 996 r = 0;
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 4ba5ccaa1584..c2ded1448024 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -43,12 +43,12 @@ static void __init enable_apic_mode(void)
43 return; 43 return;
44} 44}
45 45
46static __init int mps_oem_check(struct mp_config_table *mpc, char *oem, 46static __init int
47 char *productid) 47mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
48{ 48{
49 if (mpc->mpc_oemptr) { 49 if (mpc->oemptr) {
50 struct mp_config_oemtable *oem_table = 50 struct mpc_oemtable *oem_table =
51 (struct mp_config_oemtable *)mpc->mpc_oemptr; 51 (struct mpc_oemtable *)mpc->oemptr;
52 if (!strncmp(oem, "UNISYS", 6)) 52 if (!strncmp(oem, "UNISYS", 6))
53 return parse_unisys_oem((char *)oem_table); 53 return parse_unisys_oem((char *)oem_table);
54 } 54 }
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 511d7941364f..3679e2255645 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -19,8 +19,7 @@
19#include <asm/numaq/wakecpu.h> 19#include <asm/numaq/wakecpu.h>
20#include <asm/numaq.h> 20#include <asm/numaq.h>
21 21
22static int mps_oem_check(struct mp_config_table *mpc, char *oem, 22static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
23 char *productid)
24{ 23{
25 numaq_mps_oem_check(mpc, oem, productid); 24 numaq_mps_oem_check(mpc, oem, productid);
26 return found_numaq; 25 return found_numaq;
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index c346d9d0226f..15a38daef1a8 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -110,8 +110,7 @@ void __init generic_apic_probe(void)
110 110
111/* These functions can switch the APIC even after the initial ->probe() */ 111/* These functions can switch the APIC even after the initial ->probe() */
112 112
113int __init mps_oem_check(struct mp_config_table *mpc, char *oem, 113int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
114 char *productid)
115{ 114{
116 int i; 115 int i;
117 for (i = 0; apic_probe[i]; ++i) { 116 for (i = 0; apic_probe[i]; ++i) {
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index a5bc05492b1e..9840b7ec749a 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -357,9 +357,8 @@ void __init find_smp_config(void)
357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
358 358
359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 359 /* initialize the CPU structures (moved from smp_boot_cpus) */
360 for (i = 0; i < NR_CPUS; i++) { 360 for (i = 0; i < nr_cpu_ids; i++)
361 cpu_irq_affinity[i] = ~0; 361 cpu_irq_affinity[i] = ~0;
362 }
363 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 362 cpu_online_map = cpumask_of_cpu(boot_cpu_id);
364 363
365 /* The boot CPU must be extended */ 364 /* The boot CPU must be extended */
@@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier)
1227 * new values until the next timer interrupt in which they do process 1226 * new values until the next timer interrupt in which they do process
1228 * accounting. 1227 * accounting.
1229 */ 1228 */
1230 for (i = 0; i < NR_CPUS; ++i) 1229 for (i = 0; i < nr_cpu_ids; ++i)
1231 per_cpu(prof_multiplier, i) = multiplier; 1230 per_cpu(prof_multiplier, i) = multiplier;
1232 1231
1233 return 0; 1232 return 0;
@@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void)
1257 int i; 1256 int i;
1258 1257
1259 /* initialize the per cpu irq mask to all disabled */ 1258 /* initialize the per cpu irq mask to all disabled */
1260 for (i = 0; i < NR_CPUS; i++) 1259 for (i = 0; i < nr_cpu_ids; i++)
1261 vic_irq_mask[i] = 0xFFFF; 1260 vic_irq_mask[i] = 0xFFFF;
1262 1261
1263 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); 1262 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 41f1b5c00a1d..268f8255280f 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -81,7 +81,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	unsigned numnodes, cores, bits, apicid_base;
 	unsigned long prevbase;
 	struct bootnode nodes[8];
-	unsigned char nodeids[8];
 	int i, j, nb, found = 0;
 	u32 nodeid, reg;
 
@@ -110,7 +109,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
 		nodeid = limit & 7;
-		nodeids[i] = nodeid;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				printk("Skipping disabled node %d\n", i);
@@ -179,9 +177,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
 		nodes[nodeid].start = base;
 		nodes[nodeid].end = limit;
-		e820_register_active_regions(nodeid,
-					     nodes[nodeid].start >> PAGE_SHIFT,
-					     nodes[nodeid].end >> PAGE_SHIFT);
 
 		prevbase = base;
 
@@ -211,12 +206,15 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	}
 
 	for (i = 0; i < 8; i++) {
-		if (nodes[i].start != nodes[i].end) {
-			nodeid = nodeids[i];
-			for (j = apicid_base; j < cores + apicid_base; j++)
-				apicid_to_node[(nodeid << bits) + j] = i;
-			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-		}
+		if (nodes[i].start == nodes[i].end)
+			continue;
+
+		e820_register_active_regions(i,
+					     nodes[i].start >> PAGE_SHIFT,
+					     nodes[i].end >> PAGE_SHIFT);
+		for (j = apicid_base; j < cores + apicid_base; j++)
+			apicid_to_node[(i << bits) + j] = i;
+		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
 
 	numa_init_array();
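The three k8topology_64.c hunks belong together: e820_register_active_regions() moves out of the PCI scan loop and into the final per-node pass, which already iterates by node number, so the nodeids[] side table that remembered the scan-order-to-node mapping can be deleted. Reassembled from the hunks above, the final loop now reads:

	for (i = 0; i < 8; i++) {
		if (nodes[i].start == nodes[i].end)
			continue;

		/* nodes[] is indexed by node id when populated, so i is
		 * the node id here and no separate lookup is needed. */
		e820_register_active_regions(i,
					     nodes[i].start >> PAGE_SHIFT,
					     nodes[i].end >> PAGE_SHIFT);
		for (j = apicid_base; j < cores + apicid_base; j++)
			apicid_to_node[(i << bits) + j] = i;
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}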
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 65d75a6be0ba..14f240623497 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -132,8 +132,7 @@ static void do_stolen_accounting(void)
 	*snap = state;
 
 	/* Add the appropriate number of ticks of stolen time,
-	   including any left-overs from last time. Passing NULL to
-	   account_steal_time accounts the time as stolen. */
+	   including any left-overs from last time. */
 	stolen = runnable + offline + __get_cpu_var(residual_stolen);
 
 	if (stolen < 0)
@@ -141,11 +140,10 @@ static void do_stolen_accounting(void)
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
-	account_steal_time(NULL, ticks);
+	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time. Passing idle to
-	   account_steal_time accounts the time as idle/wait. */
+	   including any left-overs from last time. */
 	blocked += __get_cpu_var(residual_blocked);
 
 	if (blocked < 0)
@@ -153,7 +151,7 @@ static void do_stolen_accounting(void)
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
-	account_steal_time(idle_task(smp_processor_id()), ticks);
+	account_idle_ticks(ticks);
 }
 
 /*
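The xen/time.c hunks track an accounting API change: the old account_steal_time(task, ticks) overloaded its first argument, with NULL meaning "count as stolen" and the idle task meaning "count as idle/wait"; the replacement splits this into account_steal_ticks() and account_idle_ticks(), so the comments explaining the NULL/idle convention can go. A minimal sketch of the new call pattern, assuming stolen and blocked hold leftover nanoseconds and NS_PER_TICK is the tick length in nanoseconds; the wrapper function name is illustrative, and unlike do_stolen_accounting() this sketch discards the sub-tick remainders instead of saving them:

#include <linux/kernel_stat.h>	/* account_steal_ticks(), account_idle_ticks() */
#include <linux/math64.h>	/* iter_div_u64_rem() */

static void account_stolen_and_blocked(u64 stolen, u64 blocked)
{
	u64 ticks;

	/* Whole ticks another guest ran on our CPU, charged as steal. */
	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	account_steal_ticks(ticks);

	/* Whole ticks we spent voluntarily blocked, charged as idle. */
	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	account_idle_ticks(ticks);
}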